region = dlist_container(sb_region, fl_node, iter.cur);
- /* Skip regions that are certain not to have space. */
- if (region->contiguous_pages < npages)
+ /*
+ * Quickly skip regions which appear to have enough space to
+ * belong on this freelist but which don't have enough space to
+ * satisfy the request, to avoid probing every region on the list
+ * for its exact free space on every trip through.
+ */
+ if (region->contiguous_pages >= threshold &&
+ region->contiguous_pages < npages)
continue;
/*
- * The region we're examining was at one point reported to
- * have adequate space, but subsequent allocations might have
- * eroded that, so recheck. If there's enough, we're done!
- *
- * NB: For larger allocations this might be suboptimal, because
- * we might carve space out of a chunk that's bigger than we
- * really need rather than locating the best fit across all
- * chunks. It shouldn't be too far off, though, because
- * chunks with way more contiguous space available will be on
- * a higher-numbered freelist. For really large allocations,
- * it's probably better to malloc() directly than go through
- * this machinery.
+ * We're going to either use this region or move it to a
+ * lower-numbered freelist or both, so determine the precise size
+ * of the largest remaining run of pages.
*/
largest = FreePageManagerInquireLargest(region->fpm);
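/* Remember the exact figure so the quick check above stays accurate. */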
region->contiguous_pages = largest;
- if (largest >= npages)
- return region;
/*
 * If the region no longer has enough contiguous space to belong on
 * this freelist, move it to the freelist appropriate to its largest
 * remaining run of pages.
 */
if (largest < threshold)
{
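/*
 * fls() reports the 1-based position of the highest set bit, so this
 * buckets the region by the power-of-two size class of its largest
 * remaining run (clamped to NUM_PRIVATE_FREELISTS).
 */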
- int new_freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
+ int new_freelist = Min(fls(largest), NUM_PRIVATE_FREELISTS);
dlist_delete(iter.cur);
dlist_push_head(&private_freelist[new_freelist],
&region->fl_node);
}
+
+ /*
+ * If the region is big enough, use it. For larger allocations
+ * this might be suboptimal, because we might carve space out of a
+ * chunk that's bigger than we really need rather than locating
+ * the best fit across all chunks. It shouldn't be too far off,
+ * though, because chunks with way more contiguous space available
+ * will be on a higher-numbered freelist.
+ *
+ * NB: For really large backend-private allocations, it's probably
+ * better to malloc() directly than go through this machinery.
+ */
+ if (largest >= npages)
+ return region;
}
/* Try next freelist. */