#define NUM_PRIVATE_FREELISTS 16
static dlist_head private_freelist[NUM_PRIVATE_FREELISTS];
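+/*
+ * Map a run of n contiguous pages to a private freelist index.  fls(n)
+ * buckets runs by power of two, and the Min() clamps the result to the
+ * highest valid index so oversized runs land in the last freelist rather
+ * than indexing past the end of private_freelist[].
+ */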
+#define contiguous_pages_to_freelist(n) \
+ Min(fls(n), NUM_PRIVATE_FREELISTS - 1)
+
/*
* Constants to set the size of backend-private regions. Superblocks are
* 16 pages each (64kB), and we want a number of superblocks to fit inside
AllocatorRegion *
GetRegionForPrivateAllocation(Size npages)
{
- int freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
+ int freelist = contiguous_pages_to_freelist(npages);
Size new_region_net_pages;
Size metadata_bytes;
char *region_start;
*/
if (largest < threshold)
{
- int new_freelist = Min(fls(largest), NUM_PRIVATE_FREELISTS);
+ int new_freelist = contiguous_pages_to_freelist(largest);
dlist_delete(iter.cur);
dlist_push_head(&private_freelist[new_freelist],
metadata_bytes / FPM_PAGE_SIZE,
new_region_net_pages);
region->contiguous_pages = new_region_net_pages; /* Now fix the value. */
- freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS);
+ freelist = contiguous_pages_to_freelist(new_region_net_pages);
dlist_push_head(&private_freelist[freelist], &region->fl_node);
AllocatorRegionAdjustLookup(region, true);
}
/* If necessary, move the region to a higher-numbered freelist. */
- old_freelist = Min(fls(region->contiguous_pages), NUM_PRIVATE_FREELISTS);
- new_freelist = Min(fls(npages), NUM_PRIVATE_FREELISTS);
+ old_freelist = contiguous_pages_to_freelist(region->contiguous_pages);
+ new_freelist = contiguous_pages_to_freelist(npages);
if (new_freelist > old_freelist)
{
dlist_delete(&region->fl_node);
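For reference, here is a minimal standalone sketch of the bucket mapping the new macro encodes, assuming fls() returns the one-based position of the highest set bit (with fls(0) == 0), as the BSD implementation does. The fls_sketch() helper and the main() driver are illustrative stand-ins so the example compiles on its own; they are not part of the patch.

#include <stdio.h>

#define NUM_PRIVATE_FREELISTS 16
#define Min(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for fls(): one-based position of the highest set bit, 0 for 0. */
static int
fls_sketch(unsigned long n)
{
	int		bit = 0;

	while (n != 0)
	{
		bit++;
		n >>= 1;
	}
	return bit;
}

#define contiguous_pages_to_freelist(n) \
	Min(fls_sketch(n), NUM_PRIVATE_FREELISTS - 1)

int
main(void)
{
	unsigned long sizes[] = {1, 3, 16, 16383, 100000};

	/*
	 * Prints freelists 1, 2, 5, 14, 15: runs of 32768 pages or more are
	 * clamped to the last freelist instead of overrunning the array.
	 */
	for (int i = 0; i < 5; i++)
		printf("%lu pages -> freelist %d\n", sizes[i],
			   contiguous_pages_to_freelist(sizes[i]));
	return 0;
}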