if (region != NULL && region->seg == NULL)
{
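+ /*
+ * Update largest_reported_chunk before reporting, not after: if
+ * sb_report_contiguous_freespace() reenters this FreePageManager, the
+ * already-updated value keeps it from reporting the same chunk again.
+ * (Reentrancy is an assumption here; the callee is not shown in this
+ * excerpt.)
+ */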
- sb_report_contiguous_freespace(region, contiguous_pages);
fpm->largest_reported_chunk = contiguous_pages;
+ sb_report_contiguous_freespace(region, contiguous_pages);
}
else
{
/*
 * Iterate through heaps back to front. We do it this way so that
 * spans-of-spans are freed last.
 */
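+ /*
+ * heaps[] holds SB_NUM_SIZE_CLASSES entries, so iteration starts at the
+ * last valid index, SB_NUM_SIZE_CLASSES - 1; the old bound read one
+ * element past the end of the array.
+ */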
- for (heapno = SB_NUM_SIZE_CLASSES; heapno >= 0; --heapno)
+ for (heapno = SB_NUM_SIZE_CLASSES - 1; heapno >= 0; --heapno)
{
sb_heap *heap = &a->heaps[heapno];
- Size fclass;
+ int fclass;
for (fclass = 0; fclass < SB_FULLNESS_CLASSES; ++fclass)
{
/* Start from the head of this fullness class's span list. */
sb_span *span = relptr_access(base, heap->spans[fclass]);

while (span != NULL)
{
Size offset;
+ sb_span *nextspan;
superblock = relptr_access(base, span->start);
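+ /*
+ * Fetch the next span before returning this span's pages to the free
+ * page manager: for a span-of-spans, FreePageManagerPut() releases the
+ * very pages that hold this sb_span, so span->nextspan must not be
+ * dereferenced afterward.
+ */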
+ nextspan = relptr_access(base, span->nextspan);
region = sb_lookup_region(superblock);
Assert(region != NULL);
offset = superblock - fpm_segment_base(region->fpm);
Assert(offset % FPM_PAGE_SIZE == 0);
FreePageManagerPut(region->fpm, offset / FPM_PAGE_SIZE,
span->npages);
- span = relptr_access(base, span->nextspan);
+ span = nextspan;
}
}
}
region->region_start = region_start;
region->region_size = region_size;
region->usable_pages = new_region_net_pages;
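+ /*
+ * Count the new region's pages against the process-wide total of
+ * privately allocated pages. (How that counter is consumed is not
+ * shown in this excerpt.)
+ */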
+ sb_private_pages_allocated += region->usable_pages;
region->seg = NULL;
region->allocator = NULL;
region->fpm = (FreePageManager *)