/* Descend to appropriate child page. */
Assert(index < btp->hdr.nused);
child = relptr_access(base, btp->u.internal_key[index].child);
- if (relptr_access(base, child->hdr.parent) != btp)
- {
- elog(LOG, "btp = %zu, child = %zu, child parent = %zu",
- fpm_pointer_to_page(base, btp),
- fpm_pointer_to_page(base, child),
- fpm_pointer_to_page(base, relptr_access(base, child->hdr.parent)));
- elog(FATAL, "%s", FreePageManagerDump(fpm));
- }
+ Assert(relptr_access(base, child->hdr.parent) == btp);
btp = child;
}
* this to result in memory fragmentation if we're repeatedly asked to
* allocate chunks just a little smaller than what we have available.
* Hopefully, this is unlikely, because we expect most requests to be
- * single pages (for the bootstrap allocator) or superblock-sized chunks
- * (for the superblock allocator, and for address space map memory),
- * but no policy can be optimal under all circumstances unless it has
- * knowledge of future allocation patterns.
+ * single pages or superblock-sized chunks -- but no policy can be optimal
+ * under all circumstances unless it has knowledge of future allocation
+ * patterns.
*/
for (f = Min(npages, FPM_NUM_FREELISTS) - 1; f < FPM_NUM_FREELISTS; ++f)
{
return false;
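
As the comment above notes, a request is satisfied from the smallest freelist that could hold a large-enough span, scanning upward from there. The following is a minimal standalone sketch of that selection only, using assumed stand-ins (NUM_FREELISTS, a plain emptiness array) rather than the real FreePageManager structures:

#include <stdbool.h>

#define NUM_FREELISTS 256            /* assumed stand-in for FPM_NUM_FREELISTS */

/*
 * Pick the lowest non-empty freelist that could satisfy a request for
 * npages pages.  Lists 0..NUM_FREELISTS-2 hold spans of exactly f+1 pages;
 * the final list holds everything larger, so the scan always includes it.
 */
static int
choose_freelist(const bool list_is_empty[NUM_FREELISTS], unsigned npages)
{
    unsigned    f = (npages < NUM_FREELISTS ? npages : NUM_FREELISTS) - 1;

    for (; f < NUM_FREELISTS; ++f)
    {
        if (!list_is_empty[f])
            return (int) f;     /* candidate list; the real code still checks
                                 * span size when this is the last list */
    }
    return -1;                  /* no free span is large enough */
}
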
/* Remove span from free list. */
+ Assert(victim->magic == FREE_PAGE_SPAN_LEADER_MAGIC);
prev = relptr_access(base, victim->prev);
next = relptr_access(base, victim->next);
if (prev != NULL)
(SB_SCLASS_FIRST_REGULAR + lengthof(sb_size_classes))
/* Helper functions. */
-static char *sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize);
+static char *sb_alloc_from_heap(char *base, sb_heap *heap,
+ Size obsize, Size nmax);
static char *sb_alloc_guts(char *base, sb_region *region,
sb_allocator *a, int size_class);
static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
* superblock that would otherwise become empty soon.
*/
static char *
-sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize)
+sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize, Size nmax)
{
sb_span *active_sb;
Size fclass;
- Size nmax = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
char *superblock;
char *result;
LWLock *lock = relptr_access(base, heap->lock);
char *result = NULL;
Size obsize;
+ Size nmax;
/* Work out object size. */
if (size_class == 0)
+ {
obsize = sizeof(sb_span);
+ nmax = FPM_PAGE_SIZE / obsize;
+ }
else
{
Assert(size_class >= SB_SCLASS_FIRST_REGULAR);
Assert(size_class < SB_NUM_SIZE_CLASSES);
obsize = sb_size_classes[size_class - SB_SCLASS_FIRST_REGULAR];
+ nmax = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
}
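
Passing nmax in from here, rather than recomputing it inside sb_alloc_from_heap, matters because the two branches above imply different superblock sizes: the span-of-spans class (size_class 0) packs its objects into a single page, while regular classes use the full multi-page superblock. A worked example with assumed constants (4 kB pages, 16 pages per superblock, a 64-byte sb_span, and a hypothetical 512-byte size class; the real values come from the headers):

#include <stdio.h>

#define PAGE_SIZE               4096    /* assumed FPM_PAGE_SIZE */
#define PAGES_PER_SUPERBLOCK    16      /* assumed SB_PAGES_PER_SUPERBLOCK */
#define SPAN_SIZE               64      /* assumed sizeof(sb_span) */

int
main(void)
{
    /* size_class 0: the superblock is a single page of sb_span objects. */
    printf("span-of-spans nmax = %d\n", PAGE_SIZE / SPAN_SIZE);     /* 64 */

    /* a hypothetical 512-byte class: the superblock spans all 16 pages. */
    printf("512-byte class nmax = %d\n",
           (PAGE_SIZE * PAGES_PER_SUPERBLOCK) / 512);               /* 128 */

    return 0;
}
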
/* If locking is in use, acquire the lock. */
LWLockAcquire(lock, LW_EXCLUSIVE);
/* Attempt to allocate from the heap. */
- result = sb_alloc_from_heap(base, heap, obsize);
+ result = sb_alloc_from_heap(base, heap, obsize, nmax);
/*
* If there's no space in the current heap, but there are multiple heaps
sb_try_to_steal_superblock(base, a, heapproc, size_class))
{
/* The superblock we stole shouldn't be full, so this should work. */
- result = sb_alloc_from_heap(base, heap, obsize);
+ result = sb_alloc_from_heap(base, heap, obsize, nmax);
Assert(result != NULL);
}
else
span->ninitialized = span->nused = 1;
/* This should work now. */
- result = sb_alloc_from_heap(base, heap, obsize);
+ result = sb_alloc_from_heap(base, heap, obsize, nmax);
Assert(result != NULL);
}
}
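
Taken together, the call sites above form a fallback chain: allocate from the heap's existing superblocks, else try to steal a superblock from another heap (the comment above indicates this only applies when the size class has multiple heaps), else initialize a fresh superblock and retry. A compressed sketch of that control flow, with hypothetical helper names standing in for the real sb_* functions and with locking, span setup, and fullness classes omitted:

#include <stdbool.h>
#include <stddef.h>

typedef struct heap_t heap_t;   /* opaque stand-in for sb_heap */

/* Placeholder stubs so the sketch is self-contained; the real work happens
 * in sb_alloc_from_heap, sb_try_to_steal_superblock, and the new-superblock
 * path of sb_alloc_guts. */
static char *alloc_from_heap(heap_t *h, size_t obsize, size_t nmax)
{ (void) h; (void) obsize; (void) nmax; return NULL; }
static bool try_to_steal_superblock(heap_t *h) { (void) h; return false; }
static bool make_new_superblock(heap_t *h) { (void) h; return false; }

static char *
alloc_with_fallback(heap_t *heap, size_t obsize, size_t nmax)
{
    /* 1. Try superblocks already attached to this heap. */
    char       *result = alloc_from_heap(heap, obsize, nmax);

    /* 2. Failing that, try to move a not-yet-full superblock here. */
    if (result == NULL && try_to_steal_superblock(heap))
        result = alloc_from_heap(heap, obsize, nmax);

    /* 3. Still nothing: carve a new superblock out of free pages and retry. */
    if (result == NULL && make_new_superblock(heap))
        result = alloc_from_heap(heap, obsize, nmax);

    return result;
}
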
/* Lookup data for an entire 64-bit address space. */
typedef struct
{
- uint32 ncached;
uint32 cache_key[SB_LOOKUP_ROOT_CACHE_SIZE];
- sb_lookup_l2 *cache_value[SB_LOOKUP_L2_ENTRIES];
+ sb_lookup_l2 *cache_value[SB_LOOKUP_ROOT_CACHE_SIZE];
sb_lookup_l2 **l2;
} sb_lookup_root;
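
For orientation, a lookup against this structure scans the small root cache first (a NULL cache_value marks an unused slot, which is why the separate ncached counter can go away) and falls back to the full l2 array only if that array has been built. The following is a simplified standalone sketch with assumed constants and an assumed mapping from highbits to an l2 slot, not the patched function:

#include <stddef.h>
#include <stdint.h>

#define ROOT_CACHE_SIZE 16      /* assumed SB_LOOKUP_ROOT_CACHE_SIZE */
#define ROOT_ENTRIES    1024    /* assumed SB_LOOKUP_ROOT_ENTRIES */

typedef struct lookup_l2 lookup_l2;     /* opaque stand-in */

typedef struct
{
    uint32_t    cache_key[ROOT_CACHE_SIZE];
    lookup_l2  *cache_value[ROOT_CACHE_SIZE];
    lookup_l2 **l2;             /* full table, built only on demand */
} lookup_root;

static lookup_l2 *
root_lookup(lookup_root *root, uint32_t highbits)
{
    int         i;

    /* Fast path: the small cache; a NULL value marks an unused slot. */
    for (i = 0; i < ROOT_CACHE_SIZE; ++i)
    {
        if (root->cache_value[i] != NULL && root->cache_key[i] == highbits)
            return root->cache_value[i];
    }

    /* Slow path: the full table, if it has been built. */
    if (root->l2 != NULL)
        return root->l2[highbits % ROOT_ENTRIES];   /* index mapping simplified */

    return NULL;
}
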
rootbits = (highbits >> SB_LOOKUP_L2_BITS) & (SB_LOOKUP_ROOT_ENTRIES - 1);
/* Check for L2 entry in toplevel cache. */
- for (i = 0; i < lookup_root.ncached; ++i)
+ for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
{
- if (lookup_root.cache_key[i] == highbits)
- l2 = lookup_root.cache_value[i];
- else if (lookup_root.cache_value[i] == NULL)
+ if (lookup_root.cache_value[i] == NULL)
unused = i;
+ else if (lookup_root.cache_key[i] == highbits)
+ l2 = lookup_root.cache_value[i];
}
/* If no hit, check the full L2 lookup table, if it's been initialized. */
free(l2);
return NULL;
}
- for (i = 0; i < lookup_root.ncached; ++i)
- lookup_root.l2[lookup_root.cache_key[i]] =
- lookup_root.cache_value[i];
+ for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
+ {
+     /* Skip cache slots that were never filled in. */
+     if (lookup_root.cache_value[i] == NULL)
+         continue;
+     lookup_root.l2[lookup_root.cache_key[i]] =
+         lookup_root.cache_value[i];
+ }
}