From bc032a0cdb8c48d19691a6b10e3a3d58f1e7a132 Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Wed, 26 Mar 2014 16:23:59 -0700
Subject: [PATCH] Fix bugs.

---
 src/backend/utils/mmgr/freepage.c  | 17 +++++------------
 src/backend/utils/mmgr/sb_alloc.c  | 17 +++++++++++------
 src/backend/utils/mmgr/sb_region.c | 13 ++++++-------
 3 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 30fb190ee8..52b6c27395 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -990,14 +990,7 @@ FreePageBtreeSearch(FreePageManager *fpm, Size first_page,
 		/* Descend to appropriate child page. */
 		Assert(index < btp->hdr.nused);
 		child = relptr_access(base, btp->u.internal_key[index].child);
-		if (relptr_access(base, child->hdr.parent) != btp)
-		{
-			elog(LOG, "btp = %zu, child = %zu, child parent = %zu",
-				 fpm_pointer_to_page(base, btp),
-				 fpm_pointer_to_page(base, child),
-				 fpm_pointer_to_page(base, relptr_access(base, child->hdr.parent)));
-			elog(FATAL, "%s", FreePageManagerDump(fpm));
-		}
+		Assert(relptr_access(base, child->hdr.parent) == btp);
 		btp = child;
 	}
 
@@ -1224,10 +1217,9 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
 	 * this to result in memory fragmentation if we're repeatedly asked to
 	 * allocate chunks just a little smaller than what we have available.
 	 * Hopefully, this is unlikely, because we expect most requests to be
-	 * single pages (for the bootstrap allocator) or superblock-sized chunks
-	 * (for the superblock allocator, and for address space map memory),
-	 * but no policy can be optimal under all circumstances unless it has
-	 * knowledge of future allocation patterns.
+	 * single pages or superblock-sized chunks -- but no policy can be optimal
+	 * under all circumstances unless it has knowledge of future allocation
+	 * patterns.
 	 */
 	for (f = Min(npages, FPM_NUM_FREELISTS) - 1; f < FPM_NUM_FREELISTS; ++f)
 	{
@@ -1268,6 +1260,7 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
 		return false;
 
 	/* Remove span from free list. */
+	Assert(victim->magic == FREE_PAGE_SPAN_LEADER_MAGIC);
 	prev = relptr_access(base, victim->prev);
 	next = relptr_access(base, victim->next);
 	if (prev != NULL)
diff --git a/src/backend/utils/mmgr/sb_alloc.c b/src/backend/utils/mmgr/sb_alloc.c
index 9935d0d806..30dd5c5c5c 100644
--- a/src/backend/utils/mmgr/sb_alloc.c
+++ b/src/backend/utils/mmgr/sb_alloc.c
@@ -111,7 +111,8 @@ static char sb_size_class_map[] = {
 	(SB_SCLASS_FIRST_REGULAR + lengthof(sb_size_classes))
 
 /* Helper functions. */
-static char *sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize);
+static char *sb_alloc_from_heap(char *base, sb_heap *heap,
+				   Size obsize, Size nmax);
 static char *sb_alloc_guts(char *base, sb_region *region,
 			   sb_allocator *a, int size_class);
 static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
@@ -295,11 +296,10 @@ sb_alloc(sb_allocator *a, Size size, int flags)
  * superblock that would otherwise become empty soon.
  */
 static char *
-sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize)
+sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize, Size nmax)
 {
 	sb_span    *active_sb;
 	Size		fclass;
-	Size		nmax = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
 	char	   *superblock;
 	char	   *result;
 
@@ -412,15 +412,20 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
 	LWLock	   *lock = relptr_access(base, heap->lock);
 	char	   *result = NULL;
 	Size		obsize;
+	Size		nmax;
 
 	/* Work out object size. */
 	if (size_class == 0)
+	{
 		obsize = sizeof(sb_span);
+		nmax = FPM_PAGE_SIZE / obsize;
+	}
 	else
 	{
 		Assert(size_class >= SB_SCLASS_FIRST_REGULAR);
 		Assert(size_class < SB_NUM_SIZE_CLASSES);
 		obsize = sb_size_classes[size_class - SB_SCLASS_FIRST_REGULAR];
+		nmax = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
 	}
 
 	/* If locking is in use, acquire the lock. */
@@ -428,7 +433,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
 		LWLockAcquire(lock, LW_EXCLUSIVE);
 
 	/* Attempt to allocate from the heap. */
-	result = sb_alloc_from_heap(base, heap, obsize);
+	result = sb_alloc_from_heap(base, heap, obsize, nmax);
 
 	/*
 	 * If there's no space in the current heap, but there are multiple heaps
@@ -441,7 +446,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
 		sb_try_to_steal_superblock(base, a, heapproc, size_class))
 	{
 		/* The superblock we stole shouldn't be full, so this should work. */
-		result = sb_alloc_from_heap(base, heap, obsize);
+		result = sb_alloc_from_heap(base, heap, obsize, nmax);
 		Assert(result != NULL);
 	}
 	else
@@ -500,7 +505,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
 		span->ninitialized = span->nused = 1;
 
 		/* This should work now. */
-		result = sb_alloc_from_heap(base, heap, obsize);
+		result = sb_alloc_from_heap(base, heap, obsize, nmax);
 		Assert(result != NULL);
 	}
 }
diff --git a/src/backend/utils/mmgr/sb_region.c b/src/backend/utils/mmgr/sb_region.c
index b695bcf8c0..b98d56c2a6 100644
--- a/src/backend/utils/mmgr/sb_region.c
+++ b/src/backend/utils/mmgr/sb_region.c
@@ -68,9 +68,8 @@ typedef struct
 /* Lookup data for an entire 64-bit address space. */
 typedef struct
 {
-	uint32		ncached;
 	uint32		cache_key[SB_LOOKUP_ROOT_CACHE_SIZE];
-	sb_lookup_l2 *cache_value[SB_LOOKUP_L2_ENTRIES];
+	sb_lookup_l2 *cache_value[SB_LOOKUP_ROOT_CACHE_SIZE];
 	sb_lookup_l2 **l2;
 } sb_lookup_root;
 
@@ -507,12 +506,12 @@ sb_find_leaf(Size highbits, bool insert)
 	rootbits = (highbits >> SB_LOOKUP_L2_BITS) & (SB_LOOKUP_ROOT_ENTRIES - 1);
 
 	/* Check for L2 entry in toplevel cache. */
-	for (i = 0; i < lookup_root.ncached; ++i)
+	for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
 	{
-		if (lookup_root.cache_key[i] == highbits)
-			l2 = lookup_root.cache_value[i];
-		else if (lookup_root.cache_value[i] == NULL)
+		if (lookup_root.cache_value[i] == NULL)
 			unused = i;
+		else if (lookup_root.cache_key[i] == highbits)
+			l2 = lookup_root.cache_value[i];
 	}
 
 	/* If no hit, check the full L2 lookup table, if it's been initialized. */
@@ -558,7 +557,7 @@ sb_find_leaf(Size highbits, bool insert)
 			free(l2);
 			return NULL;
 		}
-		for (i = 0; i < lookup_root.ncached; ++i)
+		for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
			lookup_root.l2[lookup_root.cache_key[i]] = lookup_root.cache_value[i];
 	}
-- 
2.39.5
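
Two notes on the fixes above, each with a small standalone C sketch. These sketches are annotations, not code from the patch: the constant values (FPM_PAGE_SIZE = 4096, SB_PAGES_PER_SUPERBLOCK = 16, SB_LOOKUP_ROOT_CACHE_SIZE = 16) and the helper names are assumptions chosen for illustration.

The sb_alloc.c change moves the nmax computation out of sb_alloc_from_heap() because size class 0, the span-of-spans class, lives in a single-page superblock, while regular size classes use superblocks of SB_PAGES_PER_SUPERBLOCK pages. Deriving nmax from the full superblock size for every class, as the old code did, overstates the object capacity of a span-of-spans page, so sb_alloc_from_heap() could keep handing out "uninitialized objects" past the end of that page. A sketch of the two capacity formulas, with assumed constants and a stand-in sb_span layout:

	#include <stdio.h>

	#define FPM_PAGE_SIZE			4096	/* assumed page size */
	#define SB_PAGES_PER_SUPERBLOCK	16		/* assumed superblock size */

	typedef struct sb_span { char pad[48]; } sb_span;	/* stand-in layout */

	int
	main(void)
	{
		size_t	obsize = sizeof(sb_span);

		/* Old code: one formula, regardless of size class. */
		size_t	full_sb = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;

		/* Patched code for size class 0: a span-of-spans superblock is one page. */
		size_t	one_page = FPM_PAGE_SIZE / obsize;

		printf("full superblock: %zu objects, single page: %zu objects\n",
			   full_sb, one_page);
		return 0;
	}

With the assumed numbers this prints 1365 versus 85, which is why sb_alloc_guts(), which knows the size class, now computes nmax itself and passes it down.

The sb_region.c change sizes cache_value[] with the same constant as cache_key[] and replaces the ncached counter with a scan of all SB_LOOKUP_ROOT_CACHE_SIZE slots: an empty slot is recognized by a NULL value and remembered as an insertion candidate, and the NULL test now comes before the key comparison, so a stale key in an empty slot can never produce a false hit. A sketch of the corrected scan, with illustrative names (toplevel_cache, cache_scan) standing in for sb_lookup_root and the loop in sb_find_leaf():

	#include <stdint.h>
	#include <stddef.h>

	#define SB_LOOKUP_ROOT_CACHE_SIZE 16	/* assumed cache size */

	typedef struct
	{
		uint32_t	cache_key[SB_LOOKUP_ROOT_CACHE_SIZE];
		void	   *cache_value[SB_LOOKUP_ROOT_CACHE_SIZE];	/* sized like the keys */
	} toplevel_cache;		/* stand-in for sb_lookup_root */

	/* Return the cached value for key, or NULL; remember one free slot. */
	static void *
	cache_scan(toplevel_cache *c, uint32_t key, int *unused)
	{
		void	   *hit = NULL;
		int			i;

		*unused = -1;
		for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
		{
			if (c->cache_value[i] == NULL)
				*unused = i;			/* empty slot: insertion candidate */
			else if (c->cache_key[i] == key)
				hit = c->cache_value[i];	/* occupied slot: key match */
		}
		return hit;
	}

	int
	main(void)
	{
		toplevel_cache c = {{0}, {NULL}};
		int			unused;
		int			value = 42;

		/* First scan misses and reports an empty slot to fill. */
		if (cache_scan(&c, 7, &unused) == NULL && unused >= 0)
		{
			c.cache_key[unused] = 7;
			c.cache_value[unused] = &value;
		}

		/* Second scan finds the entry. */
		return cache_scan(&c, 7, &unused) == &value ? 0 : 1;
	}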