Fix bugs.

Turn a leftover debugging check in FreePageBtreeSearch into an Assert and
add an Assert on the span-leader magic in FreePageManagerGetInternal; have
sb_alloc_guts compute the per-superblock object count (nmax) and pass it
down to sb_alloc_from_heap, since superblocks for size class 0 occupy only
a single page; and fix sb_lookup_root, whose cache_value array was declared
with the wrong bound and whose cache was scanned using a separate ncached
counter that is now removed.
author    Robert Haas <rhaas@postgresql.org>
          Wed, 26 Mar 2014 23:23:59 +0000 (16:23 -0700)
committer Robert Haas <rhaas@postgresql.org>
          Wed, 26 Mar 2014 23:23:59 +0000 (16:23 -0700)
src/backend/utils/mmgr/freepage.c
src/backend/utils/mmgr/sb_alloc.c
src/backend/utils/mmgr/sb_region.c

diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 30fb190ee8590ba70f977f95a5740ff8ac8731f6..52b6c273957c2ec8504655cee08c4e45f08c2a7f 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -990,14 +990,7 @@ FreePageBtreeSearch(FreePageManager *fpm, Size first_page,
                /* Descend to appropriate child page. */
                Assert(index < btp->hdr.nused);
                child = relptr_access(base, btp->u.internal_key[index].child);
-               if (relptr_access(base, child->hdr.parent) != btp)
-               {
-                       elog(LOG, "btp = %zu, child = %zu, child parent = %zu",
-                                fpm_pointer_to_page(base, btp),
-                                fpm_pointer_to_page(base, child),
-                                fpm_pointer_to_page(base, relptr_access(base, child->hdr.parent)));
-                       elog(FATAL, "%s", FreePageManagerDump(fpm));
-               }
+               Assert(relptr_access(base, child->hdr.parent) == btp);
                btp = child;
        }
 
@@ -1224,10 +1217,9 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
         * this to result in memory fragmentation if we're repeatedly asked to
         * allocate chunks just a little smaller than what we have available.
         * Hopefully, this is unlikely, because we expect most requests to be
-        * single pages (for the bootstrap allocator) or superblock-sized chunks
-        * (for the superblock allocator, and for address space map memory),
-        * but no policy can be optimal under all circumstances unless it has
-        * knowledge of future allocation patterns.
+        * single pages or superblock-sized chunks -- but no policy can be optimal
+        * under all circumstances unless it has knowledge of future allocation
+        * patterns.
         */
        for (f = Min(npages, FPM_NUM_FREELISTS) - 1; f < FPM_NUM_FREELISTS; ++f)
        {
@@ -1268,6 +1260,7 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
                return false;
 
        /* Remove span from free list. */
+       Assert(victim->magic == FREE_PAGE_SPAN_LEADER_MAGIC);
        prev = relptr_access(base, victim->prev);
        next = relptr_access(base, victim->next);
        if (prev != NULL)
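
The comment rewritten in the first freepage.c hunk describes the freelist indexing that FreePageManagerGetInternal relies on: freelist f holds spans of exactly f+1 pages, except the last list, which holds everything larger, so a request for npages can start scanning at Min(npages, FPM_NUM_FREELISTS) - 1 and walk upward. A toy sketch of that search follows; NUM_FREELISTS and the span type are illustrative stand-ins, not the definitions from freepage.h:

#include <stddef.h>

#define NUM_FREELISTS 129        /* illustrative; the real constant is in freepage.h */

typedef struct toy_span
{
    size_t npages;               /* length of this free span, in pages */
    struct toy_span *next;
} toy_span;

static toy_span *freelist[NUM_FREELISTS];

/* Find a span of at least npages pages; assumes npages >= 1. */
static toy_span *
toy_get_span(size_t npages)
{
    size_t start = (npages < NUM_FREELISTS ? npages : NUM_FREELISTS) - 1;
    size_t f;

    for (f = start; f < NUM_FREELISTS; ++f)
    {
        toy_span *s;

        for (s = freelist[f]; s != NULL; s = s->next)
        {
            /*
             * Anything on a list at or above 'start' is big enough, but it
             * may be bigger than requested; splitting off the remainder is
             * where the fragmentation risk mentioned in the comment arises.
             */
            if (s->npages >= npages)
                return s;
        }
    }
    return NULL;                 /* caller must consolidate freed pages or fail */
}
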
diff --git a/src/backend/utils/mmgr/sb_alloc.c b/src/backend/utils/mmgr/sb_alloc.c
index 9935d0d8065e38019e57a1eaf08a5427ec6df262..30dd5c5c5c43c466fd23b94c232cfcfd69c9f8da 100644
--- a/src/backend/utils/mmgr/sb_alloc.c
+++ b/src/backend/utils/mmgr/sb_alloc.c
@@ -111,7 +111,8 @@ static char sb_size_class_map[] = {
        (SB_SCLASS_FIRST_REGULAR + lengthof(sb_size_classes))
 
 /* Helper functions. */
-static char *sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize);
+static char *sb_alloc_from_heap(char *base, sb_heap *heap,
+                                  Size obsize, Size nmax);
 static char *sb_alloc_guts(char *base, sb_region *region,
                          sb_allocator *a, int size_class);
 static void sb_init_span(char *base, sb_span *span, sb_heap *heap,
@@ -295,11 +296,10 @@ sb_alloc(sb_allocator *a, Size size, int flags)
  * superblock that would otherwise become empty soon.
  */
 static char *
-sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize)
+sb_alloc_from_heap(char *base, sb_heap *heap, Size obsize, Size nmax)
 {
        sb_span *active_sb;
        Size    fclass;
-       Size    nmax = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
        char   *superblock;
        char   *result;
 
@@ -412,15 +412,20 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
        LWLock *lock = relptr_access(base, heap->lock);
        char *result = NULL;
        Size    obsize;
+       Size    nmax;
 
        /* Work out object size. */
        if (size_class == 0)
+       {
                obsize = sizeof(sb_span);
+               nmax = FPM_PAGE_SIZE / obsize;
+       }
        else
        {
                Assert(size_class >= SB_SCLASS_FIRST_REGULAR);
                Assert(size_class < SB_NUM_SIZE_CLASSES);
                obsize = sb_size_classes[size_class - SB_SCLASS_FIRST_REGULAR];
+               nmax = (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
        }
 
        /* If locking is in use, acquire the lock. */
@@ -428,7 +433,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
                LWLockAcquire(lock, LW_EXCLUSIVE);
 
        /* Attempt to allocate from the heap. */
-       result = sb_alloc_from_heap(base, heap, obsize);
+       result = sb_alloc_from_heap(base, heap, obsize, nmax);
 
        /*
         * If there's no space in the current heap, but there are multiple heaps
@@ -441,7 +446,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
                        sb_try_to_steal_superblock(base, a, heapproc, size_class))
                {
                        /* The superblock we stole shouldn't be full, so this should work. */
-                       result = sb_alloc_from_heap(base, heap, obsize);
+                       result = sb_alloc_from_heap(base, heap, obsize, nmax);
                        Assert(result != NULL);
                }
                else
@@ -500,7 +505,7 @@ sb_alloc_guts(char *base, sb_region *region, sb_allocator *a, int size_class)
                                span->ninitialized = span->nused = 1;
 
                        /* This should work now. */
-                       result = sb_alloc_from_heap(base, heap, obsize);
+                       result = sb_alloc_from_heap(base, heap, obsize, nmax);
                        Assert(result != NULL);
                }
        }
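
The sb_alloc.c changes hinge on how many objects fit in one superblock. For regular size classes a superblock spans SB_PAGES_PER_SUPERBLOCK pages, but the class used for sb_span objects themselves (size class 0) uses a single page, so computing nmax inside sb_alloc_from_heap with the full-superblock formula overstated that class's capacity; sb_alloc_guts now computes nmax per class and passes it down. A minimal sketch of the corrected arithmetic, using assumed values for FPM_PAGE_SIZE, SB_PAGES_PER_SUPERBLOCK, and the size of an sb_span:

#include <stdio.h>
#include <stddef.h>

#define FPM_PAGE_SIZE            4096    /* assumed page size */
#define SB_PAGES_PER_SUPERBLOCK  16      /* assumed pages per regular superblock */

/*
 * How many objects of size obsize fit in one superblock of the given
 * class.  Size class 0 superblocks are a single page, so dividing by the
 * full superblock size (as the old sb_alloc_from_heap did for every
 * class) credits them with far more slots than they really have.
 */
static size_t
superblock_capacity(int size_class, size_t obsize)
{
    if (size_class == 0)
        return FPM_PAGE_SIZE / obsize;
    return (FPM_PAGE_SIZE * SB_PAGES_PER_SUPERBLOCK) / obsize;
}

int
main(void)
{
    size_t span_size = 40;               /* stand-in for sizeof(sb_span) */

    /* 102 objects per single-page superblock vs. 1638 per 16-page one
     * under these assumed constants. */
    printf("size class 0:  %zu objects\n", superblock_capacity(0, span_size));
    printf("regular class: %zu objects\n", superblock_capacity(8, span_size));
    return 0;
}
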
diff --git a/src/backend/utils/mmgr/sb_region.c b/src/backend/utils/mmgr/sb_region.c
index b695bcf8c0b3efdce69987c715c3e3770eb9e582..b98d56c2a62d654944cf17c7d62a029a779e037c 100644
--- a/src/backend/utils/mmgr/sb_region.c
+++ b/src/backend/utils/mmgr/sb_region.c
@@ -68,9 +68,8 @@ typedef struct
 /* Lookup data for an entire 64-bit address space. */
 typedef struct
 {
-       uint32  ncached;
        uint32  cache_key[SB_LOOKUP_ROOT_CACHE_SIZE];
-       sb_lookup_l2 *cache_value[SB_LOOKUP_L2_ENTRIES];
+       sb_lookup_l2 *cache_value[SB_LOOKUP_ROOT_CACHE_SIZE];
        sb_lookup_l2 **l2;
 } sb_lookup_root;
 
@@ -507,12 +506,12 @@ sb_find_leaf(Size highbits, bool insert)
        rootbits = (highbits >> SB_LOOKUP_L2_BITS) & (SB_LOOKUP_ROOT_ENTRIES - 1);
 
        /* Check for L2 entry in toplevel cache. */
-       for (i = 0; i < lookup_root.ncached; ++i)
+       for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
        {
-               if (lookup_root.cache_key[i] == highbits)
-                       l2 = lookup_root.cache_value[i];
-               else if (lookup_root.cache_value[i] == NULL)
+               if (lookup_root.cache_value[i] == NULL)
                        unused = i;
+               else if (lookup_root.cache_key[i] == highbits)
+                       l2 = lookup_root.cache_value[i];
        }
 
 /* If no hit, check the full L2 lookup table, if it's been initialized. */
@@ -558,7 +557,7 @@ sb_find_leaf(Size highbits, bool insert)
                                free(l2);
                                return NULL;
                        }
-                       for (i = 0; i < lookup_root.ncached; ++i)
+                       for (i = 0; i < SB_LOOKUP_ROOT_CACHE_SIZE; ++i)
                                lookup_root.l2[lookup_root.cache_key[i]] =
                                        lookup_root.cache_value[i];
                }