More hacking: move the first-range and btree-creation special cases
from FreePageManagerPut into FreePageManagerPutInternal, teach
FreePageManagerGetInternal to allocate from the singleton span, and
assert that FreePageBtreeGetRecycled is not called on an empty
recycle list.
author     Robert Haas <rhaas@postgresql.org>
           Thu, 20 Feb 2014 01:47:13 +0000 (20:47 -0500)
committer  Robert Haas <rhaas@postgresql.org>
           Thu, 20 Feb 2014 01:47:13 +0000 (20:47 -0500)
src/backend/utils/mmgr/freepage.c

index 2e4dda83fcc284d1724017da8a389052d9890b17..eeb2ab23a8632fc922987ef1630e9780a7e01fe8 100644
@@ -158,6 +158,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
        if (lock != NULL)
                LWLockAcquire(lock, LW_EXCLUSIVE);
        result = FreePageManagerGetInternal(fpm, npages, first_page);
+       /* XXX. Try to softly PutInternal recycled pages? */
        if (lock != NULL)
                LWLockRelease(lock);
 
@@ -177,22 +178,9 @@ FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
        if (lock != NULL)
                LWLockAcquire(lock, LW_EXCLUSIVE);
 
-       /*
-        * As a special case, we store the very first range in the FreePageManager
-        * itself, so that a request for the entire number of pages will succeed.
-        * Otherwise, we must build or update a btree.
-        */
-       if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
-       {
-               fpm->singleton_first_page = first_page;
-               fpm->singleton_npages = npages;
-       }
-       else if (fpm->btree_depth == 0)
-       {
-               /* XXX Create the btree. */
-       }
-       else
-               FreePageManagerPutInternal(fpm, first_page, npages, false);
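+       /* The first-range and btree-creation special cases now live in PutInternal. */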
+       FreePageManagerPutInternal(fpm, first_page, npages, false);
+
+       /* XXX. Try to softly PutInternal recycled pages? */
 
        /* Release lock (if there is one). */
        if (lock != NULL)
@@ -368,31 +356,47 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
                relptr_copy(next->prev, victim->prev);
 
        /*
-        * If the span we found is exactly the right size, remove it from the
-        * btree completely.  Otherwise, adjust the btree entry to reflect the
-        * still-unallocated portion of the span, and put that portion on the
-        * appropriate free list.
+        * If we haven't initialized the btree yet, the victim must be the single
+        * span stored within the FreePageManager itself.  Otherwise, we need
+        * to update the btree.
         */
-       FreePageBtreeSearch(fpm, victim_page, &result);
-       Assert(result.page_exact != NULL);
-       if (victim->npages == npages)
-               FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
+       if (relptr_is_null(fpm->btree_root))
+       {
+               Assert(fpm_pointer_to_page(base, victim) == fpm->singleton_first_page);
+               Assert(victim->npages == fpm->singleton_npages);
+               Assert(victim->npages >= npages);
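+               /* Carve the allocation off the front of the singleton span. */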
+               fpm->singleton_first_page += npages;
+               fpm->singleton_npages -= npages;
+       }
        else
        {
-               FreePageBtreeLeafKey *key;
-
-               /* Adjust btree to reflect remaining pages. */
-               Assert(victim->npages > npages);
-               key = &result.page_exact->u.leaf_key[result.index_exact];
-               Assert(key->npages == victim->npages);
-               key->first_page += npages;
-               key->npages -= npages;
-               if (result.index_exact == 0)
-                       FreePageBtreeAdjustAncestorKeys(fpm, result.page_exact);
-
-               /* Put the unallocated pages back on the appropriate free list. */
-               FreePagePushSpanLeader(fpm, victim_page + npages,
-                                                          victim->npages - npages);
+               /*
+                * If the span we found is exactly the right size, remove it from the
+                * btree completely.  Otherwise, adjust the btree entry to reflect the
+                * still-unallocated portion of the span, and put that portion on the
+                * appropriate free list.
+                */
+               FreePageBtreeSearch(fpm, victim_page, &result);
+               Assert(result.page_exact != NULL);
+               if (victim->npages == npages)
+                       FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
+               else
+               {
+                       FreePageBtreeLeafKey *key;
+
+                       /* Adjust btree to reflect remaining pages. */
+                       Assert(victim->npages > npages);
+                       key = &result.page_exact->u.leaf_key[result.index_exact];
+                       Assert(key->npages == victim->npages);
+                       key->first_page += npages;
+                       key->npages -= npages;
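+                       /* If we changed the leaf's low key, ancestor keys may need updating. */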
+                       if (result.index_exact == 0)
+                               FreePageBtreeAdjustAncestorKeys(fpm, result.page_exact);
+
+                       /* Put the unallocated pages back on the appropriate free list. */
+                       FreePagePushSpanLeader(fpm, victim_page + npages,
+                                                                  victim->npages - npages);
+               }
        }
 
        /* Return results to caller. */
@@ -408,8 +412,10 @@ FreePageBtreeGetRecycled(FreePageManager *fpm)
 {
        char *base = fpm_segment_base(fpm);
        FreePageSpanLeader *victim = relptr_access(base, fpm->btree_recycle);
-       FreePageSpanLeader *newhead = relptr_access(base, victim->next);
+       FreePageSpanLeader *newhead;
 
+       Assert(victim != NULL);
+       newhead = relptr_access(base, victim->next);
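+       /* Unlink the victim from the head of the recycle list. */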
        relptr_copy(newhead->prev, victim->prev);
        relptr_store(base, fpm->btree_recycle, newhead);
        Assert(fpm_pointer_is_page_aligned(base, victim));
@@ -782,6 +788,50 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,
        FreePageBtree *btp;
        Size    index;
 
+       /*
+        * As a special case, we store the very first range in the FreePageManager
+        * itself, so that a request for the entire number of pages will succeed.
+        * Otherwise, we must build or update a btree.
+        */
+       if (fpm->btree_depth == 0 && fpm->singleton_npages == 0)
+       {
+               fpm->singleton_first_page = first_page;
+               fpm->singleton_npages = npages;
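+               /* No btree is needed until a second range arrives. */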
+               return true;
+       }
+
+       /*
+        * When we see the second range, we need to initialize the btree for
+        * real.
+        */
+       if (fpm->btree_depth == 0)
+       {
+               char *base = fpm_segment_base(fpm);
+               Size    root_page;
+               FreePageBtree *root;
+
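+               /*
+                * Find a page to hold the new root: prefer a recycled btree page,
+                * else carve a single page out of the range we already manage.
+                */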
+               if (!relptr_is_null(fpm->btree_recycle))
+                       root = FreePageBtreeGetRecycled(fpm);
+               else if (FreePageManagerGetInternal(fpm, 1, &root_page))
+                       root = (FreePageBtree *) fpm_page_to_pointer(base, root_page);
+               else
+               {
+                       /* We'd better be able to get a page from the existing range. */
+                       elog(FATAL, "free page manager btree is corrupt");
+               }
+
+               /* Create the btree and move the preexisting range into it. */
+               root->hdr.magic = FREE_PAGE_LEAF_MAGIC;
+               root->hdr.nused = 1;
+               relptr_store(base, root->hdr.parent, (FreePageBtree *) NULL);
+               root->u.leaf_key[0].first_page = fpm->singleton_first_page;
+               root->u.leaf_key[0].npages = fpm->singleton_npages;
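+               /* The singleton's old contents now live in the btree root; clear it. */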
+               fpm->singleton_first_page = 0;
+               fpm->singleton_npages = 0;
+
+               /* Fall through to insert the new key. */
+       }
+
        /* Search the btree. */
        FreePageBtreeSearch(fpm, first_page, &result);
        Assert(result.page_exact == NULL);              /* can't already be there */