static void FreePageBtreeAdjustAncestorKeys(FreePageManager *fpm,
FreePageBtree *btp);
static Size FreePageBtreeFirstKey(FreePageBtree *btp);
+static FreePageBtree *FreePageBtreeGetRecycled(FreePageManager *fpm);
static void FreePageBtreeInsertInternal(char *base, FreePageBtree *btp,
Size index, Size first_page, FreePageBtree *child);
static void FreePageBtreeInsertLeaf(FreePageBtree *btp, Size index,
Size first_page, Size npages);
static bool FreePageManagerGetInternal(FreePageManager *fpm, Size npages,
Size *first_page);
-static bool FreePageManagerPutInternal(FreePageManager *fpm, Size first_page,
- Size npages, bool soft);
+static void FreePagePopSpanLeader(FreePageManager *fpm, Size pageno);
static void FreePagePushSpanLeader(FreePageManager *fpm, Size first_page,
Size npages);
+static bool FreePageManagerPutInternal(FreePageManager *fpm, Size first_page,
+ Size npages, bool soft);
/*
* Initialize a new, empty free page manager.
}
}
-/*
- * Like FreePageManagerGet, this function allocates a run of pages of the
- * given length from the free page manager, but without taking and releasing
- * the lock. The caller is responsible for making sure the lock is already
- * held.
- */
-static bool
-FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
-{
- char *base = fpm_segment_base(fpm);
- FreePageSpanLeader *victim = NULL;
- FreePageSpanLeader *prev;
- FreePageSpanLeader *next;
- FreePageBtreeSearchResult result;
- Size victim_page = 0; /* placate compiler */
- Size f;
-
- /*
- * Search for a free span.
- *
- * Right now, we use a simple best-fit policy here, but it's possible for
- * this to result in memory fragmentation if we're repeatedly asked to
- * allocate chunks just a little smaller than what we have available.
- * Hopefully, this is unlikely, because we expect most requests to be
- * single pages (for the bootstrap allocator) or superblock-sized chunks
- * (for the superblock allocator, and for address space map memory),
- * but no policy can be optimal under all circumstances unless it has
- * knowledge of future allocation patterns.
- */
- for (f = Min(npages, FPM_NUM_FREELISTS) - 1; f < FPM_NUM_FREELISTS; ++f)
- {
- /* Skip empty freelists. */
- if (relptr_is_null(fpm->freelist[f]))
- continue;
-
- /*
- * All of the freelists except the last one contain only items of a
- * single size, so we just take the first one. But the final free
- * list contains everything too big for any of the other lists, so
- * we need to search the list.
- */
- if (f < FPM_NUM_FREELISTS - 1)
- victim = relptr_access(base, fpm->freelist[f]);
- else
- {
- FreePageSpanLeader *candidate;
-
- candidate = relptr_access(base, fpm->freelist[f]);
- do
- {
- if (candidate->npages >= npages && (victim == NULL ||
- victim->npages > candidate->npages))
- {
- victim = candidate;
- if (victim->npages == npages)
- break;
- }
- candidate = relptr_access(base, candidate->next);
- } while (candidate != NULL);
- }
- break;
- }
-
- /* If we didn't find an allocatable span, return failure. */
- if (victim == NULL)
- return false;
-
- /* Remove span from free list. */
- prev = relptr_access(base, victim->prev);
- next = relptr_access(base, victim->next);
- if (prev != NULL)
- relptr_copy(prev->next, victim->next);
- else
- relptr_copy(fpm->freelist[f], victim->next);
- if (next != NULL)
- relptr_copy(next->prev, victim->prev);
-
- /* Remember the first page of the victim span for use below. */
- victim_page = fpm_pointer_to_page(base, victim);
-
- /*
- * If we haven't initialized the btree yet, the victim must be the single
- * span stored within the FreePageManager itself. Otherwise, we need
- * to update the btree.
- */
- if (relptr_is_null(fpm->btree_root))
- {
- Assert(fpm_pointer_to_page(base, victim) == fpm->singleton_first_page);
- Assert(victim->npages == fpm->singleton_npages);
- Assert(victim->npages >= npages);
- fpm->singleton_first_page += npages;
- fpm->singleton_npages -= npages;
- }
- else
- {
- /*
- * If the span we found is exactly the right size, remove it from the
- * btree completely. Otherwise, adjust the btree entry to reflect the
- * still-unallocated portion of the span, and put that portion on the
- * appropriate free list.
- */
- FreePageBtreeSearch(fpm, victim_page, &result);
- Assert(result.page_exact != NULL);
- if (victim->npages == npages)
- FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
- else
- {
- FreePageBtreeLeafKey *key;
-
- /* Adjust btree to reflect remaining pages. */
- Assert(victim->npages > npages);
- key = &result.page_exact->u.leaf_key[result.index_exact];
- Assert(key->npages == victim->npages);
- key->first_page += npages;
- key->npages -= npages;
- if (result.index_exact == 0)
- FreePageBtreeAdjustAncestorKeys(fpm, result.page_exact);
-
- /* Put the unallocated pages back on the appropriate free list. */
- FreePagePushSpanLeader(fpm, victim_page + npages,
- victim->npages - npages);
- }
- }
-
- /* Return results to caller. */
- *first_page = fpm_pointer_to_page(base, victim);
- return true;
-}
-
/*
* Get the first key on a btree page.
*/
return (FreePageBtree *) victim;
}
-/*
- * Put a page on the btree recycle list.
- */
-static void
-FreePageBtreeRecycle(FreePageManager *fpm, Size pageno)
-{
- char *base = fpm_segment_base(fpm);
- FreePageSpanLeader *head = relptr_access(base, fpm->btree_recycle);
- FreePageSpanLeader *span;
-
- span = (FreePageSpanLeader *) fpm_page_to_pointer(base, pageno);
- span->magic = FREE_PAGE_SPAN_LEADER_MAGIC;
- span->npages = 1;
- relptr_store(base, span->next, head);
- relptr_store(base, span->prev, (FreePageSpanLeader *) NULL);
- if (head != NULL)
- relptr_store(base, head->prev, span);
- relptr_store(base, fpm->btree_recycle, span);
-}
-
/*
* Insert an item into an internal page.
*/
++btp->hdr.nused;
}
+/*
+ * Put a page on the btree recycle list.
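+ *
+ * Pages placed here are handed back out by FreePageBtreeGetRecycled
+ * (declared above) the next time the btree needs a fresh page.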
+ */
+static void
+FreePageBtreeRecycle(FreePageManager *fpm, Size pageno)
+{
+ char *base = fpm_segment_base(fpm);
+ FreePageSpanLeader *head = relptr_access(base, fpm->btree_recycle);
+ FreePageSpanLeader *span;
+
+ span = (FreePageSpanLeader *) fpm_page_to_pointer(base, pageno);
+ span->magic = FREE_PAGE_SPAN_LEADER_MAGIC;
+ span->npages = 1;
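+
+ /* Link the page in at the head of the doubly-linked recycle list. */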
+ relptr_store(base, span->next, head);
+ relptr_store(base, span->prev, (FreePageSpanLeader *) NULL);
+ if (head != NULL)
+ relptr_store(base, head->prev, span);
+ relptr_store(base, fpm->btree_recycle, span);
+}
+
/*
* Remove an item from the btree at the given position on the given page.
*/
return newsibling;
}
+/*
+ * Like FreePageManagerGet, this function allocates a run of pages of the
+ * given length from the free page manager, but without taking and releasing
+ * the lock. The caller is responsible for making sure the lock is already
+ * held.
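+ *
+ * A typical caller (an illustrative sketch only; the wrapper and the lock
+ * shown here are assumptions, not part of this patch) would look like:
+ *
+ *     LWLockAcquire(fpm_lock, LW_EXCLUSIVE);
+ *     found = FreePageManagerGetInternal(fpm, npages, &first_page);
+ *     LWLockRelease(fpm_lock);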
+ */
+static bool
+FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page)
+{
+ char *base = fpm_segment_base(fpm);
+ FreePageSpanLeader *victim = NULL;
+ FreePageSpanLeader *prev;
+ FreePageSpanLeader *next;
+ FreePageBtreeSearchResult result;
+ Size victim_page = 0; /* placate compiler */
+ Size f;
+
+ /*
+ * Search for a free span.
+ *
+ * Right now, we use a simple best-fit policy here, but it's possible for
+ * this to result in memory fragmentation if we're repeatedly asked to
+ * allocate chunks just a little smaller than what we have available.
+ * Hopefully, this is unlikely, because we expect most requests to be
+ * single pages (for the bootstrap allocator) or superblock-sized chunks
+ * (for the superblock allocator, and for address space map memory),
+ * but no policy can be optimal under all circumstances unless it has
+ * knowledge of future allocation patterns.
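+ *
+ * For example, a request for 3 pages starts the scan at freelist[2],
+ * which holds only 3-page spans; a request of FPM_NUM_FREELISTS pages
+ * or more goes straight to the final, mixed-size list and is best-fit
+ * searched there.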
+ */
+ for (f = Min(npages, FPM_NUM_FREELISTS) - 1; f < FPM_NUM_FREELISTS; ++f)
+ {
+ /* Skip empty freelists. */
+ if (relptr_is_null(fpm->freelist[f]))
+ continue;
+
+ /*
+ * All of the freelists except the last one contain only items of a
+ * single size, so we just take the first one. But the final free
+ * list contains everything too big for any of the other lists, so
+ * we need to search the list.
+ */
+ if (f < FPM_NUM_FREELISTS - 1)
+ victim = relptr_access(base, fpm->freelist[f]);
+ else
+ {
+ FreePageSpanLeader *candidate;
+
+ candidate = relptr_access(base, fpm->freelist[f]);
+ do
+ {
+ if (candidate->npages >= npages && (victim == NULL ||
+ victim->npages > candidate->npages))
+ {
+ victim = candidate;
+ if (victim->npages == npages)
+ break;
+ }
+ candidate = relptr_access(base, candidate->next);
+ } while (candidate != NULL);
+ }
+ break;
+ }
+
+ /* If we didn't find an allocatable span, return failure. */
+ if (victim == NULL)
+ return false;
+
+ /* Remove span from free list. */
+ prev = relptr_access(base, victim->prev);
+ next = relptr_access(base, victim->next);
+ if (prev != NULL)
+ relptr_copy(prev->next, victim->next);
+ else
+ relptr_copy(fpm->freelist[f], victim->next);
+ if (next != NULL)
+ relptr_copy(next->prev, victim->prev);
+
+ /* Remember the first page of the victim span for use below. */
+ victim_page = fpm_pointer_to_page(base, victim);
+
+ /*
+ * If we haven't initialized the btree yet, the victim must be the single
+ * span stored within the FreePageManager itself. Otherwise, we need
+ * to update the btree.
+ */
+ if (relptr_is_null(fpm->btree_root))
+ {
+ Assert(fpm_pointer_to_page(base, victim) == fpm->singleton_first_page);
+ Assert(victim->npages == fpm->singleton_npages);
+ Assert(victim->npages >= npages);
+ fpm->singleton_first_page += npages;
+ fpm->singleton_npages -= npages;
+ }
+ else
+ {
+ /*
+ * If the span we found is exactly the right size, remove it from the
+ * btree completely. Otherwise, adjust the btree entry to reflect the
+ * still-unallocated portion of the span, and put that portion on the
+ * appropriate free list.
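+ *
+ * For example, carving 2 pages out of a 10-page span that starts at
+ * page 100 leaves the leaf key describing first_page 102, npages 8,
+ * and that 8-page remainder is pushed back onto the matching free
+ * list.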
+ */
+ FreePageBtreeSearch(fpm, victim_page, &result);
+ Assert(result.page_exact != NULL);
+ if (victim->npages == npages)
+ FreePageBtreeRemove(fpm, result.page_exact, result.index_exact);
+ else
+ {
+ FreePageBtreeLeafKey *key;
+
+ /* Adjust btree to reflect remaining pages. */
+ Assert(victim->npages > npages);
+ key = &result.page_exact->u.leaf_key[result.index_exact];
+ Assert(key->npages == victim->npages);
+ key->first_page += npages;
+ key->npages -= npages;
+ if (result.index_exact == 0)
+ FreePageBtreeAdjustAncestorKeys(fpm, result.page_exact);
+
+ /* Put the unallocated pages back on the appropriate free list. */
+ FreePagePushSpanLeader(fpm, victim_page + npages,
+ victim->npages - npages);
+ }
+ }
+
+ /* Return results to caller. */
+ *first_page = fpm_pointer_to_page(base, victim);
+ return true;
+}
+
/*
* Remove a FreePageSpanLeader from the linked-list that contains it, either
* because we're changing the size of the span, or because we're allocating it.
* Put a range of pages into the btree and freelists, consolidating it with
* existing free spans just before and/or after it. If 'soft' is true,
* only perform the insertion if it can be done without allocating new btree
- * pages; if false, do it always. Returns true if the insertion was performed.
+ * pages; if false, do it always. Returns true if the insertion was performed;
+ * false if the soft flag caused it to be skipped.
*/
static bool
FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages,