From 97e6ad76f4d9604c3d52018b55d7ba170a41c4fd Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Thu, 16 Jan 2014 14:48:46 -0500
Subject: [PATCH] Hack, hack.

---
 src/backend/utils/mmgr/mspan.c | 310 ++++++++++++++++++++++++---------
 1 file changed, 231 insertions(+), 79 deletions(-)

diff --git a/src/backend/utils/mmgr/mspan.c b/src/backend/utils/mmgr/mspan.c
index 3f289cd0c3..2c9c3c8ada 100644
--- a/src/backend/utils/mmgr/mspan.c
+++ b/src/backend/utils/mmgr/mspan.c
@@ -127,6 +127,8 @@ struct mspan
 	uint16		firstfree;		/* First object on free list. */
 };
 
+#define MSPAN_FIRSTFREE_NONE ((uint16) -1)
+
 /*
  * Management information for an allocation context.
  */
@@ -138,10 +140,8 @@ struct mspan_context
 };
 
 /* Helper functions. */
-static int mspan_adjust_superblock_fullness(char *base, mspan_context *cxt,
-							   mspan *superblock,
-							   int current_fullness_class);
 static char *mspan_allocate_from_superblock(char *base, mspan *superblock);
+static mspan *mspan_allocate_span_descriptor(char *base, mspan_manager *mgr);
 static mspan_context *mspan_allocate_context_descriptor(char *base,
 							   mspan_manager *mgr);
 static void mspan_destroy_span(char *base, mspan *span);
@@ -149,7 +149,10 @@ static void mspan_ensure_active_superblock(char *base, mspan_context *cxt,
 							   uint16 size_class);
 static mspan *mspan_find_free_span(char *base, mspan_manager *mgr,
 							   Size minpages, Size maxpages);
-static void mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span);
+static void mspan_link_span_to_context(char *base, mspan_context *cxt,
+							   mspan *span);
+static void mspan_link_span_to_manager(char *base, mspan_manager *mgr,
+							   mspan *span);
 static void mspan_release_span(char *base, mspan_manager *mgr, mspan *span);
 static void mspan_unlink_span(char *base, mspan *span);
 static void mspan_update_page_map(char *base, mspan_manager *mgr,
@@ -298,13 +301,12 @@ mspan_alloc(dsm_segment *seg, mspan_context *cxt, Size size, int flags)
 		 * we should instead destroy it and move back the boundary.
 		 */
 	}
-	else if (base != NULL)
-	{
-		/* XXX. Allocate from the boundary. */
-	}
 	else
 	{
-		/* XXX. Allocate a new segment via malloc. */
+		/*
+		 * XXX. We need more core.  Allocate either from the boundary or
+		 * via malloc.
+		 */
 
 		/*
 		 * XXX. How exactly are we going to give the segments we malloc
 		 * back to the OS?  How are we even going to know where they are?
@@ -367,72 +369,21 @@ mspan_alloc(dsm_segment *seg, mspan_context *cxt, Size size, int flags)
 		mspan	   *superblock;
 		void	   *result;
 
+		/* Find active superblock. */
 		if (relptr_is_null(cxt->small_allocation[aidx]))
 			mspan_ensure_active_superblock(base, cxt, size_class);
 		superblock = relptr_access(base, cxt->small_allocation[aidx]);
 		Assert(superblock->span_type == size_class);
+
+		/* Allocate from superblock if possible. */
 		result = mspan_allocate_from_superblock(base, superblock);
 		if (result != NULL)
 			return result;
 
-		mspan_adjust_superblock_fullness(base, cxt, superblock, 1);
-	}
-}
-
-/*
- * Determine whether a superblock is filed under the appropriate fullness
- * class, and if not, move it to the right place.  The return value is the
- * new fullness class for the superblock.
- */
-static int
-mspan_adjust_superblock_fullness(char *base, mspan_context *cxt,
-								 mspan *superblock, int current_fullness_class)
-{
-	uint16		total;
-	int			fullness_class;
-
-	Assert(superblock->span_type <= mspan_size_classes[superblock->span_type]);
-	total = MSPAN_SUPERBLOCK_SIZE / mspan_size_classes[superblock->span_type];
-	Assert(superblock->nused <= total);
-	if (superblock->nused == 0)
-		fullness_class = 0;
-	else
-	{
-		fullness_class = ((superblock->nused *
-						   MSPAN_NUMBER_OF_FULLNESS_CLASSES) - 1) / total;
-		Assert(fullness_class < MSPAN_NUMBER_OF_FULLNESS_CLASSES);
-	}
-	if (fullness_class != current_fullness_class)
-	{
-		int			aidx;
-		mspan	   *head;
-
-		/* It's on the wrong list, so unlink it from where it is now... */
+		/* Move active superblock to proper fullness class. */
 		mspan_unlink_span(base, superblock);
-
-		/* ...and put it where it's supposed to be. */
-		aidx = superblock->span_type * MSPAN_NUMBER_OF_FULLNESS_CLASSES
-			+ fullness_class;
-		head = relptr_access(base, cxt->small_allocation[aidx]);
-		if (head == NULL)
-		{
-			relptr_store(base, superblock->nextspan, superblock);
-			relptr_store(base, superblock->prevspan, superblock);
-		}
-		else
-		{
-			mspan	   *tail;
-
-			tail = relptr_access(base, head->prevspan);
-			relptr_store(base, superblock->nextspan, head);
-			superblock->prevspan.relptr_off = head->prevspan.relptr_off;
-			relptr_store(base, head->prevspan, superblock);
-			relptr_store(base, tail->nextspan, superblock);
-		}
-		relptr_store(base, cxt->small_allocation[aidx], superblock);
+		mspan_link_span_to_context(base, cxt, superblock);
 	}
-
-	return fullness_class;
 }
 
 /*
@@ -504,6 +455,7 @@ mspan_allocate_context_descriptor(char *base, mspan_manager *mgr)
 	 * appropriate free list.  Also adjust the page map entries as
 	 * appropriate.
 	 */
+	Assert(span->span_type == MSPAN_TYPE_FREE);
 	mspan_update_page_map(base, mgr, pageno, 1, 0);
 	mspan_unlink_span(base, span);
 	if (span->npages == 1)
@@ -512,7 +464,7 @@
 	{
 		++span->first_page;
 		--span->npages;
-		mspan_recycle_span(base, mgr, span);
+		mspan_link_span_to_manager(base, mgr, span);
 
 		/*
 		 * The last-page entry for this span is still OK, so no need to
@@ -549,6 +501,52 @@
 	return cxt;
 }
 
+/*
+ * Allocate space for a new span descriptor.
+ */
+static mspan *
+mspan_allocate_span_descriptor(char *base, mspan_manager *mgr)
+{
+	if (!relptr_is_null(mgr->spansuperblocks))
+	{
+		mspan	   *spansuperblock = relptr_access(base, mgr->spansuperblocks);
+		mspan	   *head = spansuperblock;
+		char	   *result;
+
+		/* Try to allocate from the first span-of-spans. */
+		Assert(spansuperblock->span_type == MSPAN_TYPE_SPAN_OF_SPANS);
+		result = mspan_allocate_from_superblock(base, spansuperblock);
+		if (result != NULL)
+			return (mspan *) result;
+
+		/*
+		 * Walk the rest of the list looking for a span-of-spans that isn't
+		 * full.  The list is circular, so stop when we get back to the
+		 * head rather than waiting for a NULL that will never appear.
+		 */
+		for (;;)
+		{
+			spansuperblock = relptr_access(base, spansuperblock->nextspan);
+			if (spansuperblock == head)
+				break;
+			Assert(spansuperblock->span_type == MSPAN_TYPE_SPAN_OF_SPANS);
+			result = mspan_allocate_from_superblock(base, spansuperblock);
+			if (result != NULL)
+			{
+				/*
+				 * Move the span from which we allocated to the head of the
+				 * list in the hope of speeding up future searches.
+				 */
+				mspan_unlink_span(base, spansuperblock);
+				mspan_link_span_to_manager(base, mgr, spansuperblock);
+
+				/* Return a pointer to the space we allocated. */
+				return (mspan *) result;
+			}
+		}
+	}
+
+	/*
+	 * XXX.  We need to create a new span of spans, either from an available
+	 * span or by allocating from the boundary or OS.
+	 */
+}
+
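Since every list in this module is circular, the walk above has to detect wraparound itself; there is no NULL link to fall off. The same search pattern, reduced to a stand-alone sketch with ordinary pointers in place of the relptr machinery (all names here are illustrative, not part of mspan.c):

    #include <stddef.h>

    /* Simplified stand-in for struct mspan: one node on a circular list. */
    typedef struct demo_span
    {
        struct demo_span *nextspan;
        struct demo_span *prevspan;
        int         nfree;          /* objects still free in this span */
    } demo_span;

    /*
     * Search a circular list for a span with free space.  Because the list
     * is circular, the walk terminates by returning to the head, not by
     * hitting a NULL pointer.
     */
    static demo_span *
    demo_find_nonfull(demo_span *head)
    {
        demo_span  *cur;

        if (head == NULL)
            return NULL;
        if (head->nfree > 0)
            return head;
        for (cur = head->nextspan; cur != head; cur = cur->nextspan)
            if (cur->nfree > 0)
                return cur;
        return NULL;            /* every span on the list is full */
    }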
 /*
  * Attempt to allocate an object from a superblock.
  */
@@ -561,6 +559,68 @@ mspan_allocate_from_superblock(char *base, mspan *superblock)
 	return NULL;
 }
 
+/*
+ * Allocate a span.
+ *
+ * We can do this either by finding a free span which is already suitable
+ * or which can be split, or by allocating space from the boundary or OS and
+ * creating a span descriptor to describe it.
+ *
+ * If we're allocating storage for a span-of-spans, then cxt will be NULL;
+ * otherwise, it's the context with which the new span should be associated.
+ *
+ * If we're allocating storage for a large object, then pages should be the
+ * minimum number of pages required to hold the object; otherwise, it's
+ * ignored.
+ */
+static mspan *
+mspan_allocate_span(char *base, mspan_manager *mgr, mspan_context *cxt,
+					uint16 span_type, Size pages)
+{
+	mspan	   *span;
+
+	/*
+	 * Search for an existing span.  If we're allocating space for a large
+	 * object, the span has to be large enough to hold the object; otherwise,
+	 * we want something large enough to contain a superblock.
+	 */
+	if (span_type != MSPAN_TYPE_LARGE)
+		pages = MSPAN_PAGES_PER_SUPERBLOCK;
+	span = mspan_find_free_span(base, mgr, pages, 0);
+	if (span != NULL)
+	{
+		/* Remove the span from the free list. */
+		mspan_unlink_span(base, span);
+
+		/* Initialize the span for use. */
+		span->span_type = span_type;
+		span->ninitialized = 0;
+		span->nused = 0;
+		span->firstfree = MSPAN_FIRSTFREE_NONE;
+
+		/* Put the span on the list that ought to contain it. */
+		if (span_type == MSPAN_TYPE_SPAN_OF_SPANS)
+		{
+			Assert(cxt == NULL);
+			mspan_link_span_to_manager(base, mgr, span);
+		}
+		else
+		{
+			Assert(cxt != NULL);
+			mspan_link_span_to_context(base, cxt, span);
+		}
+
+		/* XXX. Update page map entries. */
+
+		if (span->npages > pages)
+		{
+			/* XXX. Split the span. */
+		}
+
+		return span;
+	}
+
+	/* XXX. Allocate storage for a new span. */
+}
+
 /*
  * Deallocate a span descriptor.
  */
@@ -583,7 +643,8 @@
 mspan_ensure_active_superblock(char *base, mspan_context *cxt,
 							   uint16 size_class)
 {
 	/*
-	 * XXX. Implementation needed.
+	 * XXX.  Search for an existing superblock that we can designate as the
+	 * active superblock.
 	 */
 }
 
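The free-span search that mspan_allocate_span depends on is easiest to see in miniature. Assuming spans of npages live on freelist[Min(npages, MSPAN_NUM_FREE_LISTS) - 1], as the indexing in mspan_link_span_to_manager later in this patch implies, here is a sketch of the search with illustrative names and plain pointers (not the real mspan_find_free_span):

    #include <assert.h>
    #include <stddef.h>

    #define DEMO_NUM_FREE_LISTS 16  /* stand-in for MSPAN_NUM_FREE_LISTS */

    typedef struct demo_free_span
    {
        struct demo_free_span *next;    /* next span on the same free list */
        size_t      npages;             /* length of this span in pages */
    } demo_free_span;

    /*
     * Find a span of at least minpages.  Spans of npages live on list
     * Min(npages, DEMO_NUM_FREE_LISTS) - 1, so begin at the list that could
     * hold an exact fit and scan upward.  Lists below the last each hold a
     * single size, but the last list mixes all larger sizes, so for
     * simplicity every list is searched element by element here.
     */
    static demo_free_span *
    demo_find_free_span(demo_free_span *freelist[], size_t minpages)
    {
        size_t      fidx;

        assert(minpages >= 1);
        fidx = (minpages < DEMO_NUM_FREE_LISTS ?
                minpages : DEMO_NUM_FREE_LISTS) - 1;
        for (size_t i = fidx; i < DEMO_NUM_FREE_LISTS; ++i)
        {
            demo_free_span *span;

            for (span = freelist[i]; span != NULL; span = span->next)
                if (span->npages >= minpages)
                    return span;    /* caller may split off the excess */
        }
        return NULL;
    }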
@@ -632,25 +693,113 @@ mspan_find_free_span(char *base, mspan_manager *mgr, Size minpages,
 }
 
 /*
- * Put a span on the appropriate free list.
+ * Add a span to a linked list of spans.
+ *
+ * All the linked lists we use in this module are circularly-linked lists
+ * of relative pointers.  The head of each list points to the first element
+ * of the list.  This function inserts a new element at the head of the list
+ * specified by ptr.
  */
 static void
-mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span)
+mspan_link_span_internal(char *base, Size *ptr, mspan *span)
 {
-	Size		fidx;
-	mspan	   *head;
+	relptr(mspan) rptr;
 
 #ifdef USE_ASSERT_CHECKING
 	Assert(relptr_is_null(span->nextspan));
 	Assert(relptr_is_null(span->prevspan));
 #endif
 
-	fidx = Min(span->npages, MSPAN_NUM_FREE_LISTS) - 1;
-	head = relptr_access(base, mgr->freelist[fidx]);
-	span->nextspan.relptr_off = mgr->freelist[fidx].relptr_off;
-	span->prevspan.relptr_off = head->prevspan.relptr_off;
-	relptr_store(base, head->prevspan, span);
-	relptr_store(base, mgr->freelist[fidx], span);
+	if (*ptr == 0)
+	{
+		relptr_store(base, span->nextspan, span);
+		relptr_store(base, span->prevspan, span);
+	}
+	else
+	{
+		mspan	   *head = (mspan *) (base + *ptr);
+		mspan	   *tail = relptr_access(base, head->prevspan);
+
+		span->nextspan.relptr_off = *ptr;
+		span->prevspan.relptr_off = head->prevspan.relptr_off;
+		relptr_store(base, head->prevspan, span);
+		relptr_store(base, tail->nextspan, span);
+	}
+	relptr_store(base, rptr, span);
+	*ptr = rptr.relptr_off;
+}
+
+/*
+ * Add the span to one of the linked lists within an mspan_context.
+ *
+ * The mspan_context maintains lists of allocated superblocks and large
+ * objects.  To put an existing span object on the appropriate list, call
+ * this function.  Free spans and spans-of-spans are associated with the
+ * manager, not the context; call mspan_link_span_to_manager for those.
+ */
+static void
+mspan_link_span_to_context(char *base, mspan_context *cxt, mspan *span)
+{
+	Size	   *ptr;
+
+	if (span->span_type == MSPAN_TYPE_LARGE)
+		ptr = &cxt->large_allocation.relptr_off;
+	else
+	{
+		uint16		total;
+		int			fullness_class;
+		int			aidx;
+
+		Assert(span->span_type < lengthof(mspan_size_classes));
+		total = MSPAN_SUPERBLOCK_SIZE / mspan_size_classes[span->span_type];
+		Assert(span->nused <= total);
+		if (span->nused == 0)
+			fullness_class = 0;
+		else
+		{
+			fullness_class = ((span->nused *
+							   MSPAN_NUMBER_OF_FULLNESS_CLASSES) - 1) / total;
+			Assert(fullness_class < MSPAN_NUMBER_OF_FULLNESS_CLASSES);
+		}
+		aidx = span->span_type * MSPAN_NUMBER_OF_FULLNESS_CLASSES
+			+ fullness_class;
+		ptr = &cxt->small_allocation[aidx].relptr_off;
+	}
+
+	mspan_link_span_internal(base, ptr, span);
+}
+
+/*
+ * Add the span to one of the linked lists within an mspan_manager.
+ *
+ * The mspan_manager maintains a list of spans-of-spans, and a bunch of
+ * free lists.  To put an existing span object on the appropriate list,
+ * call this function.  Allocated superblocks and large objects are
+ * associated with the context, not the manager; call
+ * mspan_link_span_to_context for those.
+ */
+static void
+mspan_link_span_to_manager(char *base, mspan_manager *mgr, mspan *span)
+{
+	Size	   *ptr;
+
+#ifdef USE_ASSERT_CHECKING
+	Assert(relptr_is_null(span->nextspan));
+	Assert(relptr_is_null(span->prevspan));
+#endif
+
+	if (span->span_type == MSPAN_TYPE_SPAN_OF_SPANS)
+		ptr = &mgr->spansuperblocks.relptr_off;
+	else
+	{
+		Size		fidx;
+
+		Assert(span->span_type == MSPAN_TYPE_FREE);
+		fidx = Min(span->npages, MSPAN_NUM_FREE_LISTS) - 1;
+		ptr = &mgr->freelist[fidx].relptr_off;
+	}
+
+	mspan_link_span_internal(base, ptr, span);
 }
 
 /*
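The fullness-class arithmetic in mspan_link_span_to_context is easy to sanity-check with concrete numbers. The constants below are assumptions made for the example only; the real MSPAN_SUPERBLOCK_SIZE and MSPAN_NUMBER_OF_FULLNESS_CLASSES are defined elsewhere in mspan.c and may differ:

    #include <stdio.h>

    #define DEMO_SUPERBLOCK_SIZE 65536   /* assumed, for illustration */
    #define DEMO_FULLNESS_CLASSES 4      /* assumed, for illustration */

    /*
     * Same formula as mspan_link_span_to_context: an empty superblock is in
     * class 0; otherwise nused objects out of total map to a class in
     * [0, DEMO_FULLNESS_CLASSES - 1], with a completely full superblock
     * landing in the top class.
     */
    static int
    demo_fullness_class(unsigned nused, unsigned total)
    {
        if (nused == 0)
            return 0;
        return ((nused * DEMO_FULLNESS_CLASSES) - 1) / total;
    }

    int
    main(void)
    {
        unsigned total = DEMO_SUPERBLOCK_SIZE / 64; /* 64-byte size class */

        /* Prints "0 0 1 3": empty, one used, half full, completely full. */
        printf("%d %d %d %d\n",
               demo_fullness_class(0, total),
               demo_fullness_class(1, total),
               demo_fullness_class(total / 2, total),
               demo_fullness_class(total, total));
        return 0;
    }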
@@ -660,6 +809,8 @@ mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span)
 static void
 mspan_release_span(char *base, mspan_manager *mgr, mspan *span)
 {
+	Assert(span->span_type != MSPAN_TYPE_FREE);
+
 	/* Remove this span from the list which contains it. */
 	mspan_unlink_span(base, span);
 
@@ -729,8 +880,9 @@ mspan_release_span(char *base, mspan_manager *mgr, mspan *span)
 		mspan_update_page_map(base, mgr, span->first_page + span->npages - 1,
 							  1, ((char *) span) - base);
 
-	/* Put the span on the appropriate free list. */
-	mspan_recycle_span(base, mgr, span);
+	/* Mark the span as free and put it on the appropriate free list. */
+	span->span_type = MSPAN_TYPE_FREE;
+	mspan_link_span_to_manager(base, mgr, span);
 }
 
 /*
-- 
2.39.5
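Everything in this patch leans on relative pointers: a relptr stores a byte offset from the segment base rather than a raw address, so the same linked structures remain valid in a dsm segment mapped at different addresses in different processes, and an offset of 0 doubles as NULL (hence the *ptr == 0 test in mspan_link_span_internal). A minimal sketch of the idiom, with illustrative names rather than the real relptr macros:

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative relative pointer: offset 0 is reserved to mean NULL. */
    typedef struct
    {
        size_t      relptr_off;
    } demo_relptr;

    static void *
    demo_access(char *base, demo_relptr rp)
    {
        return rp.relptr_off == 0 ? NULL : (void *) (base + rp.relptr_off);
    }

    static void
    demo_store(char *base, demo_relptr *rp, void *p)
    {
        rp->relptr_off = (p == NULL) ? 0 : (size_t) ((char *) p - base);
    }

    int
    main(void)
    {
        char        segment[1024];  /* stand-in for a mapped dsm segment */
        demo_relptr rp;

        /* Only the base address differs between processes; offsets do not. */
        demo_store(segment, &rp, segment + 128);
        assert(rp.relptr_off == 128);
        assert(demo_access(segment, rp) == (void *) (segment + 128));
        return 0;
    }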