From 04c1d945d765bdaf4c4092bb7f6eb84a96d6a042 Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Thu, 12 Dec 2013 15:20:07 -0500
Subject: [PATCH] Hack, hack.

---
 src/backend/utils/mmgr/mspan.c | 114 ++++++++++++++++++++++++++++++++-
 1 file changed, 111 insertions(+), 3 deletions(-)

diff --git a/src/backend/utils/mmgr/mspan.c b/src/backend/utils/mmgr/mspan.c
index bc706d9d37..f7df246ba1 100644
--- a/src/backend/utils/mmgr/mspan.c
+++ b/src/backend/utils/mmgr/mspan.c
@@ -73,6 +73,7 @@ struct mspan
  */
 struct mspan_context
 {
+	relptr(mspan_manager) manager;
 	relptr(mspan) large_allocation;
 	relptr(mspan) small_allocation[MSPAN_SMALL_ALLOCATION_LISTS];
 };
@@ -83,8 +84,8 @@ static mspan_context *mspan_allocate_context_descriptor(char *base,
 static void mspan_destroy_span(char *base, mspan *span);
 static mspan *mspan_find_free_span(char *base, mspan_manager *mgr,
 					 Size minpages, Size maxpages);
-static void mspan_recycle_span(char *base, mspan_manager *mgr,
-				   mspan *span);
+static void mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span);
+static void mspan_release_span(char *base, mspan_manager *mgr, mspan *span);
 static void mspan_unlink_span(char *base, mspan *span);
 static void mspan_update_page_map(char *base, mspan_manager *mgr,
 					  Size first_page, Size npages, Size value);
@@ -172,6 +173,40 @@ mspan_context_create(dsm_segment *seg, mspan_manager *mgr)
 	return cxt;
 }
 
+/*
+ * Destroy an allocation context within an address space.
+ *
+ * This releases all storage associated with the context.
+ */
+void
+mspan_context_destroy(dsm_segment *seg, mspan_context *cxt)
+{
+	char	   *base = (seg != NULL ? dsm_segment_address(seg) : NULL);
+	mspan_manager *mgr = relptr_access(base, cxt->manager);
+	int			i;
+
+	/* Release large allocations one at a time. */
+	while (!relptr_is_null(cxt->large_allocation))
+	{
+		mspan	   *span = relptr_access(base, cxt->large_allocation);
+		mspan_release_span(base, mgr, span);
+	}
+
+	/* Release small allocations one superblock at a time. */
+	for (i = 0; i < MSPAN_SMALL_ALLOCATION_LISTS; ++i)
+	{
+		while (!relptr_is_null(cxt->small_allocation[i]))
+		{
+			mspan	   *span = relptr_access(base, cxt->small_allocation[i]);
+			mspan_release_span(base, mgr, span);
+		}
+	}
+
+	/* Put this context object back on the manager's free list. */
+	* (Size *) cxt = mgr->freecontext.relptr_off;
+	relptr_store(base, mgr->freecontext, cxt);
+}
+
 /*
  * Allocate new space for a new context descriptor.
  *
@@ -242,11 +277,11 @@
 	 * appropriate.
 	 */
 	mspan_update_page_map(base, mgr, pageno, 1, 0);
+	mspan_unlink_span(base, span);		/* XXX Head of circular list. */
 	if (span->npages == 1)
 		mspan_destroy_span(base, span);
 	else
 	{
-		mspan_unlink_span(base, span);
 		++span->first_page;
 		--span->npages;
 		mspan_recycle_span(base, mgr, span);
@@ -366,6 +401,79 @@ mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span)
 	relptr_store(base, mgr->freelist[fidx], span);
 }
 
+/*
+ * Release the memory consumed by a span, consolidating it with adjacent free
+ * spans if possible.
+ */
+static void
+mspan_release_span(char *base, mspan_manager *mgr, mspan *span)
+{
+	mspan	   *preceding_span = NULL;
+	mspan	   *following_span = NULL;
+
+	/*
+	 * Find the spans that precede and follow the span to be released within
+	 * the address space, if they are free.  In the page map, 0 means no entry
+	 * and any odd value means that the span is allocated, so we ignore those
+	 * values.
+	 */
+	if (span->first_page > 0)
+	{
+		relptr(mspan) p;
+
+		p.relptr_off = aspace_map_get(&mgr->page_map,
+									  span->first_page - 1, base);
+		if (p.relptr_off != 0 && (p.relptr_off & 1) == 0)
+			preceding_span = relptr_access(base, p);
+	}
+	if (mgr->npages == 0 || span->first_page + span->npages < mgr->boundary)
+	{
+		relptr(mspan) f;
+
+		f.relptr_off = aspace_map_get(&mgr->page_map,
+									  span->first_page + span->npages, base);
+		if (f.relptr_off != 0 && (f.relptr_off & 1) == 0)
+			following_span = relptr_access(base, f);
+	}
+
+	/*
+	 * XXX. Remove this span from the list which contains it.
+	 *
+	 * If we're blowing away the entire context, then this will be some
+	 * list of allocated objects ... and if we're freeing it because it's
+	 * empty, it'll also be some list of allocated objects ... but if it's
+	 * a span of spans then perhaps not.
+	 */
+
+	/*
+	 * XXX. Consolidate this span with the following span, if it's free.
+	 */
+
+	/*
+	 * XXX. Consolidate this span with the previous span, if it's free.
+	 */
+
+	/*
+	 * Make new page map entries for the span.
+	 *
+	 * Since allocated spans have page map entries with the least significant
+	 * bit set, we need to make new entries regardless of whether we succeeded
+	 * in consolidating with adjacent spans.  If we did consolidate, we need
+	 * new entries for that reason as well: the first and last pages of the
+	 * new and larger span must point to the correct object.  This coding may
+	 * leave behind stale mappings between the first and last pages of the
+	 * object, but it doesn't matter.  For a free span, only the first and
+	 * last pages will ever be looked up in the page map; we needn't spend
+	 * time fixing whatever junk entries may exist in the middle.
+	 */
+	mspan_update_page_map(base, mgr, span->first_page, 1,
+						  ((char *) span) - base);
+	if (span->npages > 1)
+		mspan_update_page_map(base, mgr, span->first_page + span->npages - 1,
+							  1, ((char *) span) - base);
+	mspan_recycle_span(base, mgr, span);
+}
+
 /*
  * Update the page map.
  */
-- 
2.39.5
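
For readers skimming the patch, here is a standalone sketch of the page-map
convention that mspan_release_span's comments rely on: a free span's
descriptor is reachable from the page map entries of its first and last pages
(an even value), allocated spans leave entries with the least significant bit
set, and releasing a span probes the pages immediately before and after it to
find free neighbors to merge with.  This is only an illustration under stated
assumptions, not code from the patch: the flat toy_page_map array, toy_span,
toy_release, and the other toy_* names are invented here, whereas the real
code works through relative pointers (relptr) and aspace_map_get().

/*
 * Hypothetical standalone illustration (not part of the patch) of the
 * coalescing scheme described in mspan_release_span's comments.  Each page
 * has a page map entry: 0 means no entry, an odd value means the page
 * belongs to an allocated span, and an even value is the address of a free
 * span's descriptor.  Only the first and last pages of a free span are kept
 * up to date; interior entries may go stale.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_NPAGES 64

typedef struct toy_span
{
	size_t		first_page;
	size_t		npages;
} toy_span;

static uintptr_t toy_page_map[TOY_NPAGES];

/* Return the free span covering the given page, or NULL if none. */
static toy_span *
toy_free_neighbor(size_t pageno)
{
	uintptr_t	entry;

	if (pageno >= TOY_NPAGES)
		return NULL;
	entry = toy_page_map[pageno];
	if (entry == 0 || (entry & 1) != 0)
		return NULL;			/* no entry, or page belongs to an allocation */
	return (toy_span *) entry;	/* malloc'd pointers are even, so this is safe */
}

/* Record a free span in the map: first and last pages only. */
static void
toy_map_free_span(toy_span *span)
{
	toy_page_map[span->first_page] = (uintptr_t) span;
	toy_page_map[span->first_page + span->npages - 1] = (uintptr_t) span;
}

/* Release a span, merging it with free neighbors found via the page map. */
static toy_span *
toy_release(toy_span *span)
{
	toy_span   *prev = NULL;
	toy_span   *next;

	/* Probe the page just before and the page just after the span. */
	if (span->first_page > 0)
		prev = toy_free_neighbor(span->first_page - 1);
	next = toy_free_neighbor(span->first_page + span->npages);

	if (next != NULL)			/* absorb the following free span */
	{
		span->npages += next->npages;
		free(next);
	}
	if (prev != NULL)			/* let the preceding free span absorb us */
	{
		prev->npages += span->npages;
		free(span);
		span = prev;
	}
	toy_map_free_span(span);
	return span;
}

int
main(void)
{
	toy_span   *a = malloc(sizeof(toy_span));
	toy_span   *b = malloc(sizeof(toy_span));

	/* Pages 0-3 are already free; pages 4-7 are being released. */
	a->first_page = 0;
	a->npages = 4;
	toy_map_free_span(a);

	b->first_page = 4;
	b->npages = 4;
	b = toy_release(b);
	printf("merged span: first_page=%zu npages=%zu\n",
		   b->first_page, b->npages);	/* prints first_page=0 npages=8 */
	free(b);
	return 0;
}

The same reasoning explains why mspan_release_span rewrites only the first
and last page map entries of the span it frees: a free span is only ever
looked up from its edges, so interior entries are allowed to go stale.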