From bf41892ad76162a5c66694c8608f5c30d5f40ef9 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Thu, 16 Jan 2014 16:57:54 -0500 Subject: [PATCH] Hack. --- src/backend/utils/mmgr/mspan.c | 118 ++++++++++++++++++++++++--------- 1 file changed, 87 insertions(+), 31 deletions(-) diff --git a/src/backend/utils/mmgr/mspan.c b/src/backend/utils/mmgr/mspan.c index 67216f7a26..372bd9f2e6 100644 --- a/src/backend/utils/mmgr/mspan.c +++ b/src/backend/utils/mmgr/mspan.c @@ -151,6 +151,8 @@ static void mspan_ensure_active_superblock(char *base, mspan_context *cxt, uint16 size_class); static mspan *mspan_find_free_span(char *base, mspan_manager *mgr, Size minpages, Size maxpages); +static void mspan_initialize_span(char *base, mspan_manager *mgr, + mspan_context *cxt, mspan *span, uint16 span_type); static void mspan_link_span_to_context(char *base, mspan_context *cxt, mspan *span); static void mspan_link_span_to_manager(char *base, mspan_manager *mgr, @@ -573,7 +575,8 @@ static mspan * mspan_allocate_span(char *base, mspan_manager *mgr, mspan_context *cxt, uint16 span_type, Size pages) { - mspan *span; + mspan *span; + Size first_page; /* * Search for an existing span. If we're allocating space for a large @@ -587,24 +590,7 @@ mspan_allocate_span(char *base, mspan_manager *mgr, mspan_context *cxt, { /* Remove the span from the free list. */ mspan_unlink_span(base, span); - - /* Initialize the span for use. */ - span->span_type = span_type; - span->ninitialized = 0; - span->nused = 0; - span->firstfree = MSPAN_FIRSTFREE_NONE; - - /* Put the span on the list that ought to contain it. */ - if (span_type == MSPAN_TYPE_SPAN_OF_SPANS) - { - Assert(cxt == NULL); - mspan_link_span_to_manager(base, mgr, span); - } - else - { - Assert(cxt != NULL); - mspan_link_span_to_context(base, cxt, span); - } + mspan_initialize_span(base, mgr, cxt, span, span_type); /* XXX. Update page map entries. 
 */ @@ -632,18 +618,54 @@ mspan_allocate_span(char *base, mspan_manager *mgr, mspan_context *cxt, return NULL; } + /* Allocate storage for the new span. */ + if (base != NULL) + { + /* In the dynamic shared memory case, allocate from the boundary. */ + if (mgr->boundary + pages >= mgr->npages) + { + /* Not enough pages remaining. */ + mspan_destroy_span(base, span); + return NULL; + } + first_page = mgr->boundary; + mgr->boundary += pages; + } + else + { + /* + * XXX. Allocate more core via malloc. We need a system here for + * this. Obviously we shouldn't just allocate the smallest amount + * needed for this span unless that's already pretty big. Instead, + * we should allocate enough for this span and then throw the remainder + * in a bucket for later use. But the mechanism for that is not + * designed yet. + * + * XXX. How exactly are we going to give the segments we malloc + * back to the OS? How are we even going to know where they are? + * We can add them to the freelists as a big old span, but that's + * not going to help much in terms of identifying them later. + */ + first_page = 0; /* XXX. Bogus. */ + } + /* - * XXX. We need more core. Allocate either from the boundary or - * via malloc. - */ - /* - * XXX. How exactly are we going to give the segments we malloc - * back to the OS? How are we even going to know where they are? - * We can add them to the freelists as a big old span, but that's - * not going to help much in terms of identifying them later. + * If this is a span-of-spans, allocate a descriptor for the new span + * out of the span itself. Otherwise, we must already have a descriptor. */ + if (span_type == MSPAN_TYPE_SPAN_OF_SPANS) + { + Assert(span == NULL); + span = (mspan *) (base + first_page * MSPAN_PAGE_SIZE); + } + Assert(span != NULL); - return NULL; + /* Initialize the new span. 
*/ + span->first_page = first_page; + span->npages = pages; + mspan_initialize_span(base, mgr, cxt, span, span_type); + + return span; } /* @@ -717,6 +739,39 @@ mspan_find_free_span(char *base, mspan_manager *mgr, Size minpages, return NULL; } +/* + * Initialize a span descriptor. + */ +static void +mspan_initialize_span(char *base, mspan_manager *mgr, mspan_context *cxt, + mspan *span, uint16 span_type) +{ + /* The basics. */ + span->span_type = span_type; + span->firstfree = MSPAN_FIRSTFREE_NONE; + + /* + * Normally, the span starts out empty, but a span-of-spans contains + * its own descriptor, so it starts out containing one allocation. + * A span-of-spans is different in another way as well: it's managed + * by the manager, not the context. + */ + if (span_type == MSPAN_TYPE_SPAN_OF_SPANS) + { + Assert(cxt == NULL); + span->ninitialized = 1; + span->nused = 1; + mspan_link_span_to_manager(base, mgr, span); + } + else + { + Assert(cxt != NULL); + span->ninitialized = 0; + span->nused = 0; + mspan_link_span_to_context(base, cxt, span); + } +} + /* * Add a span to a linked list of spans. * @@ -726,7 +781,7 @@ mspan_find_free_span(char *base, mspan_manager *mgr, Size minpages, * specified by ptr. 
*/ static void -mspan_link_span_internal(char *base, Size *ptr, mspan *span) +mspan_link_span_internal(char *base, void *parent, Size *ptr, mspan *span) { relptr(mspan) rptr; @@ -735,6 +790,7 @@ mspan_link_span_internal(char *base, Size *ptr, mspan *span) Assert(relptr_is_null(span->prevspan)); #endif + relptr_store(base, span->parent, parent); if (*ptr == 0) { relptr_store(base, span->nextspan, span); @@ -791,7 +847,7 @@ mspan_link_span_to_context(char *base, mspan_context *cxt, mspan *span) ptr = &cxt->small_allocation[aidx].relptr_off; } - mspan_link_span_internal(base, ptr, span); + mspan_link_span_internal(base, cxt, ptr, span); } /* @@ -824,7 +880,7 @@ mspan_link_span_to_manager(char *base, mspan_manager *mgr, mspan *span) ptr = &mgr->freelist[fidx].relptr_off; } - mspan_link_span_internal(base, ptr, span); + mspan_link_span_internal(base, mgr, ptr, span); } /* -- 2.39.5