From cfdb082390a5ea1e870c962fc5e5310c2ae260f6 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Mon, 12 May 2014 13:24:58 -0400 Subject: [PATCH] Rename sb_map to BlockAllocatorMap (source file balloc_map.c/h). --- src/backend/utils/mmgr/Makefile | 3 +- src/backend/utils/mmgr/balloc_map.c | 130 ++++++++++++++++++++++++++ src/backend/utils/mmgr/sb_alloc.c | 10 +- src/backend/utils/mmgr/sb_map.c | 137 ---------------------------- src/backend/utils/mmgr/sb_region.c | 22 +++-- src/include/utils/balloc_map.h | 25 +++++ src/include/utils/sb_map.h | 24 ----- src/include/utils/sb_region.h | 6 +- 8 files changed, 177 insertions(+), 180 deletions(-) create mode 100644 src/backend/utils/mmgr/balloc_map.c delete mode 100644 src/backend/utils/mmgr/sb_map.c create mode 100644 src/include/utils/balloc_map.h delete mode 100644 src/include/utils/sb_map.h diff --git a/src/backend/utils/mmgr/Makefile b/src/backend/utils/mmgr/Makefile index c318a73781..c4d61e4d31 100644 --- a/src/backend/utils/mmgr/Makefile +++ b/src/backend/utils/mmgr/Makefile @@ -12,6 +12,7 @@ subdir = src/backend/utils/mmgr top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global -OBJS = aset.o freepage.o mcxt.o portalmem.o sb_alloc.o sb_map.o sb_region.o +OBJS = aset.o freepage.o mcxt.o portalmem.o sb_alloc.o balloc_map.o \ + sb_region.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/utils/mmgr/balloc_map.c b/src/backend/utils/mmgr/balloc_map.c new file mode 100644 index 0000000000..f35c9518bc --- /dev/null +++ b/src/backend/utils/mmgr/balloc_map.c @@ -0,0 +1,130 @@ +/*------------------------------------------------------------------------- + * + * balloc_map.c + * Block allocator page-mapping infrastructure. + * + * The block allocator does not store metadata with each chunk, and + * therefore needs a way to find the metadata given only the pointer + * address. 
The first step is to translate the pointer address to + * an offset relative to some base address, from which a page number + * can be calculated. Then, this module is responsible for mapping the + * page number to an offset within the chunk where the associated span + * object is stored. We do this in the simplest possible way: one big + * array. + * + * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/backend/utils/mmgr/balloc_map.c + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "storage/shmem.h" +#include "utils/balloc_map.h" +#include "utils/freepage.h" + +const uint64 maxpages_4b = UINT64CONST(0x100000000) / FPM_PAGE_SIZE; + +struct BlockAllocatorMap +{ + relptr(BlockAllocatorMap) self; + Size offset; + Size npages; + bool use64; +}; + +/* Map layout for systems with 32-bit pointers, or shared segments < 4GB. */ +typedef struct BlockAllocatorMap32 +{ + BlockAllocatorMap hdr; + uint32 map[FLEXIBLE_ARRAY_MEMBER]; +} BlockAllocatorMap32; + +/* Map layout for systems with 64-bit pointers, except shared segments < 4GB. */ +typedef struct BlockAllocatorMap64 +{ + BlockAllocatorMap hdr; + uint64 map[FLEXIBLE_ARRAY_MEMBER]; +} BlockAllocatorMap64; + +#define balloc_map_base(m) \ + (((char *) m) - m->self.relptr_off) + +/* + * Compute the amount of space required for a BlockAllocatorMap covering a + * given number of pages. Note that for shared memory (i.e. when base != NULL), + * we assume that the pointers will always point to addresses within that + * same segment, but for backend-private memory that might not be the case. 
+ */ +Size +BlockAllocatorMapSize(char *base, Size npages) +{ + Size map_bytes; + + if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b)) + map_bytes = add_size(offsetof(BlockAllocatorMap32, map), + mul_size(npages, sizeof(uint32))); + else + map_bytes = add_size(offsetof(BlockAllocatorMap64, map), + mul_size(npages, sizeof(uint64))); + + return map_bytes; +} + +/* + * Initialize a BlockAllocatorMap. Storage is provided by the caller. Note + * that we don't zero the array; the caller shouldn't try to get a value that + * hasn't been set. + */ +void +BlockAllocatorMapInitialize(BlockAllocatorMap *m, char *base, Size offset, + Size npages) +{ + relptr_store(base, m->self, m); + m->offset = offset; + m->npages = npages; + if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b)) + m->use64 = false; + else + m->use64 = true; +} + +/* + * Store a value into a BlockAllocatorMap. + */ +void +BlockAllocatorMapSet(BlockAllocatorMap *m, Size pageno, void *ptr) +{ + char *base = balloc_map_base(m); + Assert(pageno >= m->offset); + pageno -= m->offset; + Assert(pageno < m->npages); + + if (m->use64) + ((BlockAllocatorMap64 *) m)->map[pageno] = + (uint64) (((char *) ptr) - base); + else + ((BlockAllocatorMap32 *) m)->map[pageno] = + (uint32) (((char *) ptr) - base); +} + +/* + * Get a value from a BlockAllocatorMap. Getting a value not previously stored + * will produce an undefined result, so don't do that. 
+ */ +void * +BlockAllocatorMapGet(BlockAllocatorMap *m, Size pageno) +{ + char *base = balloc_map_base(m); + Assert(pageno >= m->offset); + pageno -= m->offset; + Assert(pageno < m->npages); + + if (m->use64) + return base + ((BlockAllocatorMap64 *) m)->map[pageno]; + else + return base + ((BlockAllocatorMap32 *) m)->map[pageno]; +} diff --git a/src/backend/utils/mmgr/sb_alloc.c b/src/backend/utils/mmgr/sb_alloc.c index 334ffa0fc1..4674f9709d 100644 --- a/src/backend/utils/mmgr/sb_alloc.c +++ b/src/backend/utils/mmgr/sb_alloc.c @@ -269,7 +269,7 @@ sb_alloc(sb_allocator *a, Size size, int flags) sb_init_span(base, span, heap, ptr, npages, SB_SCLASS_SPAN_LARGE); if (lock != NULL) LWLockRelease(lock); - sb_map_set(region->pagemap, first_page, span); + BlockAllocatorMapSet(region->pagemap, first_page, span); return ptr; } @@ -331,7 +331,7 @@ sb_free(void *ptr) region = sb_lookup_region(ptr); fpm_base = fpm_segment_base(region->fpm); pageno = fpm_pointer_to_page(fpm_base, ptr); - span = sb_map_get(region->pagemap, pageno); + span = BlockAllocatorMapGet(region->pagemap, pageno); /* * If this is a shared-memory region, we might need locking. If so, @@ -481,7 +481,7 @@ sb_alloc_space(Size size) * there's no bookkeeping overhead associated with any single allocation; * the only thing we can really reflect here is the fact that allocations * will be rounded up to the next larger size class (or, for large allocations, - * to a full FPM page). The storage overhead of the sb_span, sb_map, + * to a full FPM page). The storage overhead of the sb_span, BlockAllocatorMap, * sb_region, and FreePageManager structures is typically spread across * enough small allocations to make reflecting those costs here difficult. 
* @@ -502,7 +502,7 @@ sb_chunk_space(void *ptr) region = sb_lookup_region(ptr); fpm_base = fpm_segment_base(region->fpm); pageno = fpm_pointer_to_page(fpm_base, ptr); - span = sb_map_get(region->pagemap, pageno); + span = BlockAllocatorMapGet(region->pagemap, pageno); /* Work out the size of the allocation. */ size_class = span->size_class; @@ -790,7 +790,7 @@ sb_ensure_active_superblock(char *base, sb_region *region, sb_allocator *a, /* Initialize span and pagemap. */ sb_init_span(base, span, heap, ptr, npages, size_class); for (i = 0; i < npages; ++i) - sb_map_set(region->pagemap, first_page + i, span); + BlockAllocatorMapSet(region->pagemap, first_page + i, span); return true; } diff --git a/src/backend/utils/mmgr/sb_map.c b/src/backend/utils/mmgr/sb_map.c deleted file mode 100644 index 7c629dfe0f..0000000000 --- a/src/backend/utils/mmgr/sb_map.c +++ /dev/null @@ -1,137 +0,0 @@ -/*------------------------------------------------------------------------- - * - * sb_map.c - * Superblock allocator page-mapping infrastructure. - * - * The superblock allocator does not store metadata with each chunk, and - * therefore needs a way to find the metadata given only the pointer - * address. The first step is to translate the pointer address to a - * an offset relative to some base address, from which a page number - * can be calculated. Then, this module is reponsible for mapping the - * page number to an offset with the chunk where the associated span - * object is stored. We do this in the simplest possible way: one big - * array. - * - * Span metadata is stored within the same chunk of memory as the span - * itself. Therefore, we can assume that the offset is less than 4GB - * whenever we're managing less than 4GB of pages, and use 4 byte - * offsets. When we're managing more than 4GB of pages, we use 8 byte - * offsets. 
(This could probably be optimized; for example, we could use - * 6 byte offsets for allocation sizes up to 256TB; also, if we assumed - * that the span object must itself be 2, 4, or 8 byte aligned, we could - * extend the cutoff point for offsets of any given length by a similar - * multiple. It's not clear that the extra math would be worthwhile.) - * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/backend/utils/mmgr/sb_map.c - * - *------------------------------------------------------------------------- - */ - -#include "postgres.h" - -#include "storage/shmem.h" -#include "utils/freepage.h" -#include "utils/sb_map.h" - -const uint64 maxpages_4b = UINT64CONST(0x100000000) / FPM_PAGE_SIZE; - -struct sb_map -{ - relptr(sb_map) self; - Size offset; - Size npages; - bool use64; -}; - -/* Map layout for segments less than 4GB. */ -typedef struct sb_map32 -{ - sb_map hdr; - uint32 map[FLEXIBLE_ARRAY_MEMBER]; -} sb_map32; - -/* Map layout for segments less than 8GB. */ -typedef struct sb_map64 -{ - sb_map hdr; - uint64 map[FLEXIBLE_ARRAY_MEMBER]; -} sb_map64; - -#define sb_map_base(m) \ - (((char *) m) - m->self.relptr_off) - -/* - * Compute the amount of space required for an sb_map covering a given - * number of pages. Note that for shared memory (i.e. when base != NULL), - * we assume that the pointers will always point to addresses within that - * same segment, but for backend-private memory that might not be the case. - */ -Size -sb_map_size(char *base, Size npages) -{ - Size map_bytes; - - if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b)) - map_bytes = add_size(offsetof(sb_map32, map), - mul_size(npages, sizeof(uint32))); - else - map_bytes = add_size(offsetof(sb_map64, map), - mul_size(npages, sizeof(uint64))); - - return map_bytes; -} - -/* - * Initialize an sb_map. Storage is provided by the caller. 
Note that we - * don't zero the array; the caller shouldn't try to get a value that hasn't - * been set. - */ -void -sb_map_initialize(sb_map *m, char *base, Size offset, Size npages) -{ - relptr_store(base, m->self, m); - m->offset = offset; - m->npages = npages; - if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b)) - m->use64 = false; - else - m->use64 = true; -} - -/* - * Store a value into an sb_map. - */ -void -sb_map_set(sb_map *m, Size pageno, void *ptr) -{ - char *base = sb_map_base(m); - Assert(pageno >= m->offset); - pageno -= m->offset; - Assert(pageno < m->npages); - - if (m->use64) - ((sb_map64 *) m)->map[pageno] = (uint64) (((char *) ptr) - base); - else - ((sb_map32 *) m)->map[pageno] = (uint32) (((char *) ptr) - base); -} - -/* - * Get a value from an sb_map. Getting a value not previously stored will - * produce an undefined result, so don't do that. - */ -void * -sb_map_get(sb_map *m, Size pageno) -{ - char *base = sb_map_base(m); - Assert(pageno >= m->offset); - pageno -= m->offset; - Assert(pageno < m->npages); - - if (m->use64) - return base + ((sb_map64 *) m)->map[pageno]; - else - return base + ((sb_map32 *) m)->map[pageno]; -} diff --git a/src/backend/utils/mmgr/sb_region.c b/src/backend/utils/mmgr/sb_region.c index 1c5156337d..ed4cd8311f 100644 --- a/src/backend/utils/mmgr/sb_region.c +++ b/src/backend/utils/mmgr/sb_region.c @@ -4,10 +4,10 @@ * Superblock allocator memory region manager. * * The superblock allocator operates on ranges of pages managed by a - * FreePageManager and reverse-mapped by an sb_map. When it's asked to - * free an object, it just gets a pointer address; our job is to figure - * out which page range contains that object and locate the - * FreePageManager, sb_map, and other metadata that the superblock + * FreePageManager and reverse-mapped by a BlockAllocatorMap. 
When it's + * asked to free an object, it just gets a pointer address; our job is to + * figure out which page range contains that object and locate the + * FreePageManager, BlockAllocatorMap, and other metadata that the superblock * allocator will need to do its thing. Moreover, when allocating an * object, the caller is only required to provide the superblock allocator * with a pointer to the sb_allocator object, which could be in either @@ -364,7 +364,8 @@ sb_private_region_for_allocator(Size npages) */ metadata_bytes = MAXALIGN(sizeof(sb_region)); metadata_bytes += MAXALIGN(sizeof(FreePageManager)); - metadata_bytes += MAXALIGN(sb_map_size(NULL, new_region_net_pages)); + metadata_bytes += + MAXALIGN(BlockAllocatorMapSize(NULL, new_region_net_pages)); if (metadata_bytes % FPM_PAGE_SIZE != 0) metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE); region_size = new_region_net_pages * FPM_PAGE_SIZE + metadata_bytes; @@ -398,7 +399,7 @@ sb_private_region_for_allocator(Size npages) region->allocator = NULL; region->fpm = (FreePageManager *) (region_start + MAXALIGN(sizeof(sb_region))); - region->pagemap = (sb_map *) + region->pagemap = (BlockAllocatorMap *) (((char *) region->fpm) + MAXALIGN(sizeof(FreePageManager))); region->contiguous_pages = new_region_net_pages + 1; @@ -406,8 +407,9 @@ sb_private_region_for_allocator(Size npages) FreePageManagerInitialize(region->fpm, region->region_start, NULL, false); FreePageManagerPut(region->fpm, metadata_bytes / FPM_PAGE_SIZE, new_region_net_pages); - sb_map_initialize(region->pagemap, NULL, metadata_bytes / FPM_PAGE_SIZE, - new_region_net_pages); + BlockAllocatorMapInitialize(region->pagemap, NULL, + metadata_bytes / FPM_PAGE_SIZE, + new_region_net_pages); region->contiguous_pages = new_region_net_pages; /* Now fix the value. 
*/ freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS); dlist_push_head(&private_freelist[freelist], &region->fl_node); @@ -444,8 +446,8 @@ sb_report_contiguous_freespace(sb_region *region, Size npages) /* * If the entire region is free, deallocate it. The sb_region, - * FreePageManager, and sb_map for the region is stored within it, so - * they all go away when we free the managed space. + * FreePageManager, and BlockAllocatorMap for the region are stored + * within it, so they all go away when we free the managed space. */ if (npages == region->usable_pages) { diff --git a/src/include/utils/balloc_map.h b/src/include/utils/balloc_map.h new file mode 100644 index 0000000000..e32ae767a1 --- /dev/null +++ b/src/include/utils/balloc_map.h @@ -0,0 +1,25 @@ +/*------------------------------------------------------------------------- + * + * balloc_map.h + * Block allocator page-mapping infrastructure. + * + * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/utils/balloc_map.h + * + *------------------------------------------------------------------------- + */ + +#ifndef BALLOC_MAP_H +#define BALLOC_MAP_H + +typedef struct BlockAllocatorMap BlockAllocatorMap; + +extern Size BlockAllocatorMapSize(char *base, Size npages); +extern void BlockAllocatorMapInitialize(BlockAllocatorMap *, char *base, + Size offset, Size npages); +extern void BlockAllocatorMapSet(BlockAllocatorMap *, Size pageno, void *ptr); +extern void *BlockAllocatorMapGet(BlockAllocatorMap *, Size pageno); + +#endif /* BALLOC_MAP_H */ diff --git a/src/include/utils/sb_map.h b/src/include/utils/sb_map.h deleted file mode 100644 index 519bf52290..0000000000 --- a/src/include/utils/sb_map.h +++ /dev/null @@ -1,24 +0,0 @@ -/*------------------------------------------------------------------------- - * - * sb_map.h - * Superblock allocator page-mapping infrastructure. 
- * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/utils/sb_map.h - * - *------------------------------------------------------------------------- - */ - -#ifndef SB_MAP_H -#define SB_MAP_H - -typedef struct sb_map sb_map; - -extern Size sb_map_size(char *base, Size npages); -extern void sb_map_initialize(sb_map *, char *base, Size offset, Size npages); -extern void sb_map_set(sb_map *, Size pageno, void *ptr); -extern void *sb_map_get(sb_map *, Size pageno); - -#endif /* SB_MAP_H */ diff --git a/src/include/utils/sb_region.h b/src/include/utils/sb_region.h index 6b2a578022..152056b217 100644 --- a/src/include/utils/sb_region.h +++ b/src/include/utils/sb_region.h @@ -19,7 +19,7 @@ #include "storage/shm_toc.h" #include "utils/freepage.h" #include "utils/sb_alloc.h" -#include "utils/sb_map.h" +#include "utils/balloc_map.h" /* Pages per superblock (in units of FPM_PAGE_SIZE). */ #define SB_PAGES_PER_SUPERBLOCK 16 @@ -36,7 +36,7 @@ typedef struct sb_region dsm_segment *seg; /* If not backend-private, DSM handle. */ sb_allocator *allocator; /* If not backend-private, shared allocator. */ FreePageManager *fpm; /* Free page manager for region (if any). */ - sb_map *pagemap; /* Page map for region (if any). */ + BlockAllocatorMap *pagemap; /* Page map for region (if any). */ Size contiguous_pages; /* Last reported contiguous free pages. */ dlist_node fl_node; /* Freelist links. */ } sb_region; @@ -48,7 +48,7 @@ typedef struct sb_region typedef struct sb_shared_region { relptr(FreePageManager) fpm; - relptr(sb_map) pagemap; + relptr(BlockAllocatorMap) pagemap; relptr(sb_allocator) allocator; int lwlock_tranche_id; char lwlock_tranche_name[FLEXIBLE_ARRAY_MEMBER]; -- 2.39.5