top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
-OBJS = aset.o freepage.o mcxt.o portalmem.o sb_alloc.o sb_map.o sb_region.o
+OBJS = aset.o freepage.o mcxt.o portalmem.o sb_alloc.o balloc_map.o \
+ sb_region.o
include $(top_srcdir)/src/backend/common.mk
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * balloc_map.c
+ * Block allocator page-mapping infrastructure.
+ *
+ * The block allocator does not store metadata with each chunk, and
+ * therefore needs a way to find the metadata given only the pointer
+ * address. The first step is to translate the pointer address to an
+ * offset relative to some base address, from which a page number
+ * can be calculated. Then, this module is responsible for mapping the
+ * page number to an offset within the chunk where the associated span
+ * object is stored. We do this in the simplest possible way: one big
+ * array.
+ *
+ * Span metadata is stored within the same chunk of memory as the span
+ * itself. Therefore, we can assume that the offset is less than 4GB
+ * whenever we're managing less than 4GB of pages, and use 4 byte
+ * offsets; otherwise, we use 8 byte offsets.
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/backend/utils/mmgr/balloc_map.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/shmem.h"
+#include "utils/balloc_map.h"
+#include "utils/freepage.h"
+
+/* Largest page count for which every byte offset fits in 32 bits (< 4GB). */
+const uint64 maxpages_4b = UINT64CONST(0x100000000) / FPM_PAGE_SIZE;
+
+/* Common header shared by the 32-bit and 64-bit map layouts below. */
+struct BlockAllocatorMap
+{
+ relptr(BlockAllocatorMap) self; /* self-relative pointer; yields base address */
+ Size offset; /* first page number covered by this map */
+ Size npages; /* number of pages covered by this map */
+ bool use64; /* true when map entries are 64-bit offsets */
+};
+
+/* Map layout for systems with 32-bit pointers, or shared segments < 4GB. */
+typedef struct BlockAllocatorMap32
+{
+ BlockAllocatorMap hdr; /* common header (hdr.use64 is false) */
+ uint32 map[FLEXIBLE_ARRAY_MEMBER]; /* per-page byte offsets from base */
+} BlockAllocatorMap32;
+
+/* Map layout for systems with 64-bit pointers, except shared segments < 4GB. */
+typedef struct BlockAllocatorMap64
+{
+ BlockAllocatorMap hdr; /* common header (hdr.use64 is true) */
+ uint64 map[FLEXIBLE_ARRAY_MEMBER]; /* per-page byte offsets from base */
+} BlockAllocatorMap64;
+
+/*
+ * Recover the base address for stored offsets: the map itself lives
+ * m->self.relptr_off bytes past the base, so subtracting that from the
+ * map's own address yields the base.
+ * NOTE(review): "m" is not parenthesized in the expansion; callers here
+ * pass only simple identifiers, but confirm before using complex args.
+ */
+#define balloc_map_base(m) \
+ (((char *) m) - m->self.relptr_off)
+
+/*
+ * Compute the amount of space required for a BlockAllocatorMap covering a
+ * given number of pages. Note that for shared memory (i.e. when base != NULL),
+ * we assume that the pointers will always point to addresses within that
+ * same segment, but for backend-private memory that might not be the case.
+ */
+Size
+BlockAllocatorMapSize(char *base, Size npages)
+{
+ Size map_bytes;
+
+ /* Entry-width choice must stay in sync with BlockAllocatorMapInitialize. */
+ if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b))
+ map_bytes = add_size(offsetof(BlockAllocatorMap32, map),
+ mul_size(npages, sizeof(uint32)));
+ else
+ map_bytes = add_size(offsetof(BlockAllocatorMap64, map),
+ mul_size(npages, sizeof(uint64)));
+
+ return map_bytes;
+}
+
+/*
+ * Initialize a BlockAllocatorMap. Storage is provided by the caller. Note
+ * that we don't zero the array; the caller shouldn't try to get a value that
+ * hasn't been set.
+ *
+ * "offset" is the first page number this map covers; "npages" is how many
+ * pages it covers from there.
+ */
+void
+BlockAllocatorMapInitialize(BlockAllocatorMap *m, char *base, Size offset,
+ Size npages)
+{
+ relptr_store(base, m->self, m);
+ m->offset = offset;
+ m->npages = npages;
+ /* Entry-width choice must stay in sync with BlockAllocatorMapSize. */
+ if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b))
+ m->use64 = false;
+ else
+ m->use64 = true;
+}
+
+/*
+ * Store a value into a BlockAllocatorMap.
+ *
+ * The pointer is stored as a byte offset from the map's base address, in
+ * whichever entry width the map was initialized with.
+ */
+void
+BlockAllocatorMapSet(BlockAllocatorMap *m, Size pageno, void *ptr)
+{
+ char *base = balloc_map_base(m);
+ Assert(pageno >= m->offset);
+ pageno -= m->offset; /* convert to a zero-based map index */
+ Assert(pageno < m->npages);
+
+ if (m->use64)
+ ((BlockAllocatorMap64 *) m)->map[pageno] =
+ (uint64) (((char *) ptr) - base);
+ else
+ ((BlockAllocatorMap32 *) m)->map[pageno] =
+ (uint32) (((char *) ptr) - base);
+}
+
+/*
+ * Get a value from a BlockAllocatorMap. Getting a value not previously stored
+ * will produce an undefined result, so don't do that.
+ *
+ * Returns the pointer reconstructed from the stored base-relative offset.
+ */
+void *
+BlockAllocatorMapGet(BlockAllocatorMap *m, Size pageno)
+{
+ char *base = balloc_map_base(m);
+ Assert(pageno >= m->offset);
+ pageno -= m->offset; /* convert to a zero-based map index */
+ Assert(pageno < m->npages);
+
+ if (m->use64)
+ return base + ((BlockAllocatorMap64 *) m)->map[pageno];
+ else
+ return base + ((BlockAllocatorMap32 *) m)->map[pageno];
+}
sb_init_span(base, span, heap, ptr, npages, SB_SCLASS_SPAN_LARGE);
if (lock != NULL)
LWLockRelease(lock);
- sb_map_set(region->pagemap, first_page, span);
+ BlockAllocatorMapSet(region->pagemap, first_page, span);
return ptr;
}
region = sb_lookup_region(ptr);
fpm_base = fpm_segment_base(region->fpm);
pageno = fpm_pointer_to_page(fpm_base, ptr);
- span = sb_map_get(region->pagemap, pageno);
+ span = BlockAllocatorMapGet(region->pagemap, pageno);
/*
* If this is a shared-memory region, we might need locking. If so,
* there's no bookkeeping overhead associated with any single allocation;
* the only thing we can really reflect here is the fact that allocations
* will be rounded up to the next larger size class (or, for large allocations,
- * to a full FPM page). The storage overhead of the sb_span, sb_map,
+ * to a full FPM page). The storage overhead of the sb_span, BlockAllocatorMap,
* sb_region, and FreePageManager structures is typically spread across
* enough small allocations to make reflecting those costs here difficult.
*
region = sb_lookup_region(ptr);
fpm_base = fpm_segment_base(region->fpm);
pageno = fpm_pointer_to_page(fpm_base, ptr);
- span = sb_map_get(region->pagemap, pageno);
+ span = BlockAllocatorMapGet(region->pagemap, pageno);
/* Work out the size of the allocation. */
size_class = span->size_class;
/* Initialize span and pagemap. */
sb_init_span(base, span, heap, ptr, npages, size_class);
for (i = 0; i < npages; ++i)
- sb_map_set(region->pagemap, first_page + i, span);
+ BlockAllocatorMapSet(region->pagemap, first_page + i, span);
return true;
}
+++ /dev/null
-/*-------------------------------------------------------------------------
- *
- * sb_map.c
- * Superblock allocator page-mapping infrastructure.
- *
- * The superblock allocator does not store metadata with each chunk, and
- * therefore needs a way to find the metadata given only the pointer
- * address. The first step is to translate the pointer address to a
- * an offset relative to some base address, from which a page number
- * can be calculated. Then, this module is reponsible for mapping the
- * page number to an offset with the chunk where the associated span
- * object is stored. We do this in the simplest possible way: one big
- * array.
- *
- * Span metadata is stored within the same chunk of memory as the span
- * itself. Therefore, we can assume that the offset is less than 4GB
- * whenever we're managing less than 4GB of pages, and use 4 byte
- * offsets. When we're managing more than 4GB of pages, we use 8 byte
- * offsets. (This could probably be optimized; for example, we could use
- * 6 byte offsets for allocation sizes up to 256TB; also, if we assumed
- * that the span object must itself be 2, 4, or 8 byte aligned, we could
- * extend the cutoff point for offsets of any given length by a similar
- * multiple. It's not clear that the extra math would be worthwhile.)
- *
- * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * src/backend/utils/mmgr/sb_map.c
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres.h"
-
-#include "storage/shmem.h"
-#include "utils/freepage.h"
-#include "utils/sb_map.h"
-
-const uint64 maxpages_4b = UINT64CONST(0x100000000) / FPM_PAGE_SIZE;
-
-struct sb_map
-{
- relptr(sb_map) self;
- Size offset;
- Size npages;
- bool use64;
-};
-
-/* Map layout for segments less than 4GB. */
-typedef struct sb_map32
-{
- sb_map hdr;
- uint32 map[FLEXIBLE_ARRAY_MEMBER];
-} sb_map32;
-
-/* Map layout for segments less than 8GB. */
-typedef struct sb_map64
-{
- sb_map hdr;
- uint64 map[FLEXIBLE_ARRAY_MEMBER];
-} sb_map64;
-
-#define sb_map_base(m) \
- (((char *) m) - m->self.relptr_off)
-
-/*
- * Compute the amount of space required for an sb_map covering a given
- * number of pages. Note that for shared memory (i.e. when base != NULL),
- * we assume that the pointers will always point to addresses within that
- * same segment, but for backend-private memory that might not be the case.
- */
-Size
-sb_map_size(char *base, Size npages)
-{
- Size map_bytes;
-
- if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b))
- map_bytes = add_size(offsetof(sb_map32, map),
- mul_size(npages, sizeof(uint32)));
- else
- map_bytes = add_size(offsetof(sb_map64, map),
- mul_size(npages, sizeof(uint64)));
-
- return map_bytes;
-}
-
-/*
- * Initialize an sb_map. Storage is provided by the caller. Note that we
- * don't zero the array; the caller shouldn't try to get a value that hasn't
- * been set.
- */
-void
-sb_map_initialize(sb_map *m, char *base, Size offset, Size npages)
-{
- relptr_store(base, m->self, m);
- m->offset = offset;
- m->npages = npages;
- if (sizeof(Size) <= 4 || (base != NULL && npages < maxpages_4b))
- m->use64 = false;
- else
- m->use64 = true;
-}
-
-/*
- * Store a value into an sb_map.
- */
-void
-sb_map_set(sb_map *m, Size pageno, void *ptr)
-{
- char *base = sb_map_base(m);
- Assert(pageno >= m->offset);
- pageno -= m->offset;
- Assert(pageno < m->npages);
-
- if (m->use64)
- ((sb_map64 *) m)->map[pageno] = (uint64) (((char *) ptr) - base);
- else
- ((sb_map32 *) m)->map[pageno] = (uint32) (((char *) ptr) - base);
-}
-
-/*
- * Get a value from an sb_map. Getting a value not previously stored will
- * produce an undefined result, so don't do that.
- */
-void *
-sb_map_get(sb_map *m, Size pageno)
-{
- char *base = sb_map_base(m);
- Assert(pageno >= m->offset);
- pageno -= m->offset;
- Assert(pageno < m->npages);
-
- if (m->use64)
- return base + ((sb_map64 *) m)->map[pageno];
- else
- return base + ((sb_map32 *) m)->map[pageno];
-}
* Superblock allocator memory region manager.
*
* The superblock allocator operates on ranges of pages managed by a
- * FreePageManager and reverse-mapped by an sb_map. When it's asked to
- * free an object, it just gets a pointer address; our job is to figure
- * out which page range contains that object and locate the
- * FreePageManager, sb_map, and other metadata that the superblock
+ * FreePageManager and reverse-mapped by a BlockAllocatorMap. When it's
+ * asked to free an object, it just gets a pointer address; our job is to
+ * figure out which page range contains that object and locate the
+ * FreePageManager, BlockAllocatorMap, and other metadata that the superblock
* allocator will need to do its thing. Moreover, when allocating an
* object, the caller is only required to provide the superblock allocator
* with a pointer to the sb_allocator object, which could be in either
*/
metadata_bytes = MAXALIGN(sizeof(sb_region));
metadata_bytes += MAXALIGN(sizeof(FreePageManager));
- metadata_bytes += MAXALIGN(sb_map_size(NULL, new_region_net_pages));
+ metadata_bytes +=
+ MAXALIGN(BlockAllocatorMapSize(NULL, new_region_net_pages));
if (metadata_bytes % FPM_PAGE_SIZE != 0)
metadata_bytes += FPM_PAGE_SIZE - (metadata_bytes % FPM_PAGE_SIZE);
region_size = new_region_net_pages * FPM_PAGE_SIZE + metadata_bytes;
region->allocator = NULL;
region->fpm = (FreePageManager *)
(region_start + MAXALIGN(sizeof(sb_region)));
- region->pagemap = (sb_map *)
+ region->pagemap = (BlockAllocatorMap *)
(((char *) region->fpm) + MAXALIGN(sizeof(FreePageManager)));
region->contiguous_pages = new_region_net_pages + 1;
FreePageManagerInitialize(region->fpm, region->region_start, NULL, false);
FreePageManagerPut(region->fpm, metadata_bytes / FPM_PAGE_SIZE,
new_region_net_pages);
- sb_map_initialize(region->pagemap, NULL, metadata_bytes / FPM_PAGE_SIZE,
- new_region_net_pages);
+ BlockAllocatorMapInitialize(region->pagemap, NULL,
+ metadata_bytes / FPM_PAGE_SIZE,
+ new_region_net_pages);
region->contiguous_pages = new_region_net_pages; /* Now fix the value. */
freelist = Min(fls(new_region_net_pages), NUM_PRIVATE_FREELISTS);
dlist_push_head(&private_freelist[freelist], ®ion->fl_node);
/*
* If the entire region is free, deallocate it. The sb_region,
- * FreePageManager, and sb_map for the region is stored within it, so
- * they all go away when we free the managed space.
+ * FreePageManager, and BlockAllocatorMap for the region are stored
+ * within it, so they all go away when we free the managed space.
*/
if (npages == region->usable_pages)
{
--- /dev/null
+/*-------------------------------------------------------------------------
+ *
+ * balloc_map.h
+ * Block allocator page-mapping infrastructure.
+ *
+ * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/utils/balloc_map.h
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef BALLOC_MAP_H
+#define BALLOC_MAP_H
+
+/* Opaque map type; layout is private to balloc_map.c. */
+typedef struct BlockAllocatorMap BlockAllocatorMap;
+
+/* Space needed for a map covering npages pages (base != NULL for shared memory). */
+extern Size BlockAllocatorMapSize(char *base, Size npages);
+/* Initialize caller-provided storage; map entries are not zeroed. */
+extern void BlockAllocatorMapInitialize(BlockAllocatorMap *, char *base,
+ Size offset, Size npages);
+/* Record the pointer associated with a page. */
+extern void BlockAllocatorMapSet(BlockAllocatorMap *, Size pageno, void *ptr);
+/* Fetch a previously stored pointer; undefined for pages never set. */
+extern void *BlockAllocatorMapGet(BlockAllocatorMap *, Size pageno);
+
+#endif /* BALLOC_MAP_H */
+++ /dev/null
-/*-------------------------------------------------------------------------
- *
- * sb_map.h
- * Superblock allocator page-mapping infrastructure.
- *
- * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
- * Portions Copyright (c) 1994, Regents of the University of California
- *
- * src/include/utils/sb_map.h
- *
- *-------------------------------------------------------------------------
- */
-
-#ifndef SB_MAP_H
-#define SB_MAP_H
-
-typedef struct sb_map sb_map;
-
-extern Size sb_map_size(char *base, Size npages);
-extern void sb_map_initialize(sb_map *, char *base, Size offset, Size npages);
-extern void sb_map_set(sb_map *, Size pageno, void *ptr);
-extern void *sb_map_get(sb_map *, Size pageno);
-
-#endif /* SB_MAP_H */
#include "storage/shm_toc.h"
#include "utils/freepage.h"
#include "utils/sb_alloc.h"
-#include "utils/sb_map.h"
+#include "utils/balloc_map.h"
/* Pages per superblock (in units of FPM_PAGE_SIZE). */
#define SB_PAGES_PER_SUPERBLOCK 16
dsm_segment *seg; /* If not backend-private, DSM handle. */
sb_allocator *allocator; /* If not backend-private, shared allocator. */
FreePageManager *fpm; /* Free page manager for region (if any). */
- sb_map *pagemap; /* Page map for region (if any). */
+ BlockAllocatorMap *pagemap; /* Page map for region (if any). */
Size contiguous_pages; /* Last reported contiguous free pages. */
dlist_node fl_node; /* Freelist links. */
} sb_region;
typedef struct sb_shared_region
{
relptr(FreePageManager) fpm;
- relptr(sb_map) pagemap;
+ relptr(BlockAllocatorMap) pagemap;
relptr(sb_allocator) allocator;
int lwlock_tranche_id;
char lwlock_tranche_name[FLEXIBLE_ARRAY_MEMBER];