From 09bb5bb6e07923788c188971e90c7c57e5346e9e Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Tue, 13 May 2014 10:51:35 -0400
Subject: [PATCH] Renaming cleanup.

---
 src/backend/utils/mmgr/aregion.c  |   2 +-
 src/backend/utils/mmgr/balloc.c   | 109 ++++++++++++++++--------------
 src/backend/utils/mmgr/freepage.c |   6 +-
 3 files changed, 61 insertions(+), 56 deletions(-)

diff --git a/src/backend/utils/mmgr/aregion.c b/src/backend/utils/mmgr/aregion.c
index e5054c0495..f40c46782d 100644
--- a/src/backend/utils/mmgr/aregion.c
+++ b/src/backend/utils/mmgr/aregion.c
@@ -389,7 +389,7 @@ GetRegionForPrivateAllocation(Size npages)
  *
  * NB: We temporarily set region->contiguous_pages to a value one more
  * than the actual number.  This is because calling FreePageManagerPut
- * will provoke a callback to sb_report_contiguous_freespace, which we
+ * will provoke a callback to ReportRegionContiguousFreespace, which we
  * want to exit quickly and, in particular, without deallocating the
  * region.
  */
diff --git a/src/backend/utils/mmgr/balloc.c b/src/backend/utils/mmgr/balloc.c
index edcce77a19..d155dbab7d 100644
--- a/src/backend/utils/mmgr/balloc.c
+++ b/src/backend/utils/mmgr/balloc.c
@@ -170,14 +170,15 @@ static bool BlockAllocatorEnsureActiveBlock(char *base,
 							AllocatorRegion *region,
 							BlockAllocatorContext *context,
 							BlockAllocatorHeap *heap, int size_class);
-static void sb_init_span(char *base, BlockAllocatorSpan *span,
-			 BlockAllocatorHeap *heap, char *ptr, Size npages,
-			 uint16 size_class);
-static void sb_out_of_memory_error(BlockAllocatorContext *context);
-static bool sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
-			 int fromclass, int toclass);
-static void sb_unlink_span(char *base, BlockAllocatorHeap *heap,
-			 BlockAllocatorSpan *span);
+static void BlockAllocatorInitSpan(char *base, BlockAllocatorSpan *span,
+					   BlockAllocatorHeap *heap, char *ptr, Size npages,
+					   uint16 size_class);
+static void BlockAllocatorMemoryError(BlockAllocatorContext *context);
+static bool BlockAllocatorTransferFirstSpan(char *base,
+								BlockAllocatorHeap *heap,
+								int fromclass, int toclass);
+static void BlockAllocatorUnlinkSpan(char *base, BlockAllocatorHeap *heap,
+						 BlockAllocatorSpan *span);
 
 /*
  * Create a backend-private allocator.
@@ -258,7 +259,7 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
 		if (span == NULL)
 		{
 			if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
-				sb_out_of_memory_error(context);
+				BlockAllocatorMemoryError(context);
 			return NULL;
 		}
 
@@ -272,7 +273,7 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
 		{
 			/* XXX. Free the span. */
 			if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
-				sb_out_of_memory_error(context);
+				BlockAllocatorMemoryError(context);
 			return NULL;
 		}
 		ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page);
@@ -280,7 +281,8 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
 	/* Initialize span and pagemap. */
 	if (lock != NULL)
 		LWLockAcquire(lock, LW_EXCLUSIVE);
-	sb_init_span(base, span, heap, ptr, npages, BA_SCLASS_SPAN_LARGE);
+	BlockAllocatorInitSpan(base, span, heap, ptr, npages,
+						   BA_SCLASS_SPAN_LARGE);
 	if (lock != NULL)
 		LWLockRelease(lock);
 	BlockAllocatorMapSet(region->pagemap, first_page, span);
@@ -324,12 +326,12 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
 	/* Attempt the actual allocation. */
 	result = BlockAllocatorAllocGuts(base, region, context, size_class);
 	if (result == NULL && (flags & SB_ALLOC_SOFT_FAIL) == 0)
-		sb_out_of_memory_error(context);
+		BlockAllocatorMemoryError(context);
 	return result;
 }
 
 /*
- * Free memory allocated via sb_alloc.
+ * Free memory allocated via BlockAllocatorAlloc.
  */
 void
 BlockAllocatorFree(void *ptr)
@@ -373,7 +375,7 @@ BlockAllocatorFree(void *ptr)
 		BlockAllocatorHeap *heap = relptr_access(base, span->parent);
 		Size		first_page;
 
-		sb_unlink_span(base, heap, span);
+		BlockAllocatorUnlinkSpan(base, heap, span);
 		first_page = fpm_pointer_to_page(fpm_base,
 										 relptr_access(base, span->start));
 		FreePageManagerPut(region->fpm, first_page, span->npages);
@@ -404,7 +406,7 @@ BlockAllocatorFree(void *ptr)
 		 * move it to the next-lower fullness class.
 		 */
 
-		sb_unlink_span(base, heap, span);
+		BlockAllocatorUnlinkSpan(base, heap, span);
 		span->fclass = BA_FULLNESS_CLASSES - 2;
 		relptr_copy(span->nextspan, heap->spans[BA_FULLNESS_CLASSES - 2]);
 		relptr_store(base, span->prevspan, (BlockAllocatorSpan *) NULL);
@@ -428,7 +430,7 @@ BlockAllocatorFree(void *ptr)
 		 * block, it will be very inefficient if we deallocate and
 		 * reallocate the block every time.
 		 */
-		sb_unlink_span(base, heap, span);
+		BlockAllocatorUnlinkSpan(base, heap, span);
 		first_page = fpm_pointer_to_page(fpm_base,
 										 relptr_access(base, span->start));
 		FreePageManagerPut(region->fpm, first_page, span->npages);
@@ -602,7 +604,7 @@ BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
 {
 	BlockAllocatorHeap *heap = &context->heaps[size_class];
 	LWLock	   *lock = relptr_access(base, heap->lock);
-	BlockAllocatorSpan *active_sb;
+	BlockAllocatorSpan *span;
 	char	   *block;
 	char	   *result;
 	Size		obsize;
@@ -630,26 +632,27 @@ BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
 	 * it should never be completely full.  Thus we can either pop the
 	 * free list or, failing that, initialize a new object.
 	 */
-	active_sb = relptr_access(base, heap->spans[1]);
-	Assert(active_sb != NULL && active_sb->nallocatable > 0);
-	block = relptr_access(base, active_sb->start);
+	span = relptr_access(base, heap->spans[1]);
+	Assert(span != NULL && span->nallocatable > 0);
+	block = relptr_access(base, span->start);
 	Assert(size_class < BA_NUM_SIZE_CLASSES);
 	obsize = balloc_size_classes[size_class];
-	if (active_sb->firstfree != BA_SPAN_NOTHING_FREE)
+	if (span->firstfree != BA_SPAN_NOTHING_FREE)
 	{
-		result = block + active_sb->firstfree * obsize;
-		active_sb->firstfree = * (Size *) result;
+		result = block + span->firstfree * obsize;
+		span->firstfree = * (Size *) result;
 	}
 	else
 	{
-		result = block + active_sb->ninitialized * obsize;
-		++active_sb->ninitialized;
+		result = block + span->ninitialized * obsize;
+		++span->ninitialized;
 	}
-	--active_sb->nallocatable;
+	--span->nallocatable;
 
 	/* If it's now full, move it to the highest-numbered fullness class. */
-	if (active_sb->nallocatable == 0)
-		sb_transfer_first_span(base, heap, 1, BA_FULLNESS_CLASSES - 1);
+	if (span->nallocatable == 0)
+		BlockAllocatorTransferFirstSpan(base, heap, 1,
+										BA_FULLNESS_CLASSES - 1);
 
 	/* We're all done.  Release the lock. */
 	if (lock != NULL)
@@ -767,10 +770,10 @@ BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
 	 */
 	Assert(relptr_is_null(heap->spans[1]));
 	for (fclass = 2; fclass < BA_FULLNESS_CLASSES - 1; ++fclass)
-		if (sb_transfer_first_span(base, heap, fclass, 1))
+		if (BlockAllocatorTransferFirstSpan(base, heap, fclass, 1))
 			return true;
 	if (relptr_is_null(heap->spans[1]) &&
-		sb_transfer_first_span(base, heap, 0, 1))
+		BlockAllocatorTransferFirstSpan(base, heap, 0, 1))
 		return true;
 
 	/*
@@ -815,7 +818,7 @@ BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
 	span = (BlockAllocatorSpan *) ptr;
 
 	/* Initialize span and pagemap. */
-	sb_init_span(base, span, heap, ptr, npages, size_class);
+	BlockAllocatorInitSpan(base, span, heap, ptr, npages, size_class);
 	for (i = 0; i < npages; ++i)
 		BlockAllocatorMapSet(region->pagemap, first_page + i, span);
 
@@ -826,8 +829,9 @@ BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
  * Add a new span to fullness class 1 of the indicated heap.
  */
 static void
-sb_init_span(char *base, BlockAllocatorSpan *span, BlockAllocatorHeap *heap,
-			 char *ptr, Size npages, uint16 size_class)
+BlockAllocatorInitSpan(char *base, BlockAllocatorSpan *span,
+					   BlockAllocatorHeap *heap, char *ptr, Size npages,
+					   uint16 size_class)
 {
 	BlockAllocatorSpan *head = relptr_access(base, heap->spans[1]);
 	Size		obsize = balloc_size_classes[size_class];
@@ -861,29 +865,13 @@ sb_init_span(char *base, BlockAllocatorSpan *span, BlockAllocatorHeap *heap,
 	span->fclass = 1;
 }
 
-/*
- * Report an out-of-memory condition.
- */
-static void
-sb_out_of_memory_error(BlockAllocatorContext *context)
-{
-	if (context->private)
-		ereport(ERROR,
-				(errcode(ERRCODE_OUT_OF_MEMORY),
-				 errmsg("out of memory")));
-	else
-		ereport(ERROR,
-				(errcode(ERRCODE_OUT_OF_MEMORY),
-				 errmsg("out of shared memory")));
-}
-
 /*
  * Transfer the first span in one fullness class to the head of another
  * fullness class.
  */
 static bool
-sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
-					   int fromclass, int toclass)
+BlockAllocatorTransferFirstSpan(char *base, BlockAllocatorHeap *heap,
+								int fromclass, int toclass)
 {
 	BlockAllocatorSpan *span;
 	BlockAllocatorSpan *nextspan;
@@ -910,11 +898,28 @@ sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
 	return true;
 }
 
+/*
+ * Report an out-of-memory condition.
+ */
+static void
+BlockAllocatorMemoryError(BlockAllocatorContext *context)
+{
+	if (context->private)
+		ereport(ERROR,
+				(errcode(ERRCODE_OUT_OF_MEMORY),
+				 errmsg("out of memory")));
+	else
+		ereport(ERROR,
+				(errcode(ERRCODE_OUT_OF_MEMORY),
+				 errmsg("out of shared memory")));
+}
+
 /*
  * Remove span from current list.
  */
 static void
-sb_unlink_span(char *base, BlockAllocatorHeap *heap, BlockAllocatorSpan *span)
+BlockAllocatorUnlinkSpan(char *base, BlockAllocatorHeap *heap,
+						 BlockAllocatorSpan *span)
 {
 	BlockAllocatorSpan *nextspan = relptr_access(base, span->nextspan);
 	BlockAllocatorSpan *prevspan = relptr_access(base, span->prevspan);
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 0005c3ea55..698192b3a6 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -220,7 +220,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
 /*
  * Return the size of the largest run of pages that the user could
  * successfully get.  (If this value subsequently increases, it will trigger
- * a callback to sb_report_contiguous_freespace.)
+ * a callback to ReportRegionContiguousFreespace.)
  */
 Size
 FreePageManagerInquireLargest(FreePageManager *fpm)
@@ -267,7 +267,7 @@ FreePageManagerInquireLargest(FreePageManager *fpm)
 /*
  * Transfer a run of pages to the free page manager.  (If the number of
  * contiguous pages now available is larger than it was previously, then
- * we attempt to report this to the sb_region module.)
+ * we report this to the allocation region manager.)
  */
 void
 FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
@@ -299,7 +299,7 @@ FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
 
 	/*
 	 * If we now have more contiguous pages available than previously
-	 * reported, attempt to notify sb_region system.
+	 * reported, attempt to notify the allocation region manager.
 	 *
 	 * Reporting is only needed for backend-private regions, so we can skip
 	 * it when locking is in use, or if we discover that the region has an
-- 
2.39.5
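
A note for readers new to this allocator: the fast path in
BlockAllocatorAllocGuts() above hands out an object from the active span
either by popping an intrusive free list threaded through the free objects
themselves (span->firstfree), or, when that list is empty, by initializing
the next never-yet-used object (span->ninitialized).  The standalone sketch
below mimics that logic under simplified assumptions; the toy_span,
toy_alloc, and toy_free names are illustrative stand-ins and are not part
of this patch or of PostgreSQL.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NOTHING_FREE SIZE_MAX	/* stand-in for BA_SPAN_NOTHING_FREE */

/* Simplified stand-in for a span: one block carved into fixed-size
 * objects, with an intrusive free list threaded through the free
 * objects themselves. */
typedef struct
{
	char	   *block;			/* start of the span's storage */
	size_t		obsize;			/* object size for this size class */
	size_t		nmax;			/* total objects the span can hold */
	size_t		ninitialized;	/* objects handed out at least once */
	size_t		nallocatable;	/* objects currently free */
	size_t		firstfree;		/* head of intrusive free list (index) */
} toy_span;

/* Allocation fast path: pop the free list if it is non-empty;
 * otherwise initialize a fresh, never-yet-used object. */
static void *
toy_alloc(toy_span *span)
{
	char	   *result;

	if (span->nallocatable == 0)
		return NULL;			/* span is full */
	if (span->firstfree != NOTHING_FREE)
	{
		/* A free object stores the index of the next free object. */
		result = span->block + span->firstfree * span->obsize;
		span->firstfree = *(size_t *) result;
	}
	else
	{
		result = span->block + span->ninitialized * span->obsize;
		++span->ninitialized;
	}
	--span->nallocatable;
	return result;
}

/* Free path: push the object back onto the intrusive free list by
 * writing the old list head into its first bytes. */
static void
toy_free(toy_span *span, void *ptr)
{
	size_t		index = ((char *) ptr - span->block) / span->obsize;

	*(size_t *) ptr = span->firstfree;
	span->firstfree = index;
	++span->nallocatable;
}

int
main(void)
{
	toy_span	span = {malloc(16 * 64), 64, 16, 0, 16, NOTHING_FREE};
	void	   *a = toy_alloc(&span);
	void	   *b = toy_alloc(&span);
	void	   *a2;

	(void) b;
	toy_free(&span, a);
	a2 = toy_alloc(&span);
	assert(a2 == a);			/* recycled object is reused first */
	(void) a2;
	printf("nallocatable = %zu\n", span.nallocatable);	/* prints 14 */
	free(span.block);
	return 0;
}

Because the free list lives inside the free objects, a span needs no side
table for free-slot bookkeeping; the trade-off is that every object must be
large enough to hold one list link (a Size in the real code, a size_t in
this sketch).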