Renaming cleanup.
author     Robert Haas <rhaas@postgresql.org>
           Tue, 13 May 2014 14:51:35 +0000 (10:51 -0400)
committer  Robert Haas <rhaas@postgresql.org>
           Tue, 13 May 2014 14:51:35 +0000 (10:51 -0400)
src/backend/utils/mmgr/aregion.c
src/backend/utils/mmgr/balloc.c
src/backend/utils/mmgr/freepage.c

diff --git a/src/backend/utils/mmgr/aregion.c b/src/backend/utils/mmgr/aregion.c
index e5054c0495aae873019c94d37ac3fe04f2c9ad14..f40c46782d9b5e81012dfd822fddbcd8e52f0c67 100644
--- a/src/backend/utils/mmgr/aregion.c
+++ b/src/backend/utils/mmgr/aregion.c
@@ -389,7 +389,7 @@ GetRegionForPrivateAllocation(Size npages)
         *
         * NB: We temporarily set region->contiguous_pages to a value one more
         * than the actual number.  This is because calling FreePageManagerPut
-        * will provoke a callback to sb_report_contiguous_freespace, which we
+        * will provoke a callback to ReportRegionContiguousFreespace, which we
         * want to exit quickly and, in particular, without deallocating the
         * region.
         */
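The comment above describes a small re-entrancy trick rather than showing it: before the pages are handed back with FreePageManagerPut, region->contiguous_pages is bumped to one more than the true value, so that the ReportRegionContiguousFreespace callback provoked by the put sees no improvement and returns immediately instead of recycling the region. The self-contained sketch below illustrates that pattern with hypothetical names; it is not the patch's code, and the callback's internal logic is only inferred from the comment.

#include <stddef.h>

struct region
{
	size_t		contiguous_pages;	/* largest free run last reported */
};

/* Stand-in for ReportRegionContiguousFreespace: do nothing unless the
 * largest free run has actually grown (behavior inferred from the comment). */
static void
report_contiguous_freespace(struct region *r, size_t largest)
{
	if (largest <= r->contiguous_pages)
		return;					/* exit quickly, leave the region alone */
	r->contiguous_pages = largest;
	/* ... real code might consider recycling the region here ... */
}

/* Stand-in for the caller: over-report by one page while returning pages,
 * so the callback triggered by the put is a no-op. */
static void
return_pages_quietly(struct region *r, size_t npages)
{
	r->contiguous_pages = npages + 1;		/* one more than the truth */
	report_contiguous_freespace(r, npages);	/* provoked by the put */
	r->contiguous_pages = npages;			/* restore the real value */
}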
diff --git a/src/backend/utils/mmgr/balloc.c b/src/backend/utils/mmgr/balloc.c
index edcce77a1955c9ac1f0d6820ea17ed7f6f1b94e6..d155dbab7d29ed84a69f125c26e7953b5f286abc 100644
--- a/src/backend/utils/mmgr/balloc.c
+++ b/src/backend/utils/mmgr/balloc.c
@@ -170,14 +170,15 @@ static bool BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
                                                        BlockAllocatorContext *context,
                                                        BlockAllocatorHeap *heap,
                                                        int size_class);
-static void sb_init_span(char *base, BlockAllocatorSpan *span,
-                        BlockAllocatorHeap *heap, char *ptr, Size npages,
-                        uint16 size_class);
-static void sb_out_of_memory_error(BlockAllocatorContext *context);
-static bool sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
-                                          int fromclass, int toclass);
-static void sb_unlink_span(char *base, BlockAllocatorHeap *heap,
-                                                  BlockAllocatorSpan *span);
+static void BlockAllocatorInitSpan(char *base, BlockAllocatorSpan *span,
+                                          BlockAllocatorHeap *heap, char *ptr, Size npages,
+                                          uint16 size_class);
+static void BlockAllocatorMemoryError(BlockAllocatorContext *context);
+static bool BlockAllocatorTransferFirstSpan(char *base,
+                                                               BlockAllocatorHeap *heap,
+                                                               int fromclass, int toclass);
+static void BlockAllocatorUnlinkSpan(char *base, BlockAllocatorHeap *heap,
+                                                BlockAllocatorSpan *span);
 
 /*
  * Create a backend-private allocator.
@@ -258,7 +259,7 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
                if (span == NULL)
                {
                        if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
-                               sb_out_of_memory_error(context);
+                               BlockAllocatorMemoryError(context);
                        return NULL;
                }
 
@@ -272,7 +273,7 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
                {
                        /* XXX. Free the span. */
                        if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
-                               sb_out_of_memory_error(context);
+                               BlockAllocatorMemoryError(context);
                        return NULL;
                }
                ptr = fpm_page_to_pointer(fpm_segment_base(region->fpm), first_page);
@@ -280,7 +281,8 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
                /* Initialize span and pagemap. */
                if (lock != NULL)
                        LWLockAcquire(lock, LW_EXCLUSIVE);
-               sb_init_span(base, span, heap, ptr, npages, BA_SCLASS_SPAN_LARGE);
+               BlockAllocatorInitSpan(base, span, heap, ptr, npages,
+                                                          BA_SCLASS_SPAN_LARGE);
                if (lock != NULL)
                        LWLockRelease(lock);
                BlockAllocatorMapSet(region->pagemap, first_page, span);
@@ -324,12 +326,12 @@ BlockAllocatorAlloc(BlockAllocatorContext *context, Size size, int flags)
        /* Attempt the actual allocation. */
        result = BlockAllocatorAllocGuts(base, region, context, size_class);
        if (result == NULL && (flags & SB_ALLOC_SOFT_FAIL) == 0)
-               sb_out_of_memory_error(context);
+               BlockAllocatorMemoryError(context);
        return result;          
 }
 
 /*
- * Free memory allocated via sb_alloc.
+ * Free memory allocated via BlockAllocatorAlloc.
  */
 void
 BlockAllocatorFree(void *ptr)
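These hunks also make the allocator's two failure modes easy to see: without SB_ALLOC_SOFT_FAIL, a failed allocation ends in BlockAllocatorMemoryError and hence ereport(ERROR); with the flag, the caller gets a NULL return and chooses its own fallback. A caller-side sketch, assuming only the declarations introduced by this patch (the surrounding logic is illustrative):

/* Illustrative caller; BlockAllocatorAlloc, BlockAllocatorFree, and
 * SB_ALLOC_SOFT_FAIL are from this patch, the rest is made up. */
static void
allocation_failure_modes(BlockAllocatorContext *context)
{
	/* Hard mode: never returns NULL; out of memory raises an ERROR. */
	char	   *a = BlockAllocatorAlloc(context, 64, 0);

	/* Soft mode: out of memory is reported as a NULL return instead. */
	char	   *b = BlockAllocatorAlloc(context, 64, SB_ALLOC_SOFT_FAIL);

	if (b == NULL)
		elog(LOG, "soft allocation failure; continuing without it");

	/* Freeing needs only the pointer; the span is found via the page map. */
	BlockAllocatorFree(a);
	if (b != NULL)
		BlockAllocatorFree(b);
}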
@@ -373,7 +375,7 @@ BlockAllocatorFree(void *ptr)
                BlockAllocatorHeap *heap = relptr_access(base, span->parent);
                Size    first_page;
 
-               sb_unlink_span(base, heap, span);
+               BlockAllocatorUnlinkSpan(base, heap, span);
                first_page = fpm_pointer_to_page(fpm_base,
                                                                                 relptr_access(base, span->start));
                FreePageManagerPut(region->fpm, first_page, span->npages);
@@ -404,7 +406,7 @@ BlockAllocatorFree(void *ptr)
                 * move it to the next-lower fullness class.
                 */
 
-               sb_unlink_span(base, heap, span);
+               BlockAllocatorUnlinkSpan(base, heap, span);
                span->fclass = BA_FULLNESS_CLASSES - 2;
                relptr_copy(span->nextspan, heap->spans[BA_FULLNESS_CLASSES - 2]);
                relptr_store(base, span->prevspan, (BlockAllocatorSpan *) NULL);
@@ -428,7 +430,7 @@ BlockAllocatorFree(void *ptr)
                 * block, it will be very inefficient if we deallocate and
                 * reallocate the block every time.
                 */
-               sb_unlink_span(base, heap, span);
+               BlockAllocatorUnlinkSpan(base, heap, span);
                first_page = fpm_pointer_to_page(fpm_base,
                                                                                 relptr_access(base, span->start));
                FreePageManagerPut(region->fpm, first_page, span->npages);
@@ -602,7 +604,7 @@ BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
 {
        BlockAllocatorHeap *heap = &context->heaps[size_class];
        LWLock *lock = relptr_access(base, heap->lock);
-       BlockAllocatorSpan *active_sb;
+       BlockAllocatorSpan *span;
        char   *block;
        char   *result;
        Size    obsize;
@@ -630,26 +632,27 @@ BlockAllocatorAllocGuts(char *base, AllocatorRegion *region,
         * it should never be completely full.  Thus we can either pop the
         * free list or, failing that, initialize a new object.
         */
-       active_sb = relptr_access(base, heap->spans[1]);
-       Assert(active_sb != NULL && active_sb->nallocatable > 0);
-       block = relptr_access(base, active_sb->start);
+       span = relptr_access(base, heap->spans[1]);
+       Assert(span != NULL && span->nallocatable > 0);
+       block = relptr_access(base, span->start);
        Assert(size_class < BA_NUM_SIZE_CLASSES);
        obsize = balloc_size_classes[size_class];
-       if (active_sb->firstfree != BA_SPAN_NOTHING_FREE)
+       if (span->firstfree != BA_SPAN_NOTHING_FREE)
        {
-               result = block + active_sb->firstfree * obsize;
-               active_sb->firstfree = * (Size *) result;
+               result = block + span->firstfree * obsize;
+               span->firstfree = * (Size *) result;
        }
        else
        {
-               result = block + active_sb->ninitialized * obsize;
-               ++active_sb->ninitialized;
+               result = block + span->ninitialized * obsize;
+               ++span->ninitialized;
        }
-       --active_sb->nallocatable;
+       --span->nallocatable;
 
        /* If it's now full, move it to the highest-numbered fullness class. */
-       if (active_sb->nallocatable == 0)
-               sb_transfer_first_span(base, heap, 1, BA_FULLNESS_CLASSES - 1);
+       if (span->nallocatable == 0)
+               BlockAllocatorTransferFirstSpan(base, heap, 1,
+                                                                               BA_FULLNESS_CLASSES - 1);
 
        /* We're all done.  Release the lock. */
        if (lock != NULL)
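Apart from the active_sb to span rename, this hunk leaves the allocation fast path untouched: each span threads a free list through the freed objects themselves (firstfree holds an object index, and the first Size of each free object holds the index of the next), and when that list is empty it carves the next never-initialized object. Below is a self-contained sketch of that general technique, using hypothetical names rather than the patch's structures; the free side is the usual counterpart and is not shown in this hunk.

#include <stddef.h>

#define NOTHING_FREE ((size_t) -1)	/* counterpart of BA_SPAN_NOTHING_FREE */

struct span_sketch
{
	char	   *block;			/* storage for this span's objects */
	size_t		obsize;			/* object size for the size class */
	size_t		firstfree;		/* index of first free object, or NOTHING_FREE */
	size_t		ninitialized;	/* objects handed out at least once */
	size_t		nallocatable;	/* objects currently free */
};

static void *
span_sketch_alloc(struct span_sketch *sp)
{
	char	   *result;

	if (sp->nallocatable == 0)
		return NULL;
	if (sp->firstfree != NOTHING_FREE)
	{
		/* Pop the free list: the next index lives inside the free object. */
		result = sp->block + sp->firstfree * sp->obsize;
		sp->firstfree = *(size_t *) result;
	}
	else
	{
		/* Carve the next object that has never been used. */
		result = sp->block + sp->ninitialized * sp->obsize;
		sp->ninitialized++;
	}
	sp->nallocatable--;
	return result;
}

static void
span_sketch_free(struct span_sketch *sp, void *ptr)
{
	size_t		index = (size_t) ((char *) ptr - sp->block) / sp->obsize;

	/* Push onto the free list by storing the old head inside the object. */
	*(size_t *) ptr = sp->firstfree;
	sp->firstfree = index;
	sp->nallocatable++;
}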
@@ -767,10 +770,10 @@ BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
         */
        Assert(relptr_is_null(heap->spans[1]));
        for (fclass = 2; fclass < BA_FULLNESS_CLASSES - 1; ++fclass)
-               if (sb_transfer_first_span(base, heap, fclass, 1))
+               if (BlockAllocatorTransferFirstSpan(base, heap, fclass, 1))
                        return true;
        if (relptr_is_null(heap->spans[1]) &&
-               sb_transfer_first_span(base, heap, 0, 1))
+               BlockAllocatorTransferFirstSpan(base, heap, 0, 1))
                        return true;
 
        /*
@@ -815,7 +818,7 @@ BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
                span = (BlockAllocatorSpan *) ptr;
 
        /* Initialize span and pagemap. */
-       sb_init_span(base, span, heap, ptr, npages, size_class);
+       BlockAllocatorInitSpan(base, span, heap, ptr, npages, size_class);
        for (i = 0; i < npages; ++i)
                BlockAllocatorMapSet(region->pagemap, first_page + i, span);
 
@@ -826,8 +829,9 @@ BlockAllocatorEnsureActiveBlock(char *base, AllocatorRegion *region,
  * Add a new span to fullness class 1 of the indicated heap.
  */
 static void
-sb_init_span(char *base, BlockAllocatorSpan *span, BlockAllocatorHeap *heap,
-                        char *ptr, Size npages, uint16 size_class)
+BlockAllocatorInitSpan(char *base, BlockAllocatorSpan *span,
+                                          BlockAllocatorHeap *heap, char *ptr, Size npages,
+                                          uint16 size_class)
 {
        BlockAllocatorSpan *head = relptr_access(base, heap->spans[1]);
        Size    obsize = balloc_size_classes[size_class];
@@ -861,29 +865,13 @@ sb_init_span(char *base, BlockAllocatorSpan *span, BlockAllocatorHeap *heap,
        span->fclass = 1;
 }
 
-/*
- * Report an out-of-memory condition.
- */
-static void
-sb_out_of_memory_error(BlockAllocatorContext *context)
-{
-       if (context->private)
-               ereport(ERROR,
-                               (errcode(ERRCODE_OUT_OF_MEMORY),
-                                errmsg("out of memory")));
-       else
-               ereport(ERROR,
-                               (errcode(ERRCODE_OUT_OF_MEMORY),
-                                errmsg("out of shared memory")));
-}
-
 /*
  * Transfer the first span in one fullness class to the head of another
  * fullness class.
  */
 static bool
-sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
-                                          int fromclass, int toclass)
+BlockAllocatorTransferFirstSpan(char *base, BlockAllocatorHeap *heap,
+                                                               int fromclass, int toclass)
 {
        BlockAllocatorSpan *span;
        BlockAllocatorSpan *nextspan;
@@ -910,11 +898,28 @@ sb_transfer_first_span(char *base, BlockAllocatorHeap *heap,
        return true;
 }
 
+/*
+ * Report an out-of-memory condition.
+ */
+static void
+BlockAllocatorMemoryError(BlockAllocatorContext *context)
+{
+       if (context->private)
+               ereport(ERROR,
+                               (errcode(ERRCODE_OUT_OF_MEMORY),
+                                errmsg("out of memory")));
+       else
+               ereport(ERROR,
+                               (errcode(ERRCODE_OUT_OF_MEMORY),
+                                errmsg("out of shared memory")));
+}
+
 /*
  * Remove span from current list.
  */
 static void
-sb_unlink_span(char *base, BlockAllocatorHeap *heap, BlockAllocatorSpan *span)
+BlockAllocatorUnlinkSpan(char *base, BlockAllocatorHeap *heap,
+                                                BlockAllocatorSpan *span)
 {
        BlockAllocatorSpan *nextspan = relptr_access(base, span->nextspan);
        BlockAllocatorSpan *prevspan = relptr_access(base, span->prevspan);
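Throughout balloc.c the span lists are manipulated with relptr_access, relptr_store, and relptr_copy instead of raw pointers, and every helper takes an explicit base: the span structures can therefore live in a segment that is mapped at a different address in each backend. A minimal sketch of the relative-pointer idea, with hypothetical names and using offset 0 to mean NULL (one common convention, not necessarily the patch's):

#include <stddef.h>

typedef struct
{
	size_t		off;			/* offset from the mapping base; 0 means NULL */
} rel_pointer;

static void *
rel_access(char *base, rel_pointer rp)
{
	return rp.off == 0 ? NULL : (void *) (base + rp.off);
}

static rel_pointer
rel_store(char *base, void *ptr)
{
	rel_pointer rp;

	rp.off = (ptr == NULL) ? 0 : (size_t) ((char *) ptr - base);
	return rp;
}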
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
index 0005c3ea55163189c4dba8a2027d8ecc2a7f3d49..698192b3a658921026061a70890b72637990fe32 100644
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -220,7 +220,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
 /*
  * Return the size of the largest run of pages that the user could
  * successfully get.  (If this value subsequently increases, it will trigger
- * a callback to sb_report_contiguous_freespace.)
+ * a callback to ReportRegionContiguousFreespace.)
  */
 Size
 FreePageManagerInquireLargest(FreePageManager *fpm)
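The rewritten comment states the contract of FreePageManagerInquireLargest: it returns the longest run of pages a request could currently obtain, and a later increase in that value is what triggers ReportRegionContiguousFreespace. A caller-side sketch, assuming only the signature shown above (the check built around it is illustrative):

/* Illustrative only; FreePageManager and FreePageManagerInquireLargest are
 * from this patch, the policy around them is invented. */
static bool
run_is_available(FreePageManager *fpm, Size npages)
{
	/* Longest contiguous run the manager could hand out right now. */
	Size		largest = FreePageManagerInquireLargest(fpm);

	return largest >= npages;
}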
@@ -267,7 +267,7 @@ FreePageManagerInquireLargest(FreePageManager *fpm)
 /*
  * Transfer a run of pages to the free page manager.  (If the number of
  * contiguous pages now available is larger than it was previously, then
- * we attempt to report this to the sb_region module.)
+ * we report this to the allocation region manager.)
  */
 void
 FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
@@ -299,7 +299,7 @@ FreePageManagerPut(FreePageManager *fpm, Size first_page, Size npages)
 
        /*
         * If we now have more contiguous pages available than previously
-        * reported, attempt to notify sb_region system.
+        * reported, attempt to notify allocation region manager.
         *
         * Reporting is only needed for backend-private regions, so we can skip
         * it when locking is in use, or if we discover that the region has an
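The comment above FreePageManagerPut promises the complementary half: a returned run becomes available to later requests, and when it grows the largest contiguous run of a backend-private region, the allocation region manager is notified. A round-trip sketch, assuming the signatures visible in this diff; that FreePageManagerGet reports success through its return value is an assumption suggested by the Get/Put pairing, not something shown here:

/* Illustrative only; the page numbers are made up and the boolean result of
 * FreePageManagerGet is an assumption rather than something in this diff. */
static void
recycle_run(FreePageManager *fpm, Size first_page, Size npages)
{
	Size		got_page;

	/* Hand the run back; this may report new contiguous free space. */
	FreePageManagerPut(fpm, first_page, npages);

	/* A later request of the same size can be satisfied from that run. */
	if (FreePageManagerGet(fpm, npages, &got_page))
	{
		/* got_page now holds the first page of the allocated run. */
	}
}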