sb_alloc, less guts.
author     Robert Haas <rhaas@postgresql.org>
           Fri, 21 Mar 2014 16:51:12 +0000 (12:51 -0400)
committer  Robert Haas <rhaas@postgresql.org>
           Fri, 21 Mar 2014 16:51:12 +0000 (12:51 -0400)
src/backend/utils/mmgr/sb_alloc.c
src/include/utils/freepage.h

index 7128391b79de547cee9e68731f38ad746de4ad7e..8d441a2c342cd0bf9024d900c9b5cb38a4da9abc 100644 (file)
@@ -13,7 +13,8 @@
 
 #include "postgres.h"
 
-#include "utils/sb_alloc.h"
+#include "miscadmin.h"
+#include "utils/sb_region.h"
 
 /*
  * Small allocations are handled by dividing a relatively large chunk of
@@ -49,7 +50,6 @@ static const uint16 sb_size_classes[] = {
        5456, 6552, 7280, 8192                  /* 4 classes separated by ~1024 bytes */
 };
 
-#if 0
 /*
  * The following lookup table is used to map the size of small objects
  * (less than 1kB) onto the corresponding size class.  To use this table,
@@ -66,8 +66,16 @@ static char sb_size_class_map[] = {
        22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
        23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23
 };
-#define MSPAN_SIZE_CLASS_MAP_QUANTUM   8
-#endif
+#define SB_SIZE_CLASS_MAP_QUANTUM      8
+
+/* Special size classes (regular classes follow, offset by SB_SCLASS_FIRST_REGULAR). */
+#define SB_SCLASS_SPAN_OF_SPANS			0	/* superblock used to allocate span objects themselves */
+#define SB_SCLASS_SPAN_LARGE			1	/* presumably marks a span backing one large object — TODO confirm */
+#define SB_SCLASS_FIRST_REGULAR			2	/* first regular size class; added to sb_size_classes index */
+
+/* Helper functions. */
+static char *sb_alloc_guts(sb_region *region, sb_allocator *a, int size_class);
+static void sb_out_of_memory_error(sb_allocator *a);
 
 /*
  * Create a backend-private allocator.
@@ -104,3 +112,130 @@ sb_create_private_allocator(void)
 
        return a;
 }
+
+/*
+ * Allocate memory.
+ *
+ * Requests larger than the biggest regular size class are satisfied with a
+ * raw run of pages; everything else is mapped to a size class and handed to
+ * sb_alloc_guts.  With SB_ALLOC_SOFT_FAIL, returns NULL on failure instead
+ * of raising an out-of-memory error.
+ */
+void *
+sb_alloc(sb_allocator *a, Size size, int flags)
+{
+	sb_region *region = NULL;
+	char *base = NULL;
+	int		size_class;
+	char   *result;
+
+	/*
+	 * For shared memory allocation, pointers are relative to the start of the
+	 * region, so finding out that information is essential.  For
+	 * backend-private memory allocation, allocators aren't uniquely tied to
+	 * a region; we'll only need to grab a region if we can't allocate out of
+	 * an existing superblock.
+	 */
+	if (!a->private)
+	{
+		region = sb_lookup_region(a);
+		if (region == NULL)
+			elog(ERROR, "sb_region not found");
+		base = region->region_start;
+	}
+
+	/* If it's too big for a superblock, just grab a raw run of pages. */
+	if (size > sb_size_classes[lengthof(sb_size_classes) - 1])
+	{
+		Size	npages = fpm_size_to_pages(size);
+		Size	first_page;
+		sb_span *span;
+
+		/* Obtain a span object. */
+		span = (sb_span *) sb_alloc_guts(region, a, SB_SCLASS_SPAN_OF_SPANS);
+		if (span == NULL)
+		{
+			if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
+				sb_out_of_memory_error(a);
+			return NULL;
+		}
+
+		/* Find a region from which to allocate. */
+		if (region == NULL)
+			region = sb_private_region_for_allocator(npages);
+
+		/* Here's where we try to perform the actual allocation. */
+		if (region == NULL ||
+			!FreePageManagerGet(region->fpm, npages, &first_page))
+		{
+			/* XXX. Free the span. */
+			if ((flags & SB_ALLOC_SOFT_FAIL) == 0)
+				sb_out_of_memory_error(a);
+			return NULL;
+		}
+
+		/* XXX. Put the span on the large object heap! */
+
+		sb_map_set(region->pagemap, first_page, ((char *) span) - base);
+
+		return fpm_page_to_pointer(fpm_segment_base(region->fpm),
+								   first_page);
+	}
+
+	/* Map allocation to a size class: small sizes use the lookup table. */
+	if (size < lengthof(sb_size_class_map) * SB_SIZE_CLASS_MAP_QUANTUM)
+	{
+		int	mapidx;
+
+		/* ceil(size / quantum) - 1: sizes 1..Q map to index 0, etc. */
+		mapidx = ((size + SB_SIZE_CLASS_MAP_QUANTUM - 1) /
+					SB_SIZE_CLASS_MAP_QUANTUM) - 1;
+		size_class = sb_size_class_map[mapidx];
+	}
+	else
+	{
+		/* Larger sizes: binary search for smallest class that fits. */
+		uint16	min = sb_size_class_map[lengthof(sb_size_class_map) - 1];
+		uint16	max = lengthof(sb_size_classes) - 1;
+
+		while (min < max)
+		{
+			uint16	mid = (min + max) / 2;
+			uint16	class_size = sb_size_classes[mid];
+
+			if (class_size < size)
+				min = mid + 1;
+			else
+				max = mid;
+		}
+
+		size_class = min;
+	}
+	Assert(size <= sb_size_classes[size_class]);
+	size_class += SB_SCLASS_FIRST_REGULAR;
+
+	/* Attempt the actual allocation. */
+	result = sb_alloc_guts(region, a, size_class);
+	if (result == NULL && (flags & SB_ALLOC_SOFT_FAIL) == 0)
+		sb_out_of_memory_error(a);
+	return result;
+}
+
+/*
+ * Guts of the memory allocation routine: allocate one object of the given
+ * size class from the allocator, using the given region when one is known.
+ */
+static char *
+sb_alloc_guts(sb_region *region, sb_allocator *a, int size_class)
+{
+	/* XXX: not yet implemented; always fails, so sb_alloc currently errors. */
+	return NULL;
+}
+
+/*
+ * Raise an out-of-memory error appropriate to the allocator type:
+ * shared allocators exhaust shared memory, private ones backend memory.
+ */
+static void
+sb_out_of_memory_error(sb_allocator *a)
+{
+	if (!a->private)
+		ereport(ERROR,
+				(errcode(ERRCODE_OUT_OF_MEMORY),
+				 errmsg("out of shared memory")));
+	else
+		ereport(ERROR,
+				(errcode(ERRCODE_OUT_OF_MEMORY),
+				 errmsg("out of memory")));
+}
index 19da8d7d2be14f4460a72a31339d8c82f57d21f5..dd905d72b7efe2beb6b8569848a43a19f80a45fd 100644 (file)
@@ -69,6 +69,10 @@ struct FreePageManager
 #define fpm_pointer_to_page(base, ptr)         \
        (((Size) (((char *) (ptr)) - (base))) / FPM_PAGE_SIZE)
 
+/* Macro to convert an allocation size to a number of pages, rounding up. */
+#define fpm_size_to_pages(sz) \
+	(((sz) + FPM_PAGE_SIZE - 1) / FPM_PAGE_SIZE)
+
 /* Macros to check alignment of absolute and relative pointers. */
 #define fpm_pointer_is_page_aligned(base, ptr)         \
        (((Size) (((char *) (ptr)) - (base))) % FPM_PAGE_SIZE == 0)