/*
* Small allocations are handled by dividing a relatively large chunk of
- * memory into many small objects of equal size. The chunks of memory used
- * for this purpose are called superblocks. Since chunks within a superblock
- * can only be used to satisfy allocations of a single size class, it's
- * important not to make the chunks too large. On the other hand, setting
- * up a chunk has some overhead, so we don't want to make them too small,
- * either.
+ * memory called a superblock into many small objects of equal size. The
+ * chunk sizes are defined by the following array. Larger size classes are
+ * spaced more widely than smaller size classes. We fudge the spacing for
+ * size classes >1k to avoid space wastage: since we know that superblocks
+ * are 64k, we bump each such size class up to the largest multiple of
+ * 8 bytes that still lets us fit the same number of objects into one
+ * superblock.
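+ *
+ * For example, with even ~256-byte spacing the class above 1280 would be
+ * 1536; a 64k superblock fits 65536/1536 = 42 such objects, and the
+ * largest multiple of 8 bytes that still fits 42 objects is 1560
+ * (65536/42, rounded down to a multiple of 8), so we use 1560 instead.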
+ *
+ * NB: Because of this fudging, if the size of a superblock is ever changed,
+ * these size classes should be reworked to be optimal for the new size.
+ *
+ * NB: The optimal spacing for size classes, as well as the size of the
+ * superblocks themselves, is not a question that has one right answer.
+ * Some allocators (such as tcmalloc) use more closely-spaced size classes
+ * than we do here, while others (like aset.c) use more widely-spaced classes.
+ * Spacing the classes more closely avoids wasting memory within individual
+ * chunks, but also means a larger number of potentially-unfilled superblocks.
+ * This system is really only suitable for allocating relatively large amounts
+ * of memory, where the unfilled superblocks will be a small percentage of
+ * the total allocations.
+ */
+static int mspan_size_classes[] = {
+ 8, 16, 24, 32, 40, 48, 56, 64, /* 8 classes separated by 8 bytes */
+ 80, 96, 112, 128, /* 4 classes separated by 16 bytes */
+ 160, 192, 224, 256, /* 4 classes separated by 32 bytes */
+ 320, 384, 448, 512, /* 4 classes separated by 64 bytes */
+ 640, 768, 896, 1024, /* 4 classes separated by 128 bytes */
+ 1280, 1560, 1816, 2048, /* 4 classes separated by ~256 bytes */
+ 2616, 3120, 3640, 4096, /* 4 classes separated by ~512 bytes */
+ 5456, 6552, 7280, 8192 /* 4 classes separated by ~1024 bytes */
+};
+#define MSPAN_SUPERBLOCK_SIZE 65536 /* must be a multiple of page size */
+#define MSPAN_PAGES_PER_SUPERBLOCK (MSPAN_SUPERBLOCK_SIZE >> MSPAN_PAGE_BITS)
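+
+/*
+ * Note: assuming MSPAN_PAGE_BITS is 12 (4kB pages), this works out to the
+ * same 16 pages per superblock that the old fixed define provided.
+ */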
+
+/*
+ * The following lookup table is used to map the size of small objects
+ * (up to 1kB) onto the corresponding size class. To use this table, round
+ * the size of the object up to the next multiple of 8 bytes, divide by 8,
+ * and subtract one; the result is the index into this array.
+ */
+static char mspan_size_class_map[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23
+};
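+
+/*
+ * For illustration only: a hypothetical helper (a sketch, not part of this
+ * patch) showing how a request size could be mapped to a size class index.
+ * Sizes of up to 1kB use the lookup table above; larger sizes fall back to
+ * a linear search of mspan_size_classes.
+ */
+#ifdef NOT_USED
+static int
+mspan_size_to_class(Size size)
+{
+ Assert(size > 0);
+ if (size <= 1024)
+ return mspan_size_class_map[(size + 7) / 8 - 1];
+ else
+ {
+ int i;
+
+ for (i = 0; i < lengthof(mspan_size_classes); ++i)
+ if (mspan_size_classes[i] >= size)
+ return i;
+ return -1; /* too big for any size class; use a large allocation */
+ }
+}
+#endif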
+
+/*
+ * We divide partially-filled superblocks into four fullness classes.
+ * Generally, fullness class N represents blocks where the percentage of
+ * free objects is >= (N * 25%) and < ((N + 1) * 25%). As an exception,
+ * however, the superblock from which we're allocating is always in
+ * fullness class 0; we only move it to the appropriate class once it is
+ * completely filled.
*/
-#define MSPAN_SMALL_OBJECT_SIZE_LIMIT 8192
-#define MSPAN_PAGES_PER_SUPERBLOCK 16
-#define MSPAN_SUPERBLOCK_SIZE \
- (MSPAN_PAGES_PER_SUPERBLOCK * MSPAN_PAGE_SIZE)
-#define MSPAN_NUMBER_OF_SIZE_CLASSSES 36
#define MSPAN_NUMBER_OF_FULLNESS_CLASSES 4
#define MSPAN_SMALL_ALLOCATION_LISTS \
- (MSPAN_NUMBER_OF_SIZE_CLASSSES * MSPAN_NUMBER_OF_FULLNESS_CLASSES)
+ (lengthof(mspan_size_classes) * MSPAN_NUMBER_OF_FULLNESS_CLASSES)
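+
+/*
+ * For illustration only: one plausible way (a sketch, not part of this
+ * patch) to compute the fullness class of a superblock with nfree free
+ * objects out of nmax total would be
+ *
+ * fclass = Min((nfree * MSPAN_NUMBER_OF_FULLNESS_CLASSES) / nmax,
+ * MSPAN_NUMBER_OF_FULLNESS_CLASSES - 1);
+ *
+ * clamping so that a completely-free superblock still lands in the last
+ * fullness class.
+ */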
+
+/*
+ * Some spans are superblocks; in those cases, the span_type will be equal
+ * to the size class. We use other constants to represent free spans,
+ * large allocations, and other types of special spans.
+ */
+#define MSPAN_TYPE_FREE ((uint16) -1)
+#define MSPAN_TYPE_LARGE ((uint16) -2)
+#define MSPAN_TYPE_SPAN_OF_SPANS ((uint16) -3)
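+
+/*
+ * Any other span_type value is a size class index, so (for example) the
+ * test span->span_type < lengthof(mspan_size_classes) identifies a
+ * superblock.
+ */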
/*
* Management information for a span of memory.
* appropriate.
*/
mspan_update_page_map(base, mgr, pageno, 1, 0);
- mspan_unlink_span(base, span); /* XXX Head of circular list. */
+ mspan_unlink_span(base, span);
if (span->npages == 1)
mspan_destroy_span(base, span);
else
static void
mspan_recycle_span(char *base, mspan_manager *mgr, mspan *span)
{
- int fidx;
+ Size fidx;
mspan *head;
#ifdef USE_ASSERT_CHECKING
Assert(relptr_is_null(span->prevspan));
#endif
- fidx = span->npages >= MSPAN_NUM_FREE_LISTS ? MSPAN_NUM_FREE_LISTS : 1;
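+ /*
+ * Spans of N pages go on free list N-1; spans of MSPAN_NUM_FREE_LISTS
+ * or more pages all share the last list.
+ */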
+ fidx = Min(span->npages, MSPAN_NUM_FREE_LISTS) - 1;
head = relptr_access(base, mgr->freelist[fidx]);
span->nextspan.relptr_off = mgr->freelist[fidx].relptr_off;
span->prevspan.relptr_off = head->prevspan.relptr_off;
static void
mspan_release_span(char *base, mspan_manager *mgr, mspan *span)
{
- mspan *preceding_span = NULL;
- mspan *following_span = NULL;
+ /* Remove this span from the list which contains it. */
+ mspan_unlink_span(base, span);
/*
* Find the spans that precede and follow the span to be released within
- * the address space, if they are free. In the page map, 0 means no entry
- * and any odd value means that the span is allocated, so we ignore those
- * values.
+ * the address space, and if they are free, consolidate them with this
+ * one. (In the page map, 0 means no entry and any odd value means that
+ * the span is allocated, so we ignore those values.)
*/
if (span->first_page > 0)
{
p.relptr_off = aspace_map_get(&mgr->page_map,
span->first_page - 1, base);
if (p.relptr_off != 0 && (p.relptr_off & 1) == 0)
- preceding_span = relptr_access(base, p);
+ {
+ mspan *preceding_span = relptr_access(base, p);
+
+ mspan_unlink_span(base, preceding_span);
+ span->first_page = preceding_span->first_page;
+ span->npages += preceding_span->npages;
+ mspan_destroy_span(base, preceding_span);
+ }
}
if (mgr->npages == 0 || span->first_page + span->npages < mgr->boundary)
{
f.relptr_off = aspace_map_get(&mgr->page_map,
span->first_page + span->npages, base);
if (f.relptr_off != 0 && (f.relptr_off & 1) == 0)
- following_span = relptr_access(base, f);
- }
-
- /*
- * XXX. Remove this span from the list which contains it.
- *
- * If we're blowing away the entire context, then this will be some
- * list of allocated objects ... and if we're freeing it because it's
- * empty, it'll also be some list of allocated objects ... but if it's
- * a span of spans then perhaps not.
- */
-
- /*
- * XXX. Consolidate this span with the following span, if it's free.
- */
+ {
+ mspan *following_span = relptr_access(base, f);
+
- /*
- * XXX. Consolidate this span with the previous span, if it's free.
- */
+ mspan_unlink_span(base, following_span);
+ span->npages += following_span->npages;
+ mspan_destroy_span(base, following_span);
+ }
+ }
/*
* Make new page map entries for the span.
if (span->npages > 1)
mspan_update_page_map(base, mgr, span->first_page + span->npages - 1,
1, ((char *) span) - base);
+
+ /* Put the span on the appropriate free list. */
mspan_recycle_span(base, mgr, span);
}
static void
mspan_unlink_span(char *base, mspan *span)
{
+ void *parent;
mspan *next;
mspan *prev;
+ relptr(mspan) s;
+ Size newhead;
+
+ /*
+ * If this span is the head of the containing list, then we've got to
+ * adjust the head pointer to reference the next element, or zero it out.
+ */
+ parent = relptr_access(base, span->parent);
+ relptr_store(base, s, span);
+ /* If the span's next pointer is itself, it is the list's only element. */
+ newhead = (span->nextspan.relptr_off == s.relptr_off) ? 0 : span->nextspan.relptr_off;
+ switch (span->span_type)
+ {
+ case MSPAN_TYPE_FREE:
+ {
+ mspan_manager *mgr = parent;
+ Size fidx = Min(span->npages, MSPAN_NUM_FREE_LISTS) - 1;
+
+ if (mgr->freelist[fidx].relptr_off == s.relptr_off)
+ mgr->freelist[fidx].relptr_off = newhead;
+ break;
+ }
+ case MSPAN_TYPE_LARGE:
+ {
+ mspan_context *cxt = parent;
+
+ if (cxt->large_allocation.relptr_off == s.relptr_off)
+ cxt->large_allocation.relptr_off = newhead;
+ break;
+ }
+ case MSPAN_TYPE_SPAN_OF_SPANS:
+ {
+ mspan_manager *mgr = parent;
+
+ if (mgr->spansuperblocks.relptr_off == s.relptr_off)
+ mgr->spansuperblocks.relptr_off = newhead;
+ break;
+ }
+ default:
+ {
+ mspan_context *cxt = parent;
+ int i;
+ int aidx;
+
+ Assert(span->span_type < lengthof(mspan_size_classes));
+ aidx = span->span_type * MSPAN_NUMBER_OF_FULLNESS_CLASSES;
+ for (i = 0; i < MSPAN_NUMBER_OF_FULLNESS_CLASSES; ++i)
+ {
+ if (cxt->small_allocation[aidx + i].relptr_off == s.relptr_off)
+ {
+ cxt->small_allocation[aidx + i].relptr_off = newhead;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ /* Adjust next and previous pointers for our neighbors. */
next = relptr_access(base, span->nextspan);
prev = relptr_access(base, span->prevspan);
Assert(next != NULL && prev != NULL);