Diffstat (limited to 'lib')
-rw-r--r--  lib/alloc/arena_alloc.c  77
1 file changed, 70 insertions, 7 deletions
diff --git a/lib/alloc/arena_alloc.c b/lib/alloc/arena_alloc.c
index 832f5b0..1321259 100644
--- a/lib/alloc/arena_alloc.c
+++ b/lib/alloc/arena_alloc.c
@@ -23,7 +23,9 @@ struct arena_blk {
};
static void *alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz,
- ptrdiff_t align);
+ ptrdiff_t align);
+static void *resize(allocator_t mem, void *ptr, ptrdiff_t oldnmemb,
+ ptrdiff_t newnmemb, ptrdiff_t elemsz, ptrdiff_t align);
static void freeall(allocator_t mem);
static arena_blk_t *mkblk(ptrdiff_t blksz);
[[noreturn]] static void errjmp(jmp_buf *env);
@@ -32,16 +34,11 @@ void *
arena_alloc(allocator_t mem, alloc_mode_t mode, void *ptr, ptrdiff_t oldnmemb,
ptrdiff_t newnmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
- (void)ptr;
- (void)oldnmemb;
switch (mode) {
case ALLOC_NEW:
return alloc(mem, newnmemb, elemsz, align);
case ALLOC_RESIZE:
- /* TODO: Make this more efficient */
- void *p = alloc(mem, newnmemb, elemsz, align);
- memcpy(p, ptr, MIN(oldnmemb, newnmemb) * elemsz);
- return p;
+ return resize(mem, ptr, oldnmemb, newnmemb, elemsz, align);
case ALLOC_FREE:
/* TODO: Allow freeing the very last allocation */
return nullptr;
@@ -99,6 +96,72 @@ alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz, ptrdiff_t align)
return blk->head + sizeof(arena_blk_t) + padding;
}
+void *
+resize(allocator_t mem, void *ptr, ptrdiff_t oldnmemb, ptrdiff_t newnmemb,
+ ptrdiff_t elemsz, ptrdiff_t align)
+{
+ if (oldnmemb == newnmemb)
+ return ptr;
+
+ arena_ctx_t *ctx = mem.ctx;
+ arena_blk_t *blk = ctx->_head;
+
+ /* There are 4 cases we should consider when resizing an allocation:
+
+ 1. We are shrinking the final allocation
+ 2. We are shrinking any other allocation
+ 3. We are growing the final allocation
+ 4. We are growing any other allocation
+
+ Shrinking is typically a no-op; in case 1, however, we can be a bit
+ smart. If we are shrinking the most recent allocation we can shift the
+ finger pointer in the arena block backwards, freeing up space in the
+ current block for future allocations and potentially cutting down on
+ wasteful calls to mmap() and friends.
+
+ Growing an allocation is likewise typically a simple alloc() followed
+ by a memcpy(); however, we can again be smart when growing the last
+ allocation. In that case we can first check whether there is free space
+ remaining in the current block and simply move the finger pointer,
+ avoiding any unnecessary memory wastage. Failing that, if we're on
+ Linux we can attempt to grow the block in place with mremap(). */
+
+ bool resizing_last_alloc = (uint8_t *)ptr + oldnmemb * elemsz == blk->fngr;
+
+ if (oldnmemb > newnmemb) {
+ if (resizing_last_alloc)
+ blk->fngr = (uint8_t *)ptr + newnmemb * elemsz;
+ return ptr;
+ }
+
+ ptrdiff_t bufsz;
+ if (ckd_mul(&bufsz, newnmemb, elemsz)) {
+ errno = EOVERFLOW;
+ errjmp(mem.err);
+ }
+
+ if (resizing_last_alloc) {
+ uint8_t *alloc_tail = (uint8_t *)ptr + bufsz;
+ if (alloc_tail <= blk->tail) {
+ blk->fngr = alloc_tail;
+ return ptr;
+ }
+#if __linux__
+ void *p = mremap(blk->head, blk->tail - blk->head,
+ alloc_tail - blk->head, 0);
+ if (p != MAP_FAILED) {
+ blk->fngr = blk->tail = alloc_tail;
+ return ptr;
+ }
+#endif
+ }
+
+ void *p = alloc(mem, newnmemb, elemsz, align);
+ return memcpy(p, ptr, oldnmemb * elemsz);
+}
+
void
freeall(allocator_t mem)
{
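
As a quick illustration of the new resize path, here is a minimal usage
sketch. Only arena_alloc()'s signature and the ALLOC_* modes from the patch
are taken as given; the allocator_t/arena_ctx_t setup is not shown in this
diff, so the caller below assumes a mem value already wired up to
arena_alloc(), and the function name example() is purely illustrative.

	/* Hypothetical caller; assumes mem dispatches to arena_alloc(). */
	void
	example(allocator_t mem)
	{
		/* Fresh allocation of 16 ints. */
		int *xs = arena_alloc(mem, ALLOC_NEW, nullptr, 0, 16,
		                      sizeof(int), alignof(int));

		/* Growing the most recent allocation: resize() first tries to
		   bump the finger pointer within the current block, then (on
		   Linux) mremap(), and only then falls back to alloc+memcpy. */
		xs = arena_alloc(mem, ALLOC_RESIZE, xs, 16, 32,
		                 sizeof(int), alignof(int));

		/* Shrinking the most recent allocation just moves the finger
		   pointer back, reclaiming the space for future allocations. */
		xs = arena_alloc(mem, ALLOC_RESIZE, xs, 32, 8,
		                 sizeof(int), alignof(int));
	}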