Diffstat (limited to 'lib/alloc/arena_alloc.c')
-rw-r--r--	lib/alloc/arena_alloc.c	155
1 file changed, 106 insertions, 49 deletions
diff --git a/lib/alloc/arena_alloc.c b/lib/alloc/arena_alloc.c
index 65123d4..d953423 100644
--- a/lib/alloc/arena_alloc.c
+++ b/lib/alloc/arena_alloc.c
@@ -1,80 +1,137 @@
+/* TODO: Support malloc() backend for systems without MAP_ANONYMOUS */
+
#include <sys/mman.h>
#include <errno.h>
+#include <setjmp.h>
#include <stdckdint.h>
-#include <stdlib.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
#include "_attrs.h"
+#include "_charN_t.h"
#include "alloc.h"
+#include "error.h"
#include "macros.h"
-#define IS_POW_2(n) ((n) != 0 && ((n) & ((n) - 1)) == 0)
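+/* Round LEN up to the next multiple of ALIGN; ALIGN must be a power of 2 */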
+#define PAD(len, align) (((len) + (align) - 1) & ~((align) - 1))
-[[_mlib_pure, _mlib_inline]] static size_t pad(size_t, size_t);
-static struct _region *mkregion(size_t);
+struct arena_blk {
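+	/* head and tail bound the mapping; fngr marks the next free byte */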
+ char8_t *head, *tail, *fngr;
+ struct arena_blk *next;
+};
-size_t
-pad(size_t len, size_t align)
-{
- return (len + align - 1) & ~(align - 1);
-}
+static void *alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz,
+ ptrdiff_t align);
+static void freeall(allocator_t mem);
+static arena_blk_t *mkblk(ptrdiff_t blksz);
+[[noreturn]] static void errjmp(jmp_buf *env);
-struct _region *
-mkregion(size_t cap)
+void *
+arena_alloc(allocator_t mem, alloc_mode_t mode, void *ptr, ptrdiff_t oldnmemb,
+ ptrdiff_t newnmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
- struct _region *r = malloc(sizeof(struct _region));
- if (r == nullptr)
+ (void)ptr;
+ (void)oldnmemb;
+ switch (mode) {
+ case ALLOC_NEW:
+ return alloc(mem, newnmemb, elemsz, align);
+ case ALLOC_RESIZE:
+ /* TODO: Make this more efficient */
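+		/* A resize always copies into fresh space; the old
+		   allocation is only reclaimed at ALLOC_FREEALL */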
+ void *p = alloc(mem, newnmemb, elemsz, align);
+ memcpy(p, ptr, MIN(oldnmemb, newnmemb) * elemsz);
+ return p;
+ case ALLOC_FREE:
+ /* TODO: Allow freeing the very last allocation */
return nullptr;
- *r = (struct _region){
- .cap = cap,
- .data = mmap(nullptr, cap, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
- };
- if (r->data == MAP_FAILED) {
- int save = errno;
- free(r);
- errno = save;
+ case ALLOC_FREEALL:
+ freeall(mem);
return nullptr;
+ default:
+ unreachable();
}
- r->last = r->data;
- return r;
}
void *
-arena_alloc(arena *a, size_t sz, size_t n, size_t align)
+alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
- ASSUME(a != nullptr);
- ASSUME(IS_POW_2(align));
+ arena_ctx_t *ctx = mem.ctx;
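+	/* Lazily default the block size to the system page size */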
+ if (ctx->blksz == 0) {
+ long blksz = sysconf(_SC_PAGESIZE);
+ if (blksz == -1)
+ errjmp(mem.err);
+ ctx->blksz = blksz;
+ }
- if (ckd_mul(&sz, sz, n)) {
+ ptrdiff_t bufsz;
+ if (ckd_mul(&bufsz, nmemb, elemsz)) {
errno = EOVERFLOW;
- return nullptr;
+ errjmp(mem.err);
}
- for (struct _region *r = a->_head; r != nullptr; r = r->next) {
- size_t nlen, off = pad(r->len, align);
+ for (arena_blk_t *blk = ctx->_head; blk != nullptr; blk = blk->next) {
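+		/* Bytes needed to bump the finger up to the requested alignment */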
+		ptrdiff_t nbufsz, off = PAD((uintptr_t)blk->fngr, align)
+		                      - (uintptr_t)blk->fngr;
+ if (ckd_add(&nbufsz, bufsz, off))
+ continue;
- /* Technically there are other ways to solve this… but at this point you
- might as well just fail */
- if (ckd_add(&nlen, off, sz)) {
- errno = EOVERFLOW;
- return nullptr;
+ if (blk->tail - blk->fngr >= nbufsz) {
+ void *p = blk->fngr + off;
+ blk->fngr += nbufsz;
+ return p;
}
+ }
- if (nlen <= r->cap) {
- void *ret = (char *)r->data + off;
- r->len = nlen;
- r->last = ret;
- return ret;
- }
+ /* No page exists that is large enough for our allocation */
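+	/* Each block embeds its own header; pad past it so the payload
+	   that follows keeps the requested alignment */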
+	ptrdiff_t padding = PAD(sizeof(arena_blk_t), align)
+	                  - sizeof(arena_blk_t);
+
+ if (ckd_add(&bufsz, bufsz, sizeof(arena_blk_t))
+ || ckd_add(&bufsz, bufsz, padding))
+ {
+ errno = EOVERFLOW;
+ errjmp(mem.err);
}
+
+ arena_blk_t *blk = mkblk(MAX(bufsz, ctx->blksz));
+ if (blk == nullptr)
+ errjmp(mem.err);
+ blk->next = ctx->_head;
+ blk->fngr = blk->head + bufsz;
+ ctx->_head = blk;
+ return blk->head + sizeof(arena_blk_t) + padding;
+}
- /* No page exists with enough space */
- struct _region *r = mkregion(MAX(sz, a->_init));
- if (r == nullptr)
+void
+freeall(allocator_t mem)
+{
+ arena_ctx_t *ctx = mem.ctx;
+ arena_blk_t *blk = ctx->_head;
+ while (blk != nullptr) {
+ arena_blk_t *next = blk->next;
+ (void)munmap(blk, blk->tail - blk->head);
+ blk = next;
+ }
+ ctx->_head = nullptr;
+}
+
+static arena_blk_t *
+mkblk(ptrdiff_t blksz)
+{
+ arena_blk_t blk;
+ /* blk.next and blk.fngr get set by the caller */
+ blk.head = mmap(nullptr, blksz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (blk.head == MAP_FAILED)
return nullptr;
- r->next = a->_head;
- r->len = sz;
- a->_head = r;
- return r->data;
+ blk.tail = blk.head + blksz;
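+	/* Store the header at the start of its own mapping, so the
+	   mapping and its metadata share one munmap() later */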
+ return memcpy(blk.head, &blk, sizeof blk);
+}
+
+void
+errjmp(jmp_buf *env)
+{
+ if (env != nullptr)
+ longjmp(*env, 1);
+ err("arena_alloc:");
}
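
A minimal usage sketch of the interface this commit targets. It assumes
arena_ctx_t zero-initializes to an empty arena, that allocator_t exposes the
.ctx and .err members used above, and that alloc.h provides the ALLOC_* modes;
names not present in this diff are illustrative.

#include <setjmp.h>
#include <stdio.h>
#include "alloc.h"

int
main(void)
{
	jmp_buf env;
	arena_ctx_t ctx = {0};	/* blksz == 0: fall back to the page size */
	allocator_t mem = {.ctx = &ctx, .err = &env};

	if (setjmp(env) != 0) {	/* errjmp() lands here on failure */
		perror("arena_alloc");
		return 1;
	}

	/* 128 ints from the arena, aligned for int */
	int *xs = arena_alloc(mem, ALLOC_NEW, nullptr, 0, 128,
	                      sizeof *xs, alignof(int));
	for (int i = 0; i < 128; i++)
		xs[i] = i;

	/* Release every block in one call */
	arena_alloc(mem, ALLOC_FREEALL, nullptr, 0, 0, 0, 0);
	return 0;
}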