Diffstat (limited to 'src')
-rw-r--r--  src/alloc.h  18
-rw-r--r--  src/arena.c  99
2 files changed, 116 insertions, 1 deletion
diff --git a/src/alloc.h b/src/alloc.h
index 80e2384..004e6ec 100644
--- a/src/alloc.h
+++ b/src/alloc.h
@@ -1,13 +1,29 @@
#ifndef ORYX_ALLOC_H
#define ORYX_ALLOC_H

+#include <stdalign.h>
#include <stddef.h>

#include "common.h"

+typedef struct _arena *arena;
+
/* Allocate a buffer of NMEMB elements of size SIZE. If PTR is non-null then
reallocate the buffer it points to. Aborts on out-of-memory or overflow. */
void *bufalloc(void *ptr, size_t nmemb, size_t size)
- __attribute__((returns_nonnull, alloc_size(2, 3)));
+ __attribute__((returns_nonnull, warn_unused_result, alloc_size(2, 3)));
+
+/* Allocate a buffer of NMEMB elements of size SIZE with alignment ALIGN using
+ the arena-allocator A. */
+void *arena_alloc(arena *a, size_t nmemb, size_t size, size_t align)
+ __attribute__((returns_nonnull, warn_unused_result, malloc,
+ alloc_size(2, 3), alloc_align(4)));
+
+/* Deallocate all memory associated with the arena A. */
+void arena_free(arena *a)
+ __attribute__((nonnull));
+
+/* Allocate a buffer of N elements of type T using the arena-allocator A. */
+#define arena_new(a, T, n) ((T *)arena_alloc((a), (n), sizeof(T), alignof(T)))

#endif /* !ORYX_ALLOC_H */
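Usage sketch (commentary, not part of this commit): the header above only declares the interface, so the NULL-initialised handle and the point struct below are illustrative assumptions. arena_alloc in arena.c (further down) treats a null *a as an empty arena and arena_free resets the handle to NULL, so the intended pattern appears to be:

#include <stdio.h>

#include "alloc.h"

struct point { int x, y; };

int
main(void)
{
	arena a = NULL;	/* an empty arena is just a null block list */
	struct point *ps = arena_new(&a, struct point, 128);
	int *n = arena_new(&a, int, 1);
	ps[0] = (struct point){1, 2};
	*n = 3;
	printf("%d %d %d\n", ps[0].x, ps[0].y, *n);
	arena_free(&a);	/* unmaps every block; a is NULL again */
	return 0;
}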
diff --git a/src/arena.c b/src/arena.c
new file mode 100644
index 0000000..c9b20aa
--- /dev/null
+++ b/src/arena.c
@@ -0,0 +1,99 @@
+#include <sys/mman.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include "alloc.h"
+#include "errors.h"
+
+/* TODO: Support implementations without MAP_ANON? */
+#ifndef MAP_ANON
+static_assert(NULL, "MAP_ANON not available on this system");
+#endif
+
+#if DEBUG
+# define ARENA_DFLT_CAP (8)
+#else
+# define ARENA_DFLT_CAP (2048)
+#endif
+
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define IS_POW_2(n) ((n) != 0 && ((n) & ((n)-1)) == 0)
+
+struct _arena {
+ size_t len, cap;
+ void *data, *last;
+ struct _arena *next;
+};
+
+static struct _arena *mkblk(size_t) __attribute__((returns_nonnull));
+static inline size_t pad(size_t, size_t) __attribute__((const, always_inline));
+
+void *
+arena_alloc(struct _arena **a, size_t nmemb, size_t size, size_t align)
+{
+ assert(IS_POW_2(align));
+ assert(nmemb * size != 0);
+
+ if (size > SIZE_MAX / nmemb) {
+ errno = EOVERFLOW;
+ err("%s:", __func__);
+ }
+
+ size *= nmemb;
+
+ for (struct _arena *p = *a; p != NULL; p = p->next) {
+ size_t nlen, off;
+ off = pad(p->len, align);
+ nlen = size + off;
+
+ if (nlen <= p->cap) {
+ void *ret = (char *)p->data + off;
+ p->len = nlen;
+ p->last = ret;
+ return ret;
+ }
+ }
+
+ /* No page exists with enough space */
+ struct _arena *p = mkblk(MAX(size, ARENA_DFLT_CAP));
+ p->len = size;
+ p->next = *a;
+ *a = p;
+ return p->data;
+}
+
+void
+arena_free(struct _arena **a)
+{
+ struct _arena *cur, *next;
+ for (cur = *a; cur != NULL; cur = next) {
+ next = cur->next;
+ munmap(cur->data, cur->cap);
+ free(cur);
+ }
+ *a = NULL;
+}
+
+struct _arena *
+mkblk(size_t cap)
+{
+ struct _arena *a = malloc(sizeof(*a));
+ if (a == NULL)
+ err("malloc:");
+ a->cap = cap;
+ a->data = mmap(NULL, cap, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+ if (a->data == MAP_FAILED)
+ err("mmap:");
+ a->last = a->data;
+ return a;
+}
+
+size_t
+pad(size_t len, size_t align)
+{
+ return (len + align - 1) & ~(align - 1);
+}