/* lib/alloc/arena_alloc.c */

/* TODO: Support malloc() backend for systems without MAP_ANONYMOUS */

#include <sys/mman.h>

#include <errno.h>
#include <setjmp.h>
#include <stdckdint.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include "_attrs.h"
#include "_charN_t.h"
#include "alloc.h"
#include "error.h"
#include "macros.h"

/* Compute the number of padding bytes needed to round len up to the next
   multiple of align, which must be a power of 2 (e.g. PAD(13, 8) == 3) */
#define PAD(len, align) ((ptrdiff_t)(-(uintptr_t)(len) & ((align) - 1)))

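/* A single mmap()ed block.  head and tail delimit the entire mapping
   (including this header, which is stored at its start), fngr (the
   "finger") points at the first free byte, and next threads the blocks
   into a singly linked list. */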
struct arena_blk {
	char8_t *head, *tail, *fngr;
	struct arena_blk *next;
};

static void *alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz,
				   ptrdiff_t align);
static void freeall(allocator_t mem);
static arena_blk_t *mkblk(ptrdiff_t blksz);
[[noreturn]] static void errjmp(jmp_buf *env);

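/*
 * Dispatch an allocation request on the given arena.  A minimal usage
 * sketch (assuming nothing about allocator_t beyond the .ctx and .err
 * fields this file uses):
 *
 *	arena_ctx_t ctx = {0};            // blksz == 0 selects the page size
 *	allocator_t mem = {.ctx = &ctx};  // .err == nullptr makes failures fatal
 *	int *xs = arena_alloc(mem, ALLOC_NEW, nullptr, 0, 128,
 *	                      sizeof *xs, alignof(int));
 *	arena_alloc(mem, ALLOC_FREEALL, nullptr, 0, 0, 0, 0);
 */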
void *
arena_alloc(allocator_t mem, alloc_mode_t mode, void *ptr, ptrdiff_t oldnmemb,
			ptrdiff_t newnmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
	(void)ptr;
	(void)oldnmemb;
	switch (mode) {
	case ALLOC_NEW:
		return alloc(mem, newnmemb, elemsz, align);
	case ALLOC_RESIZE:
		/* TODO: Make this more efficient */
		void *p = alloc(mem, newnmemb, elemsz, align);
		memcpy(p, ptr, MIN(oldnmemb, newnmemb) * elemsz);
		return p;
	case ALLOC_FREE:
		/* TODO: Allow freeing the very last allocation */
		return nullptr;
	case ALLOC_FREEALL:
		freeall(mem);
		return nullptr;
	default:
		unreachable();
	}
}

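/*
 * Bump allocator backing ALLOC_NEW: returns a pointer to nmemb * elemsz
 * bytes aligned to align, either carved out of an existing block or out
 * of a freshly mapped one.  Never returns on failure; see errjmp().
 */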
void *
alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
	arena_ctx_t *ctx = mem.ctx;
	if (ctx->blksz == 0) {
		long blksz = sysconf(_SC_PAGESIZE);
		if (blksz == -1)
			errjmp(mem.err);
		ctx->blksz = blksz;
	}

	ptrdiff_t bufsz;
	if (ckd_mul(&bufsz, nmemb, elemsz)) {
		errno = EOVERFLOW;
		errjmp(mem.err);
	}

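	/* First fit: take the first existing block whose free space can hold
	   the request once the finger has been padded up to align */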
	for (arena_blk_t *blk = ctx->_head; blk != nullptr; blk = blk->next) {
		ptrdiff_t nbufsz, off = PAD((uintptr_t)blk->fngr, align);
		if (ckd_add(&nbufsz, bufsz, off))
			continue;

		if (blk->tail - blk->fngr >= nbufsz) {
			void *p = blk->fngr + off;
			blk->fngr += nbufsz;
			return p;
		}
	}

	/* No existing block has enough free space, so map a new one */
	ptrdiff_t padding = PAD(sizeof(arena_blk_t), align);

	if (ckd_add(&bufsz, bufsz, sizeof(arena_blk_t))
	 || ckd_add(&bufsz, bufsz, padding))
	{
		errno = EOVERFLOW;
		errjmp(mem.err);
	}
	
	arena_blk_t *blk = mkblk(MAX(bufsz, ctx->blksz));
	if (blk == nullptr)
		errjmp(mem.err);
	blk->next = ctx->_head;
	blk->fngr = blk->head + bufsz;
	ctx->_head = blk;
	return blk->head + sizeof(arena_blk_t) + padding;
}

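/* Unmap every block in the arena, returning it to its empty state */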
void
freeall(allocator_t mem)
{
	arena_ctx_t *ctx = mem.ctx;
	arena_blk_t *blk = ctx->_head;
	while (blk != nullptr) {
		arena_blk_t *next = blk->next;
		(void)munmap(blk, blk->tail - blk->head);
		blk = next;
	}
	ctx->_head = nullptr;
}

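/*
 * Map a fresh block of blksz bytes, storing the block header at the
 * start of the mapping itself.  Returns nullptr (with errno set by
 * mmap()) on failure.
 */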
static arena_blk_t *
mkblk(ptrdiff_t blksz)
{
	arena_blk_t blk;
	/* blk.next and blk.fngr get set by the caller */
	blk.head = mmap(nullptr, blksz, PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANON, -1, 0);
	if (blk.head == MAP_FAILED)
		return nullptr;
	blk.tail = blk.head + blksz;
	return memcpy(blk.head, &blk, sizeof blk);
}

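/* Report an allocation failure: longjmp() to the caller-provided jump
   buffer if one was given, otherwise fail fatally via err() */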
void
errjmp(jmp_buf *env)
{
	if (env != nullptr)
		longjmp(*env, 1);
	err("arena_alloc:");
}