/* lib/alloc/arena_alloc.c — bump-pointer arena allocator backed by mmap() */
/* TODO: Support malloc() backend for systems without MAP_ANONYMOUS */

#include <sys/mman.h>

#include <errno.h>
#include <setjmp.h>
#include <stdckdint.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include "_attrs.h"
#include "alloc.h"
#include "error.h"
#include "macros.h"

/* Round len up to the next multiple of align.  align must be a power of
   two; the result is unchanged when len is already a multiple of align. */
#define PAD(len, align) (((len) + (align) - 1) & ~((align) - 1))

/* One mmap()ed arena block.  The header itself lives in-band at the start
   of the mapping (see mkblk()). */
struct arena_blk {
	/* head: first byte of the mapping; tail: one past the last byte;
	   fngr: bump (“finger”) pointer — the next free byte in the block. */
	uint8_t *head, *tail, *fngr;
	/* Blocks form a singly-linked list, most recent first. */
	struct arena_blk *next;
};

/* Allocate nmemb×elemsz bytes aligned to align. */
static void *alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz,
                   ptrdiff_t align);
/* Grow or shrink a previous allocation. */
static void *resize(allocator_t mem, void *ptr, ptrdiff_t oldnmemb,
                    ptrdiff_t newnmemb, ptrdiff_t elemsz, ptrdiff_t align);
/* Release a single allocation (only effective for the most recent one). */
static void free_(allocator_t mem, void *ptr, ptrdiff_t oldnmemb,
                  ptrdiff_t elemsz);
/* Unmap every block owned by the arena. */
static void freeall(allocator_t mem);
/* Map a new block of blksz bytes, or return nullptr on failure. */
static arena_blk_t *mkblk(ptrdiff_t blksz);
/* Report an allocation failure: longjmp() to mem.err or die. */
[[noreturn]] static void errjmp(jmp_buf *env);

/* Entry point of the arena allocator: dispatch to the handler matching the
   requested mode.  Free operations always yield nullptr. */
void *
arena_alloc(allocator_t mem, alloc_mode_t mode, void *ptr, ptrdiff_t oldnmemb,
            ptrdiff_t newnmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
	if (mode == ALLOC_NEW)
		return alloc(mem, newnmemb, elemsz, align);
	if (mode == ALLOC_RESIZE)
		return resize(mem, ptr, oldnmemb, newnmemb, elemsz, align);
	if (mode == ALLOC_FREE)
		free_(mem, ptr, oldnmemb, elemsz);
	else if (mode == ALLOC_FREEALL)
		freeall(mem);
	else
		unreachable();
	return nullptr;
}

/* Allocate nmemb×elemsz bytes aligned to align, first trying to bump the
   finger pointer of an existing block and falling back to mapping a fresh
   block.  Jumps to mem.err (or dies) on overflow or mmap() failure. */
void *
alloc(allocator_t mem, ptrdiff_t nmemb, ptrdiff_t elemsz, ptrdiff_t align)
{
	arena_ctx_t *ctx = mem.ctx;
	if (ctx->blksz == 0) {
		/* Lazily default the block size to the system page size */
		long blksz = sysconf(_SC_PAGESIZE);
		ctx->blksz = blksz == -1 ? 4096 : blksz;
	}

	ptrdiff_t bufsz;
	if (ckd_mul(&bufsz, nmemb, elemsz)) {
		errno = EOVERFLOW;
		errjmp(mem.err);
	}

	for (arena_blk_t *blk = ctx->_head; blk != nullptr; blk = blk->next) {
		/* off is the number of padding bytes needed to align the
		   finger pointer.  PAD() alone yields the rounded-up
		   *address*, not the padding amount, so subtract the base
		   back out. */
		ptrdiff_t nbufsz, off = PAD((uintptr_t)blk->fngr, align)
		                      - (uintptr_t)blk->fngr;
		if (ckd_add(&nbufsz, bufsz, off))
			continue;

		if (blk->tail - blk->fngr >= nbufsz) {
			void *p = blk->fngr + off;
			blk->fngr += nbufsz;
			return p;
		}
	}

	/* No block exists that is large enough for our allocation.  The user
	   data starts after the in-band block header, at the first offset
	   that both fits the header and is a multiple of align. */
	ptrdiff_t off = PAD(sizeof(arena_blk_t), align);

	if (ckd_add(&bufsz, bufsz, off)) {
		errno = EOVERFLOW;
		errjmp(mem.err);
	}

	arena_blk_t *blk = mkblk(MAX(bufsz, ctx->blksz));
	if (blk == nullptr)
		errjmp(mem.err);
	blk->next = ctx->_head;
	blk->fngr = blk->head + bufsz;
	ctx->_head = blk;
	return blk->head + off;
}

/* Resize a previous allocation to newnmemb×elemsz bytes, returning a
   pointer to the (possibly relocated) data.  Shrinks never move the data;
   grows move it only when in-place expansion is impossible.
   NOTE(review): assumes ctx->_head is non-null, i.e. ptr was obtained from
   this arena — confirm callers guarantee this.
   NOTE(review): oldnmemb*elemsz below is not overflow-checked, unlike the
   newnmemb path — presumably safe because the old size was validated when
   it was allocated; verify. */
void *
resize(allocator_t mem, void *ptr, ptrdiff_t oldnmemb, ptrdiff_t newnmemb,
       ptrdiff_t elemsz, ptrdiff_t align)
{
	if (oldnmemb == newnmemb)
		return ptr;

	arena_ctx_t *ctx = mem.ctx;
	arena_blk_t *blk = ctx->_head;

	/* There are 4 cases we should consider when resizing an allocation:

	   1.  We are shrinking the final allocation
	   2.  We are shrinking any other allocation
	   3.  We are growing the final allocation
	   4.  We are growing any other allocation

	   Shrinking is typically a no-op, however in case 1 we can be a bit smart.
	   If we want to shrink the most recent allocation we can actually shift the
	   finger pointer in the arena block to free up more space in the current
	   block for future allocations, allowing us to potentially cut down on
	   wasteful calls to mmap() and friends in the future.

	   Likewise with growing an allocation, this is typically a very simple
	   alloc+memcpy(), however we can be smart in the case that we are growing
	   the last allocation.  If we want to grow the last allocation we can
	   actually first see if there is free-space remaining in the current block
	   and simply move the finger pointer avoiding any unneccesary memory
	   wastage.  Not only this but if we don’t have enough space in the current
	   block we can attempt to resize the block using mremap() if we’re on
	   Linux. */

	/* NOTE(review): only the head block is inspected, so allocations made
	   before the head block was created always take the copy path below. */
	bool resizing_last_alloc = (uint8_t *)ptr + oldnmemb * elemsz == blk->fngr;

	if (oldnmemb > newnmemb) {
		/* Case 1: rewind the finger pointer; case 2: no-op. */
		if (resizing_last_alloc)
			blk->fngr = (uint8_t *)ptr + newnmemb * elemsz;
		return ptr;
	}

	ptrdiff_t bufsz;
	if (ckd_mul(&bufsz, newnmemb, elemsz)) {
		errno = EOVERFLOW;
		errjmp(mem.err);
	}

	if (resizing_last_alloc) {
		/* Case 3: first try to grow in place inside the block… */
		uint8_t *alloc_tail = (uint8_t *)ptr + bufsz;
		if (alloc_tail <= blk->tail) {
			blk->fngr = alloc_tail;
			return ptr;
		}
#if __linux__
		/* …then try to grow the mapping itself.  Flags are 0, i.e. no
		   MREMAP_MAYMOVE: the kernel may only extend the mapping at
		   its current address, so ptr stays valid on success. */
		void *p = mremap(blk->head, blk->tail - blk->head,
		                 alloc_tail - blk->head, 0);
		if (p != MAP_FAILED) {
			blk->fngr = blk->tail = alloc_tail;
			return ptr;
		}
#endif
	}

	/* Cases 3 (fallback) and 4: allocate anew and copy the old data. */
	void *p = alloc(mem, newnmemb, elemsz, align);
	return memcpy(p, ptr, oldnmemb * elemsz);
}

/* Release a single allocation.  This is a no-op unless ptr is the most
   recent allocation in the head block, in which case the finger pointer is
   rewound so the space can be reused. */
void
free_(allocator_t mem, void *ptr, ptrdiff_t oldnmemb, ptrdiff_t elemsz)
{
	arena_ctx_t *ctx = mem.ctx;
	arena_blk_t *blk = ctx->_head;
	/* Guard against an empty arena (e.g. a stray free after freeall());
	   the original dereferenced blk->fngr unconditionally. */
	if (blk != nullptr && (uint8_t *)ptr + oldnmemb * elemsz == blk->fngr)
		blk->fngr = ptr;
}

/* Unmap every block owned by the arena and reset it to the empty state.
   munmap() failures are deliberately ignored. */
void
freeall(allocator_t mem)
{
	arena_ctx_t *ctx = mem.ctx;
	for (arena_blk_t *blk = ctx->_head, *next; blk != nullptr; blk = next) {
		next = blk->next;
		(void)munmap(blk, blk->tail - blk->head);
	}
	ctx->_head = nullptr;
}

static arena_blk_t *
mkblk(ptrdiff_t blksz)
{
	arena_blk_t blk;
	/* blk.next and blk.fngr get set by the caller */
	blk.head = mmap(nullptr, blksz, PROT_READ | PROT_WRITE,
	                MAP_PRIVATE | MAP_ANON, -1, 0);
	if (blk.head == MAP_FAILED)
		return nullptr;
	blk.tail = blk.head + blksz;
	return memcpy(blk.head, &blk, sizeof blk);
}	

/* Non-local error exit: jump to the caller-supplied recovery point when
   one was registered, otherwise terminate via err() with the pending
   errno.  Never returns. */
void
errjmp(jmp_buf *env)
{
	if (env == nullptr)
		err("arena_alloc:");
	longjmp(*env, 1);
}