#include <sys/mman.h>

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "alloc.h"
#include "common.h"
#include "errors.h"

/* TODO: Support malloc() backend for systems without MAP_ANON */
#ifndef MAP_ANON
#	error "System not supported (missing MAP_ANON)"
#endif

/* True iff N is a nonzero power of two; N & (N - 1) clears the lowest
   set bit, leaving zero exactly when a single bit is set */
#define IS_POW_2(n) ((n) != 0 && ((n) & ((n)-1)) == 0)

struct _arena {
	/* DATA points to the start of the block’s memory while FREE points
	   to the beginning of the unused data in the block */
	void *data, *free;
	size_t cap;
	struct _arena *next;
};
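/* Every live block therefore satisfies DATA <= FREE <= (char *)DATA + CAP:
   the bytes in [DATA, FREE) hold allocations and the bytes in
   [FREE, DATA + CAP) are still available */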

/* Return a new arena block with capacity CAP */
static struct _arena *mkblk(size_t cap)
	__attribute__((returns_nonnull));

/* Return the padding required to properly align an allocation with
   alignment ALIGN at offset OFF */
static inline size_t pad(size_t off, size_t align)
	__attribute__((const, always_inline));
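/* For instance, pad(13, 8) == 3: an allocation at offset 13 needs three
   bytes of padding to start on the 8-byte boundary at offset 16, while
   pad(16, 8) == 0 */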

void *
arena_alloc(struct _arena **a, size_t nmemb, size_t size, size_t align)
{
	assert(IS_POW_2(align));
	assert(nmemb * size != 0);

	if (unlikely(size > SIZE_MAX / nmemb)) {
		errno = ENOMEM;
		err("%s:", __func__);
	}

	size *= nmemb;

	for (struct _arena *p = *a; p != NULL; p = p->next) {
		size_t off     = (char *)p->free - (char *)p->data;
		size_t padding = pad(off, align);
		size_t freespc = p->cap - off;
		size_t nsize   = size + padding;

		if (nsize <= freespc) {
			/* Skip the padding bytes so the returned pointer is
			   properly aligned */
			void *ret = (char *)p->free + padding;
			p->free = (char *)p->free + nsize;
			return ret;
		}
	}

	/* No block has enough space; map a new one.  mmap() returns
	   page-aligned memory, so the fresh block trivially satisfies ALIGN. */
	struct _arena *p = mkblk(MAX(size, _ARENA_DFLT_CAP));
	p->next = *a;
	*a = p;
	p->free = (char *)p->data + size;
	return p->data;
}
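
/* A minimal usage sketch (assuming the arena_t typedef exposed by alloc.h
   wraps struct _arena *, which is how it is treated below; alignof lives
   in C11’s <stdalign.h>):

       arena_t a = NULL;
       int *xs = arena_alloc(&a, 16, sizeof(int), alignof(int));
       xs[0] = 42;
       arena_free(&a);
*/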

void *
_arena_grow(arena_t *a, void *ptr, size_t old_nmemb, size_t new_nmemb,
            size_t size, size_t align)
{
	assert(IS_POW_2(align));
	assert(new_nmemb * size != 0);
	assert(old_nmemb < new_nmemb);

	if (unlikely(size > SIZE_MAX / new_nmemb)) {
		errno = ENOMEM;
		err("%s:", __func__);
	}

	for (struct _arena *p = *a; p != NULL; p = p->next) {
		if (ptr < p->data || ptr > p->free)
			continue;

		/* If we need to grow the given allocation, but it was the last
		   allocation made in a region, then we first see if we can just eat
		   more trailing free space in the region to avoid a memcpy(). */
		size_t oldsz = old_nmemb * size;
		if ((char *)ptr == (char *)p->free - oldsz) {
			size_t rem = p->cap - ((char *)p->free - (char *)p->data);
			size_t need = (new_nmemb - old_nmemb) * size;
			if (need <= rem) {
				p->free = (char *)p->free + need;
				return ptr;
			}

			/* NOTE: After benchmarking (albeit rather naïvely), it seems
			   that the closer we stay to the original mapping size, the
			   faster mremap() runs.  With really large mappings it
			   converges with mmap() + memcpy() on performance though, so
			   on systems with mremap() it seems to always be the better
			   option, since it’s also a bit more memory efficient.

			   We can’t pass MREMAP_MAYMOVE though, because it would
			   invalidate existing pointers to data in the mapping. */
#if __linux__
			size_t ncap = p->cap + need - rem;
			void *nptr = mremap(p->data, p->cap, ncap, 0);
			if (nptr != MAP_FAILED) {
				p->cap = ncap;
				/* The grown allocation now ends exactly at the new
				   capacity, so the block has no trailing free space */
				p->free = (char *)p->data + ncap;
				return ptr;
			}
			if (errno != ENOMEM)
				err("%s:", __func__);
#endif
		}

		void *dst = arena_alloc(a, new_nmemb, size, align);
		return memcpy(dst, ptr, old_nmemb * size);
	}

#if DEBUG
	err("%s:%d: tried to resize pointer that wasn’t allocated", __func__,
	    __LINE__);
#else
	__builtin_unreachable();
#endif
}
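
/* Growth sketch (continuing the usage sketch above; LEN and the doubling
   policy are illustrative, not part of this API):

       size_t len = 8;
       int *xs = arena_alloc(&a, len, sizeof(int), alignof(int));
       ...
       xs = _arena_grow(&a, xs, len, len * 2, sizeof(int), alignof(int));
       len *= 2;
*/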

void
arena_free(struct _arena **a)
{
	struct _arena *cur, *next;
	for (cur = *a; cur != NULL; cur = next) {
		next = cur->next;
		munmap(cur->data, cur->cap);
		free(cur);
	}
	*a = NULL;
}

snapshot_t
arena_snapshot_create(struct _arena *a)
{
	return a == NULL ? NULL : a->free;
}

void
arena_snapshot_restore(struct _arena **a, snapshot_t snp)
{
	if (snp == NULL) {
		arena_free(a);
		return;
	}

	struct _arena *cur, *next;
	for (cur = *a; cur != NULL; cur = next) {
		next = cur->next;
		if (snp < cur->data || snp > cur->free) {
			munmap(cur->data, cur->cap);
			free(cur);
		} else {
			cur->free = snp;
			*a = cur;
			return;
		}
	}
}
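
/* Snapshots allow cheap rollback of scratch allocations.  A sketch,
   continuing the examples above:

       snapshot_t snp = arena_snapshot_create(a);
       ... allocate temporary data from A ...
       arena_snapshot_restore(&a, snp);

   Restoring unmaps every block newer than the snapshotted one and rewinds
   that block’s free pointer; a NULL snapshot (taken while the arena was
   empty) simply frees the whole arena. */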

struct _arena *
mkblk(size_t cap)
{
	struct _arena *a = malloc(sizeof(*a));
	if (a == NULL)
		err("malloc:");
	a->cap = cap;
	a->data = a->free = mmap(NULL, cap, PROT_READ | PROT_WRITE,
	                         MAP_PRIVATE | MAP_ANON, -1, 0);
	if (a->data == MAP_FAILED)
		err("mmap:");
	return a;
}

size_t
pad(size_t off, size_t align)
{
	/* ALIGN is a power of two, so -off & (align - 1) is the distance
	   from OFF up to the next multiple of ALIGN */
	return -off & (align - 1);
}