/* Copyright (c) 2008-2021, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file memarea.c
 *
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */
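
/* A minimal usage sketch (illustrative only; real callers reach this code
 * through the declarations in memarea.h):
 *
 *   memarea_t *area = memarea_new();
 *   char *s = memarea_strdup(area, "hello");   // lives inside the area
 *   void *buf = memarea_alloc_zero(area, 128); // so does this buffer
 *   ...use s and buf...
 *   memarea_drop_all_(area); // frees s, buf, and the area in one call
 */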

#include "orconfig.h"
#include "lib/memarea/memarea.h"

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "lib/arch/bytes.h"
#include "lib/cc/torint.h"
#include "lib/smartlist_core/smartlist_core.h"
#include "lib/smartlist_core/smartlist_foreach.h"
#include "lib/log/log.h"
#include "lib/log/util_bug.h"
#include "lib/malloc/malloc.h"

#ifndef DISABLE_MEMORY_SENTINELS

/** If defined, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

/** A value which, when masked out of a pointer, produces a maximally aligned
 * pointer. */
#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK ((uintptr_t)3)
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK ((uintptr_t)7)
#else
#error "void* is neither 4 nor 8 bytes long."
#endif /* MEMAREA_ALIGN == 4 || ... */

#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
#define USE_ALIGNED_ATTRIBUTE
/** Name for the 'memory' member of a memory chunk. */
#define U_MEM mem
#else
#define U_MEM u.mem
#endif /* defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER) */

#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a memarea_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                       \
  STMT_BEGIN                                                      \
  set_uint32( &(chunk)->U_MEM[(chunk)->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[(chunk)->mem_size]);   \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else /* !defined(USE_SENTINELS) */
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif /* defined(USE_SENTINELS) */
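
/* Note that the sentinel guards the end of a whole chunk, not of each
 * individual allocation: an overrun is only detected (by the next
 * CHECK_SENTINEL() on that chunk) once a write runs past U_MEM[mem_size]
 * itself. */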

/** Round <b>ptr</b> up to the next multiple of MEMAREA_ALIGN. */
static inline void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  /* Reinstate this if bug 930 ever reappears
  tor_assert(((void*)x) >= ptr);
  */
  return (void*)x;
}
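
/* A worked example of the rounding above, assuming MEMAREA_ALIGN == 8 and
 * hence MEMAREA_ALIGN_MASK == 7:
 *
 *   realign_pointer(0x1003): (0x1003 + 7) & ~7 == 0x100a & ~7 == 0x1008
 *   realign_pointer(0x1008): (0x1008 + 7) & ~7 == 0x100f & ~7 == 0x1008
 *
 * Already-aligned pointers come back unchanged; everything else is rounded
 * up to the next multiple of 8. */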

/** Implements part of a memarea. New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at. If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Actual content of the memory chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u; /**< Union used to enforce alignment when we don't have support for
        * doing it right. */
#endif /* defined(USE_ALIGNED_ATTRIBUTE) */
} memarea_chunk_t;

/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)

/** What's the smallest chunk we'll allocate? */
#define CHUNK_SIZE 4096
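
/* Layout of a single chunk as built by alloc_chunk() below; the sentinel
 * is present only when USE_SENTINELS is defined:
 *
 *   +-------------------+------------------------+--------------+
 *   |      header       |  mem (mem_size bytes)  |   sentinel   |
 *   | CHUNK_HEADER_SIZE |  carved off in order   | SENTINEL_LEN |
 *   +-------------------+------------------------+--------------+
 *                       ^
 *                     U_MEM (aligned to MEMAREA_ALIGN)
 */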

/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};

/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz)
{
  tor_assert(sz < SIZE_T_CEILING);

  size_t chunk_size = sz < CHUNK_SIZE ? CHUNK_SIZE : sz;
  memarea_chunk_t *res;
  chunk_size += SENTINEL_LEN;
  res = tor_malloc(chunk_size);
  res->next_chunk = NULL;
  res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
  res->next_mem = res->U_MEM;
  tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
             ((char*)res)+chunk_size);
  tor_assert(realign_pointer(res->next_mem) == res->next_mem);
  SET_SENTINEL(res);
  return res;
}

/** Release <b>chunk</b> from a memarea. */
static void
memarea_chunk_free_unchecked(memarea_chunk_t *chunk)
{
  CHECK_SENTINEL(chunk);
  tor_free(chunk);
}

/** Allocate and return a new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE);
  return head;
}

/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area. */
void
memarea_drop_all_(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    memarea_chunk_free_unchecked(chunk);
  }
  area->first = NULL; /* fail fast on any post-free use */
  tor_free(area);
}

/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      memarea_chunk_free_unchecked(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->U_MEM;
}

/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}

/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
  const size_t space_remaining =
    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
  if (sz > space_remaining) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big. Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
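
/* To illustrate the strategy above with the default 4096-byte CHUNK_SIZE:
 * a request that fits in the head chunk is carved from it; a small request
 * that does not fit gets a fresh CHUNK_SIZE chunk pushed onto the head of
 * the list, so later small requests can use its leftover space; and a
 * request of (nearly) CHUNK_SIZE bytes or more gets its own oversized
 * chunk, linked in *second* so the mostly-empty head chunk keeps serving
 * small requests. */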

/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}

/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}

/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}

/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = 0;
  char *result;
  tor_assert(n < SIZE_T_CEILING);
  for (ln = 0; ln < n && s[ln]; ++ln)
    ;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln]='\0';
  return result;
}

/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
  }
  *allocated_out = a;
  *used_out = u;
}

/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);

  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    tor_assert(chunk->next_mem <=
               (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
  }
}

#else /* defined(DISABLE_MEMORY_SENTINELS) */

struct memarea_t {
  smartlist_t *pieces;
};
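
/* In this configuration every request is a plain individual heap
 * allocation tracked in a smartlist, rather than a slice of a shared
 * chunk. (Presumably the point is that heap-checking tools can then see
 * the exact bounds of each allocation; the chunked allocator hides
 * within-chunk overruns from them.) */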

memarea_t *
memarea_new(void)
{
  memarea_t *ma = tor_malloc_zero(sizeof(memarea_t));
  ma->pieces = smartlist_new();
  return ma;
}
void
memarea_drop_all_(memarea_t *area)
{
  memarea_clear(area);
  smartlist_free(area->pieces);
  tor_free(area);
}
void
memarea_clear(memarea_t *area)
{
  SMARTLIST_FOREACH(area->pieces, void *, p, tor_free_(p));
  smartlist_clear(area->pieces);
}
int
memarea_owns_ptr(const memarea_t *area, const void *ptr)
{
  SMARTLIST_FOREACH(area->pieces, const void *, p, if (ptr == p) return 1;);
  return 0;
}

void *
memarea_alloc(memarea_t *area, size_t sz)
{
  void *result = tor_malloc(sz);
  smartlist_add(area->pieces, result);
  return result;
}

void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = tor_malloc_zero(sz);
  smartlist_add(area->pieces, result);
  return result;
}
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  void *r = memarea_alloc(area, n);
  memcpy(r, s, n);
  return r;
}
char *
memarea_strdup(memarea_t *area, const char *s)
{
  size_t n = strlen(s);
  char *r = memarea_alloc(area, n+1);
  memcpy(r, s, n);
  r[n] = 0;
  return r;
}
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = strnlen(s, n);
  char *r = memarea_alloc(area, ln+1);
  memcpy(r, s, ln);
  r[ln] = 0;
  return r;
}
void
memarea_get_stats(memarea_t *area,
                  size_t *allocated_out, size_t *used_out)
{
  (void)area;
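  /* Stats are not tracked in this configuration; report a fixed,
   * arbitrary placeholder instead. */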
  *allocated_out = *used_out = 128;
}
void
memarea_assert_ok(memarea_t *area)
{
  (void)area;
}

#endif /* !defined(DISABLE_MEMORY_SENTINELS) */