0
|
1 /*
|
|
2 mempool-allocfree.c : Memory pool manager for custom alloc+free
|
|
3
|
|
4 Copyright (c) 2001-2002 Timo Sirainen
|
|
5
|
|
6 Permission is hereby granted, free of charge, to any person obtaining
|
|
7 a copy of this software and associated documentation files (the
|
|
8 "Software"), to deal in the Software without restriction, including
|
|
9 without limitation the rights to use, copy, modify, merge, publish,
|
|
10 distribute, sublicense, and/or sell copies of the Software, and to
|
|
11 permit persons to whom the Software is furnished to do so, subject to
|
|
12 the following conditions:
|
|
13
|
|
14 The above copyright notice and this permission notice shall be
|
|
15 included in all copies or substantial portions of the Software.
|
|
16
|
|
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
|
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
19 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
20 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
|
21 CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
|
22 TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
|
23 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
24 */
|
|
25
|
|
26
|
|
27 /* TODO:
|
|
28
|
|
29 - p_free() could check better that it's freeing a properly allocated
|
|
30 memory from pool it was told to.
|
|
31
|
|
32 - try to keep optimal pool sizes ie. keep them in the size they're
|
|
33 using 90% of the time
|
|
34 - add statistics functions that print/return the memory usage
|
|
35 - don't free() anything, save them to unused-list and reuse later.
|
|
36 */
|
|
37
|
|
38 /* extensive debugging */
|
|
39 /* #define POOL_DEBUG */
|
|
40
|
|
41 /* always clear the memory area that has been free'd. for debugging mostly */
|
|
42 /* #define POOL_FREE_CLEAR 0xd0 */
|
|
43
|
|
44 /* Save the used pool block for each memory allocation, so you can be sure
|
|
45 that the memory is being free'd from correct pool. Also, with this option
|
|
46 we don't need to compare pointers which makes this code fully ANSI-C
|
|
47 compatible :) */
|
|
48 #define POOL_SAVE_BLOCK
|
|
49
|
|
50 #include "lib.h"
|
|
51 #include "mempool.h"
|
|
52
|
|
53 #include <stdlib.h>
|
|
54
|
|
/* A pool is considered valid if it's non-NULL and carries the 0xbeef
   magic number set at creation time. */
#define is_pool(pool) ((pool) != NULL && (pool)->magic == 0xbeef)

/* Abort if the given pointer doesn't look like a pool created by us.
   Wrapped in do { } while (0) so the macro acts as a single statement
   and stays safe inside unbraced if/else bodies. */
#define check_pool(pool) \
	do { \
		if (!is_pool(pool)) \
			i_panic("Trying to use invalid memory pool, aborting"); \
	} while (0)

/* The highest bit of a chunk's size field marks the chunk as free. */
#define MEM_FREE_BIT 0x80000000

#define is_mem_free(size) (((size) & MEM_FREE_BIT) != 0)
#define mem_size(size) ((size) & ~MEM_FREE_BIT)

/* Chunk size fields inside a block may not be aligned for direct int
   access, so copy them byte-wise with memcpy().  Note: no trailing
   semicolon in the macro bodies - call sites supply it, which avoids
   stray empty statements and keeps if/else constructs safe. */
#define mem2int(_mem, _int) \
	memcpy(&(_int), ((unsigned char *) (_mem)), sizeof(int))

#define int2mem(_mem, _int) \
	memcpy(((unsigned char *) (_mem)), &(_int), sizeof(int))

#ifdef POOL_SAVE_BLOCK
/* each allocation additionally stores the PoolBlock it came from */
#  define ALLOC_EXTRA_SIZE sizeof(PoolBlock *)
#else
#  define ALLOC_EXTRA_SIZE 0
#endif

/* max. number of bytes to even try to allocate. This is done just to avoid
   allocating less memory than was actually requested because of integer
   overflows. -128 is much more than is actually needed. */
#define MAX_ALLOC_SIZE (UINT_MAX - 128)
|
|
82
|
|
/* One contiguous memory block owned by a pool.  The data[] area is a
   sequence of chunks, each prefixed by an unsigned int size field. */
typedef struct {
	/* total number of bytes in data[] */
	unsigned int size;
	/* free data position at data+free_index, none if >= size */
	unsigned int free_index;
	/* largest data[] size (not including int size) */
	unsigned int largest_free_space;

	/* unsigned int size - if the last bit is set the data block is used
	                       if 0, the rest of the block is free
	   unsigned char data[]
	   unsigned int size
	   ... */
	unsigned char data[1]; /* variable size; [1] is the pre-C99 trailing
	                          array trick - allocated larger than this */
} PoolBlock;
|
|
98
|
|
/* how many PoolBlock pointer slots to add each time the array grows */
#define BLOCK_INCREMENT_COUNT 5
typedef struct {
	struct Pool pool; /* vtable; must stay first so Pool* casts work */

	unsigned short magic; /* 0xbeef - validity check, see is_pool() */
	int refcount;

	int num_blocks, used_blocks; /* allocated vs. occupied slots */
	int first_block_with_space; /* -1 = all used */
	PoolBlock **blocks; /* num_blocks size */

	char name[1]; /* human readable name for blocks - used in statistics.
	                 variable size: extends past the end of the struct
	                 allocation (see pool_allocfree_create()) */
} AllocfreePool;
|
|
113
|
|
/* pool vtable; the initializer is at the bottom of this file */
static struct Pool static_allocfree_pool;

static void pool_allocfree_free(Pool pool, void *mem);
117
|
|
118 static void pool_block_create(AllocfreePool *apool, unsigned int size)
|
|
119 {
|
|
120 PoolBlock *block;
|
|
121 int alloc_size;
|
|
122
|
|
123 i_assert(size > sizeof(int));
|
|
124
|
|
125 if (apool->used_blocks >= apool->num_blocks) {
|
|
126 apool->num_blocks += BLOCK_INCREMENT_COUNT;
|
|
127
|
|
128 alloc_size = sizeof(PoolBlock *) * apool->num_blocks;
|
|
129 apool->blocks = apool->blocks == NULL ? malloc(alloc_size) :
|
|
130 realloc(apool->blocks, alloc_size);
|
|
131 if (apool->blocks == NULL) {
|
|
132 i_panic("pool_block_create(): "
|
|
133 "Out of memory when reallocating %d bytes",
|
|
134 alloc_size);
|
|
135 }
|
|
136 }
|
|
137
|
|
138 alloc_size = sizeof(PoolBlock)-1 + size;
|
|
139 block = malloc(alloc_size);
|
|
140 if (block == NULL) {
|
|
141 i_panic("pool_block_create(): "
|
|
142 "Out of memory when allocating %d bytes", alloc_size);
|
|
143 }
|
|
144
|
|
145 block->size = size;
|
|
146 block->free_index = 0;
|
|
147 block->largest_free_space = size-sizeof(int)-ALLOC_EXTRA_SIZE;
|
|
148 memset(block->data, 0, sizeof(int));
|
|
149
|
|
150 if (apool->first_block_with_space == -1)
|
|
151 apool->first_block_with_space = apool->used_blocks;
|
|
152 apool->blocks[apool->used_blocks] = block;
|
|
153 apool->used_blocks++;
|
|
154 }
|
|
155
|
|
156 static int pool_block_get_pos(AllocfreePool *apool, PoolBlock *block)
|
|
157 {
|
|
158 int pos;
|
|
159
|
|
160 for (pos = 0; pos < apool->used_blocks; pos++) {
|
|
161 if (apool->blocks[pos] == block)
|
|
162 return pos;
|
|
163 }
|
|
164
|
|
165 return -1;
|
|
166 }
|
|
167
|
|
168 static void pool_reset_first_free_block(AllocfreePool *apool)
|
|
169 {
|
|
170 int pos;
|
|
171
|
|
172 pos = apool->first_block_with_space;
|
|
173 i_assert(pos >= 0);
|
|
174
|
|
175 apool->first_block_with_space = -1;
|
|
176 for (; pos < apool->used_blocks; pos++) {
|
|
177 if (apool->blocks[pos]->largest_free_space > 0) {
|
|
178 apool->first_block_with_space = pos;
|
|
179 break;
|
|
180 }
|
|
181 }
|
|
182 }
|
|
183
|
|
184 static void pool_block_destroy(AllocfreePool *apool, PoolBlock *block)
|
|
185 {
|
|
186 int pos;
|
|
187
|
|
188 pos = pool_block_get_pos(apool, block);
|
|
189 if (pos == -1)
|
|
190 return;
|
|
191
|
|
192 memmove(apool->blocks+pos, apool->blocks+pos+1,
|
|
193 sizeof(PoolBlock *) * (apool->num_blocks-pos-1));
|
|
194 apool->used_blocks--;
|
|
195
|
|
196 if (apool->first_block_with_space > pos)
|
|
197 apool->first_block_with_space--;
|
|
198 else if (apool->first_block_with_space == pos)
|
|
199 pool_reset_first_free_block(apool);
|
|
200 free(block);
|
|
201 }
|
|
202
|
|
203 static PoolBlock *pool_block_find(AllocfreePool *apool, void *mem)
|
|
204 {
|
|
205 PoolBlock *block;
|
|
206 int pos;
|
|
207
|
|
208 #ifdef POOL_SAVE_BLOCK
|
|
209 memcpy(&block, (unsigned char *) mem - sizeof(PoolBlock *),
|
|
210 sizeof(PoolBlock *));
|
|
211
|
|
212 for (pos = 0; pos < apool->used_blocks; pos++) {
|
|
213 if (apool->blocks[pos] == block)
|
|
214 return block;
|
|
215 }
|
|
216 #else
|
|
217
|
|
218 /* NOTE: this may be a portability problem, since it compares
|
|
219 unrelated pointers. */
|
|
220 for (pos = 0; pos < apool->used_blocks; pos++) {
|
|
221 block = apool->blocks[pos];
|
|
222
|
|
223 if ((unsigned char *) mem >= block->data &&
|
|
224 (unsigned char *) mem < block->data + block->size)
|
|
225 return block;
|
|
226 }
|
|
227 #endif
|
|
228
|
|
229 return NULL;
|
|
230 }
|
|
231
|
|
#ifdef POOL_DEBUG
/* Print every chunk of a block: payload size plus whether it's free.
   Walks the chunk chain exactly like the allocator and panics if the
   sizes don't add up to the block size. */
static void pool_block_dump(PoolBlock *block)
{
	unsigned char *mem;
	unsigned int holesize, holesize_real;

	printf("Block: size %d\n", block->size);

	mem = block->data;
	mem2int(mem, holesize);
	while (holesize > 0) {
		holesize_real = mem_size(holesize);
		printf(" - %u %s\n", holesize_real,
		       is_mem_free(holesize) ? " (free)" : "");

		/* advance past this chunk's size header + payload */
		mem += holesize_real + sizeof(int);
		if (mem-block->data == block->size)
			break;

		if (mem-block->data > block->size)
			i_panic("pool_block_alloc() : corrupted pool");

		mem2int(mem, holesize);
	}
}

/* Dump all blocks of a pool - debug aid only (POOL_DEBUG builds). */
static void pool_dump(AllocfreePool *apool)
{
	int i;

	printf("Dumping pool: %s\n", apool->name);

	for (i = 0; i < apool->used_blocks; i++)
		pool_block_dump(apool->blocks[i]);
}
#endif
|
|
268
|
|
/* Carve 'size' bytes out of 'block' and return a pointer to the user
   area.  The caller guarantees the block has a large enough free space
   (largest_free_space >= size).  Updates free_index,
   largest_free_space and, when the block fills up, the pool's
   first_block_with_space. */
static void *pool_block_alloc(AllocfreePool *apool, PoolBlock *block,
			      unsigned int size)
{
	unsigned char *mem, *alloc;
	unsigned int largest, holesize, holesize_real, avail_size;
	unsigned int alloc_index, next_free_index;

#ifdef POOL_DEBUG
	printf("pool_block_alloc(%s, %d)\n", apool->name, size);
#endif

	/* the stored chunk size also covers the saved PoolBlock pointer */
	size += ALLOC_EXTRA_SIZE;

	/* search for first large enough free space in the block.
	   remember the largest free block so far so if we use the
	   current largest, we don't have to scan the largest one
	   from the beginning. */
	largest = 0;
	mem = block->data + block->free_index;
	mem2int(mem, holesize);
	while (holesize > 0) {
		holesize_real = mem_size(holesize);
		if (holesize_real + sizeof(int)*2 > block->size)
			i_panic("pool_block_alloc() : corrupted pool");

		if (is_mem_free(holesize)) {
			if (size <= holesize_real)
				break;

			if (holesize_real-ALLOC_EXTRA_SIZE > largest)
				largest = holesize_real-ALLOC_EXTRA_SIZE;
		}

		mem += holesize_real + sizeof(int);
		mem2int(mem, holesize);
	}

	/* holesize == 0 here means we ran into the untouched tail of the
	   block; otherwise 'mem' points at a free chunk that fits */
	avail_size = mem_size(holesize);
	if (avail_size == 0) {
		/* rest of the block is free */
		if ((int) (mem-block->data)+size >= block->size) {
			i_panic("pool_block_alloc(): not enough space "
				"in block (should have been)");
		}

		avail_size = block->size - (int) (mem-block->data) -
			sizeof(int);
	} else if (avail_size-ALLOC_EXTRA_SIZE != block->largest_free_space) {
		/* we didn't use the largest free space in block,
		   so we don't need to scan the rest of the data to
		   search it */
		largest = block->largest_free_space;
	}

	if (avail_size < size+sizeof(int)*3) {
		/* don't bother leaving small holes in the pool */
		size = avail_size;
	}

#ifdef POOL_DEBUG
	if (size == avail_size)
		printf(" - pool is now full\n");
#endif

	/* [i.....] -> [iXXi..] (i = space size, X = data, . = empty space) */
	int2mem(mem, size);
	alloc = mem + sizeof(int);
	alloc_index = (int) (mem-block->data);

#ifdef POOL_SAVE_BLOCK
	/* remember the owning block so p_free() can validate it */
	memcpy(alloc, &block, sizeof(PoolBlock *));
	alloc += sizeof(PoolBlock *);
#endif

	/* set mem point to beginning of next hole */
	mem += size + sizeof(int);

	if (size < avail_size) {
		/* we didn't use the whole space, mark it unused */
		avail_size = holesize == 0 ? 0 : MEM_FREE_BIT |
			(avail_size - size - sizeof(int));
		int2mem(mem, avail_size);
		next_free_index = (int) (mem-block->data);
	} else if (block->free_index == alloc_index) {
		/* we used the first free hole, get the next one */
		next_free_index = block->size;
		while (mem < block->data+block->size) {
			mem2int(mem, holesize);
			if (holesize == 0) {
				next_free_index = (int) (mem-block->data);
				break;
			}

			holesize_real = mem_size(holesize);
			if (is_mem_free(holesize)) {
				next_free_index =
					(int) (mem-block->data);
				break;
			}

			mem += holesize_real + sizeof(int);
		}
	} else {
		/* just to suppress compiler warnings */
		next_free_index = 0;
	}

	/* update the first free space index */
	if (block->free_index == alloc_index)
		block->free_index = next_free_index;

	if (holesize > 0 && largest < block->largest_free_space) {
		/* we just used the largest space in the block,
		   we'll need to find the next largest one and it
		   could be after the space we just used */
		while (mem < block->data+block->size) {
			mem2int(mem, holesize);
			if (holesize == 0)
				break;

			holesize_real = mem_size(holesize);
			if (holesize_real +
			    sizeof(int)*2 > block->size)
				i_panic("pool_block_alloc() : corrupted pool");

			if (is_mem_free(holesize) &&
			    holesize_real-ALLOC_EXTRA_SIZE > largest)
				largest = holesize_real-ALLOC_EXTRA_SIZE;

			mem += holesize_real + sizeof(int);
		}
	}

	if (holesize == 0 && mem < block->data+block->size) {
		/* rest of the block is free */
		holesize = block->size - (int) (mem-block->data + sizeof(int));
		if (holesize-ALLOC_EXTRA_SIZE > largest)
			largest = holesize-ALLOC_EXTRA_SIZE;
	}

	block->largest_free_space = largest;
	if (largest == 0)
		pool_reset_first_free_block(apool);

	return alloc;
}
|
|
415
|
|
416 /* Returns the previous block for "mem", or NULL if either there's no
|
|
417 previous block or if previous block is before any of the free blocks. */
|
|
418 static unsigned char *
|
|
419 pool_block_prev(PoolBlock *block, unsigned char *find_mem)
|
|
420 {
|
|
421 unsigned char *mem, *next_mem;
|
|
422 unsigned int holesize;
|
|
423
|
|
424 if (block->free_index >= (unsigned int) (find_mem-block->data))
|
|
425 return NULL;
|
|
426
|
|
427 mem = block->data + block->free_index;
|
|
428 mem2int(mem, holesize);
|
|
429 while (holesize > 0) {
|
|
430 next_mem = mem + mem_size(holesize) + sizeof(int);
|
|
431 if (next_mem == find_mem)
|
|
432 return mem;
|
|
433
|
|
434 mem = next_mem;
|
|
435 mem2int(mem, holesize);
|
|
436 }
|
|
437
|
|
438 return NULL;
|
|
439 }
|
|
440
|
|
/* Release one allocation back into 'block', coalescing it with any
   adjacent free chunk (the one after and the one before), then update
   the block's free_index / largest_free_space and the pool's
   first_block_with_space. */
static void pool_block_free(AllocfreePool *apool, PoolBlock *block,
			    unsigned char *mem)
{
	unsigned char *next_mem, *prev_mem;
	unsigned int holesize, next_holesize, avail_space, prevsize;
	unsigned int next_free_index;
	int pos;

	/* step back over the saved block pointer and the size header so
	   'mem' points at the chunk's size field */
#ifdef POOL_SAVE_BLOCK
	mem -= sizeof(PoolBlock *);
#endif
	mem -= sizeof(int);
	mem2int(mem, holesize);

#ifdef POOL_DEBUG
	printf("pool_block_free(%s, %u)\n", apool->name, mem_size(holesize));
#endif

	/* a chunk being freed must currently be marked in-use */
	if ((holesize & MEM_FREE_BIT) != 0 ||
	    mem_size(holesize) + sizeof(int) > block->size)
		i_panic("pool_block_free() : corrupted pool");

	if ((int) (mem-block->data) + holesize + sizeof(int) >= block->size) {
		/* this is the last space in block */
		holesize = 0;
	} else {
		next_mem = ((unsigned char *) mem) + holesize + sizeof(int);
		mem2int(next_mem, next_holesize);

		if (next_holesize == 0) {
			/* last used space - combine to free space at end */
			holesize = 0;
		} else if (!is_mem_free(next_holesize)) {
			/* mark the space free */
			holesize |= MEM_FREE_BIT;
		} else {
			/* combine the two free spaces */
			holesize = (mem_size(holesize) + sizeof(int) +
				    mem_size(next_holesize)) | MEM_FREE_BIT;
		}
	}
	int2mem(mem, holesize);

	/* if previous block is free, we can combine the free space */
	prev_mem = pool_block_prev(block, mem);
	if (prev_mem != NULL) {
		mem2int(prev_mem, prevsize);
		if (is_mem_free(prevsize)) {
			if (holesize != 0) {
				holesize = (mem_size(holesize) + sizeof(int) +
					    mem_size(prevsize)) | MEM_FREE_BIT;
			}
			int2mem(prev_mem, holesize);
			mem = prev_mem;
		}
	}

	/* update largest free space size; holesize == 0 means the hole
	   extends to the end of the block */
	avail_space = holesize != 0 ? mem_size(holesize) :
		block->size - (int) (mem-block->data + sizeof(int));

#ifdef POOL_FREE_CLEAR
	/* debugging: poison the freed area */
	memset(mem + sizeof(int), POOL_FREE_CLEAR, avail_space);
#elif defined (POOL_SAVE_BLOCK)
	/* wipe the stale saved block pointer */
	memset(mem + sizeof(int), 0, sizeof(PoolBlock *));
#endif

	if (block->largest_free_space < avail_space-ALLOC_EXTRA_SIZE)
		block->largest_free_space = avail_space-ALLOC_EXTRA_SIZE;

	/* update the first free space index */
	next_free_index = (int) (mem-block->data);
	if (block->free_index > next_free_index)
		block->free_index = next_free_index;

	/* update pool's first block with free space index */
	pos = pool_block_get_pos(apool, block);
	if (apool->first_block_with_space < 0 ||
	    pos < apool->first_block_with_space)
		apool->first_block_with_space = pos;

	if (holesize == 0 && mem == block->data) {
		/* FIXME: the block is completely unused, if there's more
		   than two empty blocks, free them */
	}
}
|
|
527
|
|
528 Pool pool_allocfree_create(const char *name, unsigned int size)
|
|
529 {
|
|
530 AllocfreePool *apool;
|
|
531
|
|
532 i_assert(size > sizeof(int));
|
|
533
|
|
534 apool = calloc(sizeof(AllocfreePool) + strlen(name), 1);
|
|
535 if (apool == NULL)
|
|
536 i_panic("pool_create(): Out of memory");
|
|
537
|
|
538 apool->pool = static_allocfree_pool;
|
|
539 apool->magic = 0xbeef;
|
|
540 apool->refcount = 1;
|
|
541
|
|
542 apool->first_block_with_space = -1;
|
|
543 pool_block_create(apool, nearest_power(size));
|
|
544
|
|
545 strcpy(apool->name, name);
|
|
546 return &apool->pool;
|
|
547 }
|
|
548
|
|
549 #ifdef POOL_CHECK_LEAKS
|
|
550 static const char *get_leak_string(const unsigned char *mem, int size)
|
|
551 {
|
|
552 int i;
|
|
553
|
|
554 mem += sizeof(int);
|
|
555
|
|
556 #ifdef POOL_SAVE_BLOCK
|
|
557 mem += sizeof(PoolBlock *);
|
|
558 size -= sizeof(PoolBlock *);
|
|
559 #endif
|
|
560
|
|
561 for (i = 0; i < size; i++) {
|
|
562 if (mem[i] == '\0')
|
|
563 return (const char *) mem;
|
|
564
|
|
565 if ((mem[i] & 0x7f) < 32)
|
|
566 break;
|
|
567 }
|
|
568
|
|
569 return NULL;
|
|
570 }
|
|
571
|
|
572 static const char *pool_block_count_leaks(PoolBlock *block, int *leak_count,
|
|
573 int *leak_size)
|
|
574 {
|
|
575 const char *leak_string;
|
|
576 unsigned char *mem;
|
|
577 unsigned int holesize, holesize_real;
|
|
578
|
|
579 leak_string = NULL;
|
|
580
|
|
581 mem = block->data;
|
|
582 mem2int(mem, holesize);
|
|
583 while (holesize > 0) {
|
|
584 holesize_real = mem_size(holesize);
|
|
585 if (!is_mem_free(holesize)) {
|
|
586 (*leak_count)++;
|
|
587 *leak_size += holesize_real;
|
|
588
|
|
589 if (leak_string == NULL) {
|
|
590 leak_string = get_leak_string(mem,
|
|
591 holesize_real);
|
|
592 }
|
|
593 }
|
|
594
|
|
595 if (holesize_real + sizeof(int)*2 > block->size)
|
|
596 i_panic("pool_block_count_leaks() : corrupted pool");
|
|
597
|
|
598 mem += holesize_real + sizeof(int);
|
|
599 mem2int(mem, holesize);
|
|
600 }
|
|
601
|
|
602 return leak_string;
|
|
603 }
|
|
604
|
|
605 static void pool_check_leaks(AllocfreePool *apool)
|
|
606 {
|
|
607 const char *leak_string;
|
|
608 int i, leak_count, leak_size;
|
|
609
|
|
610 leak_string = NULL;
|
|
611 leak_count = leak_size = 0;
|
|
612 for (i = 0; i < apool->used_blocks; i++) {
|
|
613 PoolBlock *block = apool->blocks[i];
|
|
614
|
|
615 if (block->free_index < block->size) {
|
|
616 leak_string = pool_block_count_leaks(block, &leak_count,
|
|
617 &leak_size);
|
|
618 }
|
|
619 }
|
|
620
|
|
621 if (leak_count > 0) {
|
|
622 i_warning("Pool '%s' leaked %d allocs with "
|
|
623 "total size of %d (%s)",
|
|
624 apool->name, leak_count, leak_size,
|
|
625 leak_string == NULL ? "" : leak_string);
|
|
626 }
|
|
627 }
|
|
628 #endif
|
|
629
|
|
630 static void pool_destroy(AllocfreePool *apool)
|
|
631 {
|
|
632 check_pool(apool);
|
|
633
|
|
634 #ifdef POOL_CHECK_LEAKS
|
|
635 pool_check_leaks(apool);
|
|
636 #endif
|
|
637
|
|
638 while (apool->used_blocks > 0)
|
|
639 pool_block_destroy(apool, apool->blocks[0]);
|
|
640 free(apool->blocks);
|
|
641 free(apool->name);
|
|
642 free(apool);
|
|
643 }
|
|
644
|
|
645 static void pool_allocfree_ref(Pool pool)
|
|
646 {
|
|
647 AllocfreePool *apool = (AllocfreePool *) pool;
|
|
648
|
|
649 apool->refcount++;
|
|
650 }
|
|
651
|
|
652 static void pool_allocfree_unref(Pool pool)
|
|
653 {
|
|
654 AllocfreePool *apool = (AllocfreePool *) pool;
|
|
655
|
|
656 if (--apool->refcount == 0)
|
|
657 pool_destroy(apool);
|
|
658 }
|
|
659
|
|
/* Allocate 'size' zero-initialized bytes from the pool.  Returns NULL
   for size 0.  Creates a new, larger block when no existing block has
   a big enough free space. */
static void *pool_allocfree_malloc(Pool pool, unsigned int size)
{
	AllocfreePool *apool = (AllocfreePool *) pool;
	PoolBlock *block;
	void *mem;
	int i, allocsize;

	if (size == 0)
		return NULL;

	if (size > MAX_ALLOC_SIZE)
		i_panic("Trying to allocate too much memory");

	/* allocate only aligned amount of memory so alignment comes
	   always properly */
	size = (size + MEM_ALIGN-1) & ~(MEM_ALIGN-1);

	check_pool(apool);

	/* check if there's enough space in one of the existing blocks */
	block = NULL;
	for (i = 0; i < apool->used_blocks; i++) {
		if (apool->blocks[i]->largest_free_space >= size) {
			block = apool->blocks[i];
			break;
		}
	}

	if (block == NULL) {
		/* create new block to pool.  NOTE(review): assumes
		   used_blocks >= 1 (holds because pool_allocfree_create()
		   always creates the first block); 'allocsize' is a plain
		   int, so extremely large sizes could overflow here -
		   verify against callers */
		allocsize = 2*apool->blocks[apool->used_blocks-1]->size;
		if (allocsize-sizeof(int)-ALLOC_EXTRA_SIZE < size)
			allocsize = size*2 + sizeof(int) + ALLOC_EXTRA_SIZE;

		pool_block_create(apool, nearest_power(allocsize));
		block = apool->blocks[apool->used_blocks-1];
	}

	mem = pool_block_alloc(apool, block, size);
	memset(mem, 0, size);

#ifdef POOL_DEBUG
	pool_dump(apool);
#endif
	return mem;
}
|
|
706
|
|
707 static void *pool_allocfree_realloc(Pool pool, void *mem, unsigned int size)
|
|
708 {
|
|
709 AllocfreePool *apool = (AllocfreePool *) pool;
|
|
710 unsigned char *mem_size_pos, *oldmem;
|
|
711 unsigned int memsize;
|
|
712
|
|
713 if (size == 0) {
|
|
714 pool_allocfree_free(pool, mem);
|
|
715 return NULL;
|
|
716 }
|
|
717
|
|
718 if (mem == NULL)
|
|
719 return pool_allocfree_malloc(pool, size);
|
|
720
|
|
721 check_pool(apool);
|
|
722
|
|
723 mem_size_pos = (unsigned char *) mem - sizeof(int);
|
|
724 #ifdef POOL_SAVE_BLOCK
|
|
725 mem_size_pos -= sizeof(PoolBlock *);
|
|
726 #endif
|
|
727
|
|
728 mem2int(mem_size_pos, memsize);
|
|
729 if (memsize == size)
|
|
730 return mem;
|
|
731
|
|
732 /* FIXME: shrinking could be done more efficiently, also growing
|
|
733 might be able to check if it can extend it's current allocation */
|
|
734 oldmem = mem;
|
|
735 mem = pool_allocfree_malloc(pool, size);
|
|
736 memcpy(mem, oldmem, memsize < size ? memsize : size);
|
|
737 pool_allocfree_free(pool, oldmem);
|
|
738
|
|
739 #ifdef POOL_DEBUG
|
|
740 pool_dump(apool);
|
|
741 #endif
|
|
742
|
|
743 if (size > memsize)
|
|
744 memset((char *) mem + memsize, 0, size-memsize);
|
|
745 return mem;
|
|
746 }
|
|
747
|
|
748 static void *pool_allocfree_realloc_min(Pool pool, void *mem,
|
|
749 unsigned int size)
|
|
750 {
|
|
751 unsigned char *mem_size_pos;
|
|
752 unsigned int memsize;
|
|
753
|
|
754 if (mem == NULL)
|
|
755 return pool_allocfree_malloc(pool, size);
|
|
756
|
|
757 mem_size_pos = (unsigned char *) mem - sizeof(int);
|
|
758 #ifdef POOL_SAVE_BLOCK
|
|
759 mem_size_pos -= sizeof(PoolBlock *);
|
|
760 #endif
|
|
761
|
|
762 mem2int(mem_size_pos, memsize);
|
|
763 if (size <= memsize)
|
|
764 return mem;
|
|
765
|
|
766 return pool_allocfree_realloc(pool, mem, size);
|
|
767 }
|
|
768
|
|
769 static void pool_allocfree_free(Pool pool, void *mem)
|
|
770 {
|
|
771 AllocfreePool *apool = (AllocfreePool *) pool;
|
|
772 PoolBlock *block;
|
|
773
|
|
774 if (mem == NULL)
|
|
775 return;
|
|
776
|
|
777 check_pool(apool);
|
|
778
|
|
779 block = pool_block_find(apool, mem);
|
|
780 if (block == NULL)
|
|
781 i_panic("pool_allocfree_free(): invalid memory address");
|
|
782
|
|
783 pool_block_free(apool, block, mem);
|
|
784
|
|
785 #ifdef POOL_DEBUG
|
|
786 pool_dump(apool);
|
|
787 #endif
|
|
788 }
|
|
789
|
|
790 static void pool_allocfree_clear(Pool pool)
|
|
791 {
|
|
792 AllocfreePool *apool = (AllocfreePool *) pool;
|
|
793 int i;
|
|
794
|
|
795 apool->first_block_with_space = 0;
|
|
796
|
|
797 for (i = 0; i < apool->used_blocks; i++) {
|
|
798 PoolBlock *block = apool->blocks[i];
|
|
799
|
|
800 block->free_index = 0;
|
|
801 block->largest_free_space = block->size -
|
|
802 sizeof(int) - ALLOC_EXTRA_SIZE;
|
|
803 memset(block->data, 0, sizeof(int));
|
|
804 }
|
|
805 }
|
|
806
|
|
/* Pool vtable copied into every AllocfreePool at creation time. */
static struct Pool static_allocfree_pool = {
	pool_allocfree_ref,
	pool_allocfree_unref,

	pool_allocfree_malloc,
	pool_allocfree_free,

	pool_allocfree_realloc,
	pool_allocfree_realloc_min,

	pool_allocfree_clear
};
|
|
819
|
|
#ifdef POOL_DEBUG
#include <stdlib.h>

/* Crude random stress test (POOL_DEBUG builds only): allocate random
   sizes into 100 slots and, about every third iteration, free every
   live allocation.  Exercises alloc/free/coalesce paths heavily. */
void mempool_test(void)
{
	Pool p;
	void *arr[100];
	int i, j;

	memset(arr, 0, sizeof(arr));

	p = pool_create("temp", 32);
	for (i = 0; i < 10000; i++) {
		/* note: overwriting a still-live slot leaks it on purpose -
		   leak detection is part of what this exercises */
		arr[rand()%100] = p_malloc(p, 4*(rand()%10+1));

		if (rand()%3 == 1) {
			for (j = 0; j < 100; j++) {
				if (arr[j] != NULL)
					p_free_and_null(p, arr[j]);
			}
		}
	}
}
#endif
|