SingingCat 0
application
umm_malloc.c
1/* ----------------------------------------------------------------------------
2 * umm_malloc.c - a memory allocator for embedded systems (microcontrollers)
3 *
4 * See LICENSE for copyright notice
5 * See README.md for acknowledgements and description of internals
6 * ----------------------------------------------------------------------------
7 *
8 * R.Hempel 2007-09-22 - Original
 * R.Hempel 2008-12-11 - Added MIT License boilerplate
10 * - realloc() now looks to see if previous block is free
11 * - made common operations functions
12 * R.Hempel 2009-03-02 - Added macros to disable tasking
13 * - Added function to dump heap and check for valid free
14 * pointer
15 * R.Hempel 2009-03-09 - Changed name to umm_malloc to avoid conflicts with
16 * the mm_malloc() library functions
17 * - Added some test code to assimilate a free block
18 * with the very block if possible. Complicated and
19 * not worth the grief.
20 * D.Frank 2014-04-02 - Fixed heap configuration when UMM_TEST_MAIN is NOT set,
21 * added user-dependent configuration file umm_malloc_cfg.h
22 * R.Hempel 2016-12-04 - Add support for Unity test framework
23 * - Reorganize source files to avoid redundant content
24 * - Move integrity and poison checking to separate file
25 * R.Hempel 2017-12-29 - Fix bug in realloc when requesting a new block that
26 * results in OOM error - see Issue 11
27 * R.Hempel 2019-09-07 - Separate the malloc() and free() functionality into
28 * wrappers that use critical section protection macros
29 * and static core functions that assume they are
 *                        running in a protected context. Thanks @devyte
31 * R.Hempel 2020-01-07 - Add support for Fragmentation metric - See Issue 14
32 * R.Hempel 2020-01-12 - Use explicitly sized values from stdint.h - See Issue 15
33 * R.Hempel 2020-01-20 - Move metric functions back to umm_info - See Issue 29
34 * R.Hempel 2020-02-01 - Macro functions are uppercased - See Issue 34
35 * R.Hempel 2020-06-20 - Support alternate body size - See Issue 42
36 * R.Hempel 2021-05-02 - Support explicit memory umm_init_heap() - See Issue 53
37 * ----------------------------------------------------------------------------
38 */
39
40#include <stdio.h>
41#include <stdint.h>
42#include <stddef.h>
43#include <string.h>
44
45#include "umm_malloc_cfg.h" // Override with umm_malloc_cfg_xxx.h
46#include "umm_malloc.h"
47
48/* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */
49
50// #define DBGLOG_ENABLE
51
52#define DBGLOG_LEVEL 0
53
54#ifdef DBGLOG_ENABLE
55#include "dbglog/dbglog.h"
56#endif
57
58extern void *UMM_MALLOC_CFG_HEAP_ADDR;
59extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
60
61/* ------------------------------------------------------------------------- */
62
63UMM_H_ATTPACKPRE typedef struct umm_ptr_t {
64 uint16_t next;
65 uint16_t prev;
66} UMM_H_ATTPACKSUF umm_ptr;
67
68UMM_H_ATTPACKPRE typedef struct umm_block_t {
69 union {
70 umm_ptr used;
71 } header;
72 union {
73 umm_ptr free;
74 uint8_t data[UMM_BLOCK_BODY_SIZE - sizeof(struct umm_ptr_t)];
75 } body;
76} UMM_H_ATTPACKSUF umm_block;
77
78#define UMM_FREELIST_MASK ((uint16_t)(0x8000))
79#define UMM_BLOCKNO_MASK ((uint16_t)(0x7FFF))
80
81/* ------------------------------------------------------------------------- */
82
84 umm_block * pheap;
85 size_t heap_size;
86 uint16_t numblocks;
87};
88
89struct umm_heap_config umm_heap_current;
90// struct umm_heap_config umm_heaps[UMM_NUM_HEAPS];
91
92#define UMM_HEAP (umm_heap_current.pheap)
93#define UMM_HEAPSIZE (umm_heap_current.heap_size)
94#define UMM_NUMBLOCKS (umm_heap_current.numblocks)
95
96#define UMM_BLOCKSIZE (sizeof(umm_block))
97#define UMM_BLOCK_LAST (UMM_NUMBLOCKS - 1)
98
99/* -------------------------------------------------------------------------
100 * These macros evaluate to the address of the block and data respectively
101 */
102
103#define UMM_BLOCK(b) (UMM_HEAP[b])
104#define UMM_DATA(b) (UMM_BLOCK(b).body.data)
105
106/* -------------------------------------------------------------------------
107 * These macros evaluate to the index of the block - NOT the address!!!
108 */
109
110#define UMM_NBLOCK(b) (UMM_BLOCK(b).header.used.next)
111#define UMM_PBLOCK(b) (UMM_BLOCK(b).header.used.prev)
112#define UMM_NFREE(b) (UMM_BLOCK(b).body.free.next)
113#define UMM_PFREE(b) (UMM_BLOCK(b).body.free.prev)
114
115/* -------------------------------------------------------------------------
116 * There are additional files that may be included here - normally it's
117 * not a good idea to include .c files but in this case it keeps the
118 * main umm_malloc file clear and prevents issues with exposing internal
119 * data structures to other programs.
120 * -------------------------------------------------------------------------
121 */
122
123#include "umm_integrity.c"
124#include "umm_poison.c"
125#include "umm_info.c"
126
127/* ------------------------------------------------------------------------ */
128
129static uint16_t umm_blocks(size_t size) {
130 /*
131 * The calculation of the block size is not too difficult, but there are
132 * a few little things that we need to be mindful of.
133 *
134 * When a block removed from the free list, the space used by the free
135 * pointers is available for data. That's what the first calculation
136 * of size is doing.
137 *
138 * We don't check for the special case of (size == 0) here as this needs
139 * special handling in the caller depending on context. For example when we
140 * realloc() a block to size 0 it should simply be freed.
141 *
142 * We do NOT need to check for allocating more blocks than the heap can
143 * possibly hold - the allocator figures this out for us.
144 *
145 * There are only two cases left to consider:
146 *
147 * 1. (size <= body) Obviously this is just one block
148 * 2. (blocks > (2^15)) This should return ((2^15)) to force a
149 * failure when the allocator runs
150 *
151 * If the requested size is greater that 32677-2 blocks (max block index
152 * minus the overhead of the top and bottom bookkeeping blocks) then we
153 * will return an incorrectly truncated value when the result is cast to
154 * a uint16_t.
155 */
156
157 if (size <= (sizeof(((umm_block *)0)->body))) {
158 return 1;
159 }
160
161 /*
162 * If it's for more than that, then we need to figure out the number of
163 * additional whole blocks the size of an umm_block are required, so
164 * reduce the size request by the number of bytes in the body of the
165 * first block.
166 */
167
168 size -= (sizeof(((umm_block *)0)->body));
169
170 /* NOTE WELL that we take advantage of the fact that INT16_MAX is the
171 * number of blocks that we can index in 15 bits :-)
172 *
173 * The below expression looks wierd, but it's right. Assuming body
174 * size of 4 bytes and a block size of 8 bytes:
175 *
176 * BYTES (BYTES-BODY) (BYTES-BODY-1)/BLOCKSIZE BLOCKS
177 * 1 n/a n/a 1
178 * 5 1 0 2
179 * 12 8 0 2
180 * 13 9 1 3
181 */
182
183 size_t blocks = (2 + ((size - 1) / (UMM_BLOCKSIZE)));
184
185 if (blocks > (INT16_MAX)) {
186 blocks = INT16_MAX;
187 }
188
189 return (uint16_t)blocks;
190}
191
192/* ------------------------------------------------------------------------ */
193/*
194 * Split the block `c` into two blocks: `c` and `c + blocks`.
195 *
196 * - `new_freemask` should be `0` if `c + blocks` used, or `UMM_FREELIST_MASK`
197 * otherwise.
198 *
199 * Note that free pointers are NOT modified by this function.
200 */
201static void umm_split_block(uint16_t c,
202 uint16_t blocks,
203 uint16_t new_freemask) {
204 UMM_NBLOCK(c + blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
205 UMM_PBLOCK(c + blocks) = c;
206
207 UMM_PBLOCK(UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) = (c + blocks);
208 UMM_NBLOCK(c) = (c + blocks);
209}
210
211/* ------------------------------------------------------------------------ */
212
213static void umm_disconnect_from_free_list(uint16_t c) {
214 /* Disconnect this block from the FREE list */
215
216 UMM_NFREE(UMM_PFREE(c)) = UMM_NFREE(c);
217 UMM_PFREE(UMM_NFREE(c)) = UMM_PFREE(c);
218
219 /* And clear the free block indicator */
220
221 UMM_NBLOCK(c) &= (~UMM_FREELIST_MASK);
222}
223
224/* ------------------------------------------------------------------------
225 * The umm_assimilate_up() function does not assume that UMM_NBLOCK(c)
226 * has the UMM_FREELIST_MASK bit set. It only assimilates up if the
227 * next block is free.
228 */
229static void umm_assimilate_up(uint16_t c) {
230 if (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK) {
231 UMM_FRAGMENTATION_METRIC_REMOVE(UMM_NBLOCK(c));
232
233 /*
234 * The next block is a free block, so assimilate up and remove it from
235 * the free list
236 */
237
238 DBGLOG_DEBUG("Assimilate up to next block, which is FREE\n");
239
240 /* Disconnect the next block from the FREE list */
241
242 umm_disconnect_from_free_list(UMM_NBLOCK(c));
243
244 /* Assimilate the next block with this one */
245
246 UMM_PBLOCK(UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) = c;
247 UMM_NBLOCK(c) = UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK;
248 }
249}
250
251/* ------------------------------------------------------------------------
252 * The umm_assimilate_down() function assumes that UMM_NBLOCK(c) does NOT
253 * have the UMM_FREELIST_MASK bit set. In other words, try to assimilate
254 * up before assimilating down.
255 */
256static uint16_t umm_assimilate_down(uint16_t c, uint16_t freemask) {
257 // We are going to assimilate down to the previous block because
258 // it was free, so remove it from the fragmentation metric
259
260 UMM_FRAGMENTATION_METRIC_REMOVE(UMM_PBLOCK(c));
261
262 UMM_NBLOCK(UMM_PBLOCK(c)) = UMM_NBLOCK(c) | freemask;
263 UMM_PBLOCK(UMM_NBLOCK(c)) = UMM_PBLOCK(c);
264
265 if (freemask) {
266 // We are going to free the entire assimilated block
267 // so add it to the fragmentation metric. A good
268 // compiler will optimize away the empty if statement
269 // when UMM_INFO is not defined, so don't worry about
270 // guarding it.
271
272 UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c));
273 }
274
275 return UMM_PBLOCK(c);
276}
277
278/* ------------------------------------------------------------------------- */
279
280void umm_init_heap(void *ptr, size_t size) {
281 /* init heap pointer and size, and memset it to 0 */
282 UMM_HEAP = (umm_block *)ptr;
283 UMM_HEAPSIZE = size;
284 UMM_NUMBLOCKS = (UMM_HEAPSIZE / UMM_BLOCKSIZE);
285 memset(UMM_HEAP, 0x00, UMM_HEAPSIZE);
286
287 /* setup initial blank heap structure */
288 UMM_FRAGMENTATION_METRIC_INIT();
289
290 /* Set up umm_block[0], which just points to umm_block[1] */
291 UMM_NBLOCK(0) = 1;
292 UMM_NFREE(0) = 1;
293 UMM_PFREE(0) = 1;
294
295 /*
296 * Now, we need to set the whole heap space as a huge free block. We should
297 * not touch umm_block[0], since it's special: umm_block[0] is the head of
298 * the free block list. It's a part of the heap invariant.
299 *
300 * See the detailed explanation at the beginning of the file.
301 *
302 * umm_block[1] has pointers:
303 *
304 * - next `umm_block`: the last one umm_block[n]
305 * - prev `umm_block`: umm_block[0]
306 *
307 * Plus, it's a free `umm_block`, so we need to apply `UMM_FREELIST_MASK`
308 *
309 * And it's the last free block, so the next free block is 0 which marks
310 * the end of the list. The previous block and free block pointer are 0
311 * too, there is no need to initialize these values due to the init code
312 * that memsets the entire umm_ space to 0.
313 */
314 UMM_NBLOCK(1) = UMM_BLOCK_LAST | UMM_FREELIST_MASK;
315
316 /*
317 * Last umm_block[n] has the next block index at 0, meaning it's
318 * the end of the list, and the previous block is umm_block[1].
319 *
320 * The last block is a special block and can never be part of the
321 * free list, so its pointers are left at 0 too.
322 */
323
324 UMM_PBLOCK(UMM_BLOCK_LAST) = 1;
325
326// DBGLOG_FORCE(true, "nblock(0) %04x pblock(0) %04x nfree(0) %04x pfree(0) %04x\n", UMM_NBLOCK(0) & UMM_BLOCKNO_MASK, UMM_PBLOCK(0), UMM_NFREE(0), UMM_PFREE(0));
327// DBGLOG_FORCE(true, "nblock(1) %04x pblock(1) %04x nfree(1) %04x pfree(1) %04x\n", UMM_NBLOCK(1) & UMM_BLOCKNO_MASK, UMM_PBLOCK(1), UMM_NFREE(1), UMM_PFREE(1));
328}
329
330void umm_init(void) {
331 /* Initialize the heap from linker supplied values */
332
333 umm_init_heap(UMM_MALLOC_CFG_HEAP_ADDR, UMM_MALLOC_CFG_HEAP_SIZE);
334}
335
336/* ------------------------------------------------------------------------
337 * Must be called only from within critical sections guarded by
338 * UMM_CRITICAL_ENTRY(id) and UMM_CRITICAL_EXIT(id).
339 */
340static void umm_free_core(void *ptr) {
341 uint16_t c;
342
343 /*
344 * FIXME: At some point it might be a good idea to add a check to make sure
345 * that the pointer we're being asked to free up is actually within
346 * the umm_heap!
347 *
348 * NOTE: See the new umm_info() function that you can use to see if a ptr is
349 * on the free list!
350 */
351
352 /* Figure out which block we're in. Note the use of truncated division... */
353
354 c = (((uint8_t *)ptr) - (uint8_t *)(&(UMM_HEAP[0]))) / UMM_BLOCKSIZE;
355
356 DBGLOG_DEBUG("Freeing block %6i\n", c);
357
358 /* Now let's assimilate this block with the next one if possible. */
359
360 umm_assimilate_up(c);
361
362 /* Then assimilate with the previous block if possible */
363
364 if (UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK) {
365 DBGLOG_DEBUG("Assimilate down to previous block, which is FREE\n");
366
367 c = umm_assimilate_down(c, UMM_FREELIST_MASK);
368 } else {
369 /*
370 * The previous block is not a free block, so add this one to the head
371 * of the free list
372 */
373 UMM_FRAGMENTATION_METRIC_ADD(c);
374
375 DBGLOG_DEBUG("Just add to head of free list\n");
376
377 UMM_PFREE(UMM_NFREE(0)) = c;
378 UMM_NFREE(c) = UMM_NFREE(0);
379 UMM_PFREE(c) = 0;
380 UMM_NFREE(0) = c;
381
382 UMM_NBLOCK(c) |= UMM_FREELIST_MASK;
383 }
384}
385
386/* ------------------------------------------------------------------------ */
387
388void umm_free(void *ptr) {
389 UMM_CRITICAL_DECL(id_free);
390
391 UMM_CHECK_INITIALIZED();
392
393 /* If we're being asked to free a NULL pointer, well that's just silly! */
394
395 if ((void *)0 == ptr) {
396 DBGLOG_DEBUG("free a null pointer -> do nothing\n");
397
398 return;
399 }
400
401 /* Free the memory withing a protected critical section */
402
403 UMM_CRITICAL_ENTRY(id_free);
404
405 umm_free_core(ptr);
406
407 UMM_CRITICAL_EXIT(id_free);
408}
409
410/* ------------------------------------------------------------------------
411 * Must be called only from within critical sections guarded by
412 * UMM_CRITICAL_ENTRY(id) and UMM_CRITICAL_EXIT(id).
413 */
414static void *umm_malloc_core(size_t size) {
415 uint16_t blocks;
416 uint16_t blockSize = 0;
417
418 uint16_t bestSize;
419 uint16_t bestBlock;
420
421 uint16_t cf;
422
423 blocks = umm_blocks(size);
424
425 /*
426 * Now we can scan through the free list until we find a space that's big
427 * enough to hold the number of blocks we need.
428 *
429 * This part may be customized to be a best-fit, worst-fit, or first-fit
430 * algorithm
431 */
432
433 cf = UMM_NFREE(0);
434
435 bestBlock = UMM_NFREE(0);
436 bestSize = 0x7FFF;
437
438 while (cf) {
439 blockSize = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) - cf;
440
441 DBGLOG_TRACE("Looking at block %6i size %6i\n", cf, blockSize);
442
443#if defined UMM_BEST_FIT
444 if ((blockSize >= blocks) && (blockSize < bestSize)) {
445 bestBlock = cf;
446 bestSize = blockSize;
447 }
448#elif defined UMM_FIRST_FIT
449 /* This is the first block that fits! */
450 if ((blockSize >= blocks)) {
451 break;
452 }
453#else
454#error "No UMM_*_FIT is defined - check umm_malloc_cfg.h"
455#endif
456
457 cf = UMM_NFREE(cf);
458 }
459
460 if (0x7FFF != bestSize) {
461 cf = bestBlock;
462 blockSize = bestSize;
463 }
464
465 if (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks) {
466 UMM_FRAGMENTATION_METRIC_REMOVE(cf);
467
468 /*
469 * This is an existing block in the memory heap, we just need to split off
470 * what we need, unlink it from the free list and mark it as in use, and
471 * link the rest of the block back into the freelist as if it was a new
472 * block on the free list...
473 */
474
475 if (blockSize == blocks) {
476 /* It's an exact fit and we don't neet to split off a block. */
477 DBGLOG_DEBUG("Allocating %6i blocks starting at %6i - exact\n", blocks, cf);
478
479 /* Disconnect this block from the FREE list */
480
481 umm_disconnect_from_free_list(cf);
482 } else {
483 /* It's not an exact fit and we need to split off a block. */
484 DBGLOG_DEBUG("Allocating %6i blocks starting at %6i - existing\n", blocks, cf);
485
486 /*
487 * split current free block `cf` into two blocks. The first one will be
488 * returned to user, so it's not free, and the second one will be free.
489 */
490 umm_split_block(cf, blocks, UMM_FREELIST_MASK /*new block is free*/);
491
492 UMM_FRAGMENTATION_METRIC_ADD(UMM_NBLOCK(cf));
493
494 /*
495 * `umm_split_block()` does not update the free pointers (it affects
496 * only free flags), but effectively we've just moved beginning of the
497 * free block from `cf` to `cf + blocks`. So we have to adjust pointers
498 * to and from adjacent free blocks.
499 */
500
501 /* previous free block */
502 UMM_NFREE(UMM_PFREE(cf)) = cf + blocks;
503 UMM_PFREE(cf + blocks) = UMM_PFREE(cf);
504
505 /* next free block */
506 UMM_PFREE(UMM_NFREE(cf)) = cf + blocks;
507 UMM_NFREE(cf + blocks) = UMM_NFREE(cf);
508 }
509 } else {
510 /* Out of memory */
511
512 DBGLOG_DEBUG("Can't allocate %5i blocks\n", blocks);
513
514 return (void *)NULL;
515 }
516
517 return (void *)&UMM_DATA(cf);
518}
519
520/* ------------------------------------------------------------------------ */
521
522void *umm_malloc(size_t size) {
523 UMM_CRITICAL_DECL(id_malloc);
524
525 void *ptr = NULL;
526
527 UMM_CHECK_INITIALIZED();
528
529 /*
530 * the very first thing we do is figure out if we're being asked to allocate
531 * a size of 0 - and if we are we'll simply return a null pointer. if not
532 * then reduce the size by 1 byte so that the subsequent calculations on
533 * the number of blocks to allocate are easier...
534 */
535
536 if (0 == size) {
537 DBGLOG_DEBUG("malloc a block of 0 bytes -> do nothing\n");
538
539 return ptr;
540 }
541
542 /* Allocate the memory withing a protected critical section */
543
544 UMM_CRITICAL_ENTRY(id_malloc);
545
546 ptr = umm_malloc_core(size);
547
548 UMM_CRITICAL_EXIT(id_malloc);
549
550 return ptr;
551}
552
553/* ------------------------------------------------------------------------ */
554
555void *umm_realloc(void *ptr, size_t size) {
556 UMM_CRITICAL_DECL(id_realloc);
557
558 uint16_t blocks;
559 uint16_t blockSize;
560 uint16_t prevBlockSize = 0;
561 uint16_t nextBlockSize = 0;
562
563 uint16_t c;
564
565 size_t curSize;
566
567 UMM_CHECK_INITIALIZED();
568
569 /*
570 * This code looks after the case of a NULL value for ptr. The ANSI C
571 * standard says that if ptr is NULL and size is non-zero, then we've
572 * got to work the same a malloc(). If size is also 0, then our version
573 * of malloc() returns a NULL pointer, which is OK as far as the ANSI C
574 * standard is concerned.
575 */
576
577 if (((void *)NULL == ptr)) {
578 DBGLOG_DEBUG("realloc the NULL pointer - call malloc()\n");
579
580 return umm_malloc(size);
581 }
582
583 /*
584 * Now we're sure that we have a non_NULL ptr, but we're not sure what
585 * we should do with it. If the size is 0, then the ANSI C standard says that
586 * we should operate the same as free.
587 */
588
589 if (0 == size) {
590 DBGLOG_DEBUG("realloc to 0 size, just free the block\n");
591
592 umm_free(ptr);
593
594 return (void *)NULL;
595 }
596
597 /*
598 * Otherwise we need to actually do a reallocation. A naiive approach
599 * would be to malloc() a new block of the correct size, copy the old data
600 * to the new block, and then free the old block.
601 *
602 * While this will work, we end up doing a lot of possibly unnecessary
603 * copying. So first, let's figure out how many blocks we'll need.
604 */
605
606 blocks = umm_blocks(size);
607
608 /* Figure out which block we're in. Note the use of truncated division... */
609
610 c = (((uint8_t *)ptr) - (uint8_t *)(&(UMM_HEAP[0]))) / UMM_BLOCKSIZE;
611
612 /* Figure out how big this block is ... the free bit is not set :-) */
613
614 blockSize = (UMM_NBLOCK(c) - c);
615
616 /* Figure out how many bytes are in this block */
617
618 curSize = (blockSize * UMM_BLOCKSIZE) - (sizeof(((umm_block *)0)->header));
619
620 /* Protect the critical section... */
621 UMM_CRITICAL_ENTRY(id_realloc);
622
623 /* Now figure out if the previous and/or next blocks are free as well as
624 * their sizes - this will help us to minimize special code later when we
625 * decide if it's possible to use the adjacent blocks.
626 *
627 * We set prevBlockSize and nextBlockSize to non-zero values ONLY if they
628 * are free!
629 */
630
631 if ((UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK)) {
632 nextBlockSize = (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) - UMM_NBLOCK(c);
633 }
634
635 if ((UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK)) {
636 prevBlockSize = (c - UMM_PBLOCK(c));
637 }
638
639 DBGLOG_DEBUG("realloc blocks %i blockSize %i nextBlockSize %i prevBlockSize %i\n", blocks, blockSize, nextBlockSize, prevBlockSize);
640
641 /*
642 * Ok, now that we're here we know how many blocks we want and the current
643 * blockSize. The prevBlockSize and nextBlockSize are set and we can figure
644 * out the best strategy for the new allocation as follows:
645 *
646 * 1. If the new block is the same size or smaller than the current block do
647 * nothing.
648 * 2. If the next block is free and adding it to the current block gives us
649 * EXACTLY enough memory, assimilate the next block. This avoids unwanted
650 * fragmentation of free memory.
651 *
652 * The following cases may be better handled with memory copies to reduce
653 * fragmentation
654 *
655 * 3. If the previous block is NOT free and the next block is free and
656 * adding it to the current block gives us enough memory, assimilate
657 * the next block. This may introduce a bit of fragmentation.
658 * 4. If the prev block is free and adding it to the current block gives us
659 * enough memory, remove the previous block from the free list, assimilate
660 * it, copy to the new block.
661 * 5. If the prev and next blocks are free and adding them to the current
662 * block gives us enough memory, assimilate the next block, remove the
663 * previous block from the free list, assimilate it, copy to the new block.
664 * 6. Otherwise try to allocate an entirely new block of memory. If the
665 * allocation works free the old block and return the new pointer. If
666 * the allocation fails, return NULL and leave the old block intact.
667 *
668 * TODO: Add some conditional code to optimise for less fragmentation
669 * by simply allocating new memory if we need to copy anyways.
670 *
671 * All that's left to do is decide if the fit was exact or not. If the fit
672 * was not exact, then split the memory block so that we use only the requested
673 * number of blocks and add what's left to the free list.
674 */
675
676 // Case 1 - block is same size or smaller
677 if (blockSize >= blocks) {
678 DBGLOG_DEBUG("realloc the same or smaller size block - %i, do nothing\n", blocks);
679 /* This space intentionally left blank */
680
681 // Case 2 - block + next block fits EXACTLY
682 } else if ((blockSize + nextBlockSize) == blocks) {
683 DBGLOG_DEBUG("exact realloc using next block - %i\n", blocks);
684 umm_assimilate_up(c);
685 blockSize += nextBlockSize;
686
687 // Case 3 - prev block NOT free and block + next block fits
688 } else if ((0 == prevBlockSize) && (blockSize + nextBlockSize) >= blocks) {
689 DBGLOG_DEBUG("realloc using next block - %i\n", blocks);
690 umm_assimilate_up(c);
691 blockSize += nextBlockSize;
692
693 // Case 4 - prev block + block fits
694 } else if ((prevBlockSize + blockSize) >= blocks) {
695 DBGLOG_DEBUG("realloc using prev block - %i\n", blocks);
696 umm_disconnect_from_free_list(UMM_PBLOCK(c));
697 c = umm_assimilate_down(c, 0);
698 memmove((void *)&UMM_DATA(c), ptr, curSize);
699 ptr = (void *)&UMM_DATA(c);
700 blockSize += prevBlockSize;
701
702 // Case 5 - prev block + block + next block fits
703 } else if ((prevBlockSize + blockSize + nextBlockSize) >= blocks) {
704 DBGLOG_DEBUG("realloc using prev and next block - %i\n", blocks);
705 umm_assimilate_up(c);
706 umm_disconnect_from_free_list(UMM_PBLOCK(c));
707 c = umm_assimilate_down(c, 0);
708 memmove((void *)&UMM_DATA(c), ptr, curSize);
709 ptr = (void *)&UMM_DATA(c);
710 blockSize += (prevBlockSize + nextBlockSize);
711
712 // Case 6 - default is we need to realloc a new block
713 } else {
714 DBGLOG_DEBUG("realloc a completely new block %i\n", blocks);
715 void *oldptr = ptr;
716 if ((ptr = umm_malloc_core(size))) {
717 DBGLOG_DEBUG("realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks);
718 memcpy(ptr, oldptr, curSize);
719 umm_free_core(oldptr);
720 } else {
721 DBGLOG_DEBUG("realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks);
722 /* This space intentionally left blnk */
723 }
724 blockSize = blocks;
725 }
726
727 /* Now all we need to do is figure out if the block fit exactly or if we
728 * need to split and free ...
729 */
730
731 if (blockSize > blocks) {
732 DBGLOG_DEBUG("split and free %i blocks from %i\n", blocks, blockSize);
733 umm_split_block(c, blocks, 0);
734 umm_free_core((void *)&UMM_DATA(c + blocks));
735 }
736
737 /* Release the critical section... */
738 UMM_CRITICAL_EXIT(id_realloc);
739
740 return ptr;
741}
742
743/* ------------------------------------------------------------------------ */
744
745void *umm_calloc(size_t num, size_t item_size) {
746 void *ret;
747
748 ret = umm_malloc((size_t)(item_size * num));
749
750 if (ret) {
751 memset(ret, 0x00, (size_t)(item_size * num));
752 }
753
754 return ret;
755}
756
757/* ------------------------------------------------------------------------ */