summaryrefslogtreecommitdiffstats
path: root/src/core/malloc.c
diff options
context:
space:
mode:
authorSimon Rettberg2026-01-28 12:53:53 +0100
committerSimon Rettberg2026-01-28 12:53:53 +0100
commit8e82785c584dc13e20f9229decb95bd17bbe9cd1 (patch)
treea8b359e59196be5b2e3862bed189107f4bc9975f /src/core/malloc.c
parentMerge branch 'master' into openslx (diff)
parent[prefix] Make unlzma.S compatible with 386 class CPUs (diff)
downloadipxe-openslx.tar.gz
ipxe-openslx.tar.xz
ipxe-openslx.zip
Merge branch 'master' into openslxopenslx
Diffstat (limited to 'src/core/malloc.c')
-rw-r--r--src/core/malloc.c425
1 files changed, 260 insertions, 165 deletions
diff --git a/src/core/malloc.c b/src/core/malloc.c
index 8499ab45a..3a9f23ee4 100644
--- a/src/core/malloc.c
+++ b/src/core/malloc.c
@@ -22,11 +22,11 @@
*/
FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+FILE_SECBOOT ( PERMITTED );
#include <stddef.h>
#include <stdint.h>
#include <string.h>
-#include <strings.h>
#include <ipxe/io.h>
#include <ipxe/list.h>
#include <ipxe/init.h>
@@ -59,8 +59,12 @@ struct memory_block {
struct list_head list;
};
-#define MIN_MEMBLOCK_SIZE \
- ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
+/** Physical address alignment maintained for free blocks of memory
+ *
+ * We keep memory blocks aligned on a power of two that is at least
+ * large enough to hold a @c struct @c memory_block.
+ */
+#define MIN_MEMBLOCK_ALIGN ( 4 * sizeof ( void * ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
@@ -71,49 +75,24 @@ struct autosized_block {
};
/**
- * Address for zero-length memory blocks
- *
- * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
- * NOWHERE. Calling @c free(NOWHERE) will have no effect.
+ * Heap area size
*
- * This is consistent with the ANSI C standards, which state that
- * "either NULL or a pointer suitable to be passed to free()" must be
- * returned in these cases. Using a special non-NULL value means that
- * the caller can take a NULL return value to indicate failure,
- * without first having to check for a requested size of zero.
- *
- * Code outside of malloc.c do not ever need to refer to the actual
- * value of @c NOWHERE; this is an internal definition.
+ * Currently fixed at 4MB.
*/
-#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )
-
-/** List of free memory blocks */
-static LIST_HEAD ( free_blocks );
+#define HEAP_SIZE ( 4096 * 1024 )
-/** Total amount of free memory */
-size_t freemem;
+/** Heap area alignment */
+#define HEAP_ALIGN MIN_MEMBLOCK_ALIGN
-/** Total amount of used memory */
-size_t usedmem;
-
-/** Maximum amount of used memory */
-size_t maxusedmem;
-
-/**
- * Heap size
- *
- * Currently fixed at 512kB.
- */
-#define HEAP_SIZE ( 512 * 1024 )
-
-/** The heap itself */
-static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
+/** The heap area */
+static char __attribute__ (( aligned ( HEAP_ALIGN ) )) heap_area[HEAP_SIZE];
/**
* Mark all blocks in free list as defined
*
+ * @v heap Heap
*/
-static inline void valgrind_make_blocks_defined ( void ) {
+static inline void valgrind_make_blocks_defined ( struct heap *heap ) {
struct memory_block *block;
/* Do nothing unless running under Valgrind */
@@ -126,18 +105,18 @@ static inline void valgrind_make_blocks_defined ( void ) {
*/
/* Mark block list itself as defined */
- VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );
+ VALGRIND_MAKE_MEM_DEFINED ( &heap->blocks, sizeof ( heap->blocks ) );
/* Mark areas accessed by list_check() as defined */
- VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
- sizeof ( free_blocks.prev->next ) );
- VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
- sizeof ( *free_blocks.next ) );
- VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
- sizeof ( free_blocks.next->next->prev ) );
+ VALGRIND_MAKE_MEM_DEFINED ( &heap->blocks.prev->next,
+ sizeof ( heap->blocks.prev->next ) );
+ VALGRIND_MAKE_MEM_DEFINED ( heap->blocks.next,
+ sizeof ( *heap->blocks.next ) );
+ VALGRIND_MAKE_MEM_DEFINED ( &heap->blocks.next->next->prev,
+ sizeof ( heap->blocks.next->next->prev ) );
/* Mark each block in list as defined */
- list_for_each_entry ( block, &free_blocks, list ) {
+ list_for_each_entry ( block, &heap->blocks, list ) {
/* Mark block as defined */
VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
@@ -153,8 +132,9 @@ static inline void valgrind_make_blocks_defined ( void ) {
/**
* Mark all blocks in free list as inaccessible
*
+ * @v heap Heap
*/
-static inline void valgrind_make_blocks_noaccess ( void ) {
+static inline void valgrind_make_blocks_noaccess ( struct heap *heap ) {
struct memory_block *block;
struct memory_block *prev = NULL;
@@ -168,7 +148,7 @@ static inline void valgrind_make_blocks_noaccess ( void ) {
*/
/* Mark each block in list as inaccessible */
- list_for_each_entry ( block, &free_blocks, list ) {
+ list_for_each_entry ( block, &heap->blocks, list ) {
/* Mark previous block (if any) as inaccessible. (Current
* block will be accessed by list_check().)
@@ -181,8 +161,8 @@ static inline void valgrind_make_blocks_noaccess ( void ) {
* accessing the first list item. Temporarily mark
* this area as defined.
*/
- VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
- sizeof ( free_blocks.next->prev ) );
+ VALGRIND_MAKE_MEM_DEFINED ( &heap->blocks.next->prev,
+ sizeof ( heap->blocks.next->prev ));
}
/* Mark last block (if any) as inaccessible */
if ( prev )
@@ -191,32 +171,37 @@ static inline void valgrind_make_blocks_noaccess ( void ) {
/* Mark as inaccessible the area that was temporarily marked
* as defined to avoid errors from list_check().
*/
- VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
- sizeof ( free_blocks.next->prev ) );
+ VALGRIND_MAKE_MEM_NOACCESS ( &heap->blocks.next->prev,
+ sizeof ( heap->blocks.next->prev ) );
/* Mark block list itself as inaccessible */
- VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
+ VALGRIND_MAKE_MEM_NOACCESS ( &heap->blocks, sizeof ( heap->blocks ) );
}
/**
* Check integrity of the blocks in the free list
*
+ * @v heap Heap
*/
-static inline void check_blocks ( void ) {
+static inline void check_blocks ( struct heap *heap ) {
struct memory_block *block;
struct memory_block *prev = NULL;
if ( ! ASSERTING )
return;
- list_for_each_entry ( block, &free_blocks, list ) {
+ list_for_each_entry ( block, &heap->blocks, list ) {
+
+ /* Check alignment */
+ assert ( ( virt_to_phys ( block ) &
+ ( heap->align - 1 ) ) == 0 );
/* Check that list structure is intact */
list_check ( &block->list );
/* Check that block size is not too small */
assert ( block->size >= sizeof ( *block ) );
- assert ( block->size >= MIN_MEMBLOCK_SIZE );
+ assert ( block->size >= heap->align );
/* Check that block does not wrap beyond end of address space */
assert ( ( ( void * ) block + block->size ) >
@@ -237,9 +222,10 @@ static inline void check_blocks ( void ) {
/**
* Discard some cached data
*
+ * @v size Failed allocation size
* @ret discarded Number of cached items discarded
*/
-static unsigned int discard_cache ( void ) {
+static unsigned int discard_cache ( size_t size __unused ) {
struct cache_discarder *discarder;
unsigned int discarded;
@@ -259,13 +245,14 @@ static void discard_all_cache ( void ) {
unsigned int discarded;
do {
- discarded = discard_cache();
+ discarded = discard_cache ( 0 );
} while ( discarded );
}
/**
* Allocate a memory block
*
+ * @v heap Heap
* @v size Requested size
* @v align Physical alignment
* @v offset Offset from physical alignment
@@ -276,28 +263,35 @@ static void discard_all_cache ( void ) {
*
* @c align must be a power of two. @c size may not be zero.
*/
-void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
+static void * heap_alloc_block ( struct heap *heap, size_t size, size_t align,
+ size_t offset ) {
struct memory_block *block;
+ size_t actual_offset;
size_t align_mask;
size_t actual_size;
size_t pre_size;
size_t post_size;
struct memory_block *pre;
struct memory_block *post;
- unsigned int discarded;
+ unsigned int grown;
void *ptr;
/* Sanity checks */
assert ( size != 0 );
assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
- valgrind_make_blocks_defined();
- check_blocks();
+ valgrind_make_blocks_defined ( heap );
+ check_blocks ( heap );
- /* Round up size to multiple of MIN_MEMBLOCK_SIZE and
- * calculate alignment mask.
- */
- actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
- ~( MIN_MEMBLOCK_SIZE - 1 ) );
+ /* Limit offset to requested alignment */
+ offset &= ( align ? ( align - 1 ) : 0 );
+
+ /* Calculate offset of memory block */
+ actual_offset = ( offset & ~( heap->align - 1 ) );
+ assert ( actual_offset <= offset );
+
+ /* Calculate size of memory block */
+ actual_size = ( ( size + offset - actual_offset + heap->align - 1 )
+ & ~( heap->align - 1 ) );
if ( ! actual_size ) {
/* The requested size is not permitted to be zero. A
* zero result at this point indicates that either the
@@ -308,14 +302,16 @@ void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
goto done;
}
assert ( actual_size >= size );
- align_mask = ( ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 ) );
- DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
+ /* Calculate alignment mask */
+ align_mask = ( ( align - 1 ) | ( heap->align - 1 ) );
+
+ DBGC2 ( heap, "HEAP allocating %#zx (aligned %#zx+%#zx)\n",
size, align, offset );
while ( 1 ) {
/* Search through blocks for the first one with enough space */
- list_for_each_entry ( block, &free_blocks, list ) {
- pre_size = ( ( offset - virt_to_phys ( block ) )
+ list_for_each_entry ( block, &heap->blocks, list ) {
+ pre_size = ( ( actual_offset - virt_to_phys ( block ) )
& align_mask );
if ( ( block->size < pre_size ) ||
( ( block->size - pre_size ) < actual_size ) )
@@ -329,15 +325,17 @@ void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
pre = block;
block = ( ( ( void * ) pre ) + pre_size );
post = ( ( ( void * ) block ) + actual_size );
- DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
+ DBGC2 ( heap, "HEAP splitting [%p,%p) -> [%p,%p) "
+ "+ [%p,%p)\n", pre,
( ( ( void * ) pre ) + pre->size ), pre, block,
post, ( ( ( void * ) pre ) + pre->size ) );
/* If there is a "post" block, add it in to
- * the free list. Leak it if it is too small
- * (which can happen only at the very end of
- * the heap).
+ * the free list.
*/
- if ( post_size >= MIN_MEMBLOCK_SIZE ) {
+ if ( post_size ) {
+ assert ( post_size >= sizeof ( *block ) );
+ assert ( ( post_size &
+ ( heap->align - 1 ) ) == 0 );
VALGRIND_MAKE_MEM_UNDEFINED ( post,
sizeof ( *post ));
post->size = post_size;
@@ -349,38 +347,42 @@ void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
*/
pre->size = pre_size;
/* If there is no "pre" block, remove it from
- * the list. Also remove it (i.e. leak it) if
- * it is too small, which can happen only at
- * the very start of the heap.
+ * the list.
*/
- if ( pre_size < MIN_MEMBLOCK_SIZE ) {
+ if ( ! pre_size ) {
list_del ( &pre->list );
VALGRIND_MAKE_MEM_NOACCESS ( pre,
sizeof ( *pre ) );
+ } else {
+ assert ( pre_size >= sizeof ( *block ) );
+ assert ( ( pre_size &
+ ( heap->align - 1 ) ) == 0 );
}
/* Update memory usage statistics */
- freemem -= actual_size;
- usedmem += actual_size;
- if ( usedmem > maxusedmem )
- maxusedmem = usedmem;
+ heap->freemem -= actual_size;
+ heap->usedmem += actual_size;
+ if ( heap->usedmem > heap->maxusedmem )
+ heap->maxusedmem = heap->usedmem;
/* Return allocated block */
- DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
- ( ( ( void * ) block ) + size ) );
- ptr = block;
+ ptr = ( ( ( void * ) block ) + offset - actual_offset );
+ DBGC2 ( heap, "HEAP allocated [%p,%p) within "
+ "[%p,%p)\n", ptr, ( ptr + size ), block,
+ ( ( ( void * ) block ) + actual_size ) );
VALGRIND_MAKE_MEM_UNDEFINED ( ptr, size );
goto done;
}
- /* Try discarding some cached data to free up memory */
- DBGC ( &heap, "Attempting discard for %#zx (aligned %#zx+%zx), "
- "used %zdkB\n", size, align, offset, ( usedmem >> 10 ) );
- valgrind_make_blocks_noaccess();
- discarded = discard_cache();
- valgrind_make_blocks_defined();
- check_blocks();
- if ( ! discarded ) {
- /* Nothing available to discard */
- DBGC ( &heap, "Failed to allocate %#zx (aligned "
+ /* Attempt to grow heap to satisfy allocation */
+ DBGC ( heap, "HEAP attempting to grow for %#zx (aligned "
+ "%#zx+%zx), used %zdkB\n", size, align, offset,
+ ( heap->usedmem >> 10 ) );
+ valgrind_make_blocks_noaccess ( heap );
+ grown = ( heap->grow ? heap->grow ( actual_size ) : 0 );
+ valgrind_make_blocks_defined ( heap );
+ check_blocks ( heap );
+ if ( ! grown ) {
+ /* Heap did not grow: fail allocation */
+ DBGC ( heap, "HEAP failed to allocate %#zx (aligned "
"%#zx)\n", size, align );
ptr = NULL;
goto done;
@@ -388,23 +390,25 @@ void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
}
done:
- check_blocks();
- valgrind_make_blocks_noaccess();
+ check_blocks ( heap );
+ valgrind_make_blocks_noaccess ( heap );
return ptr;
}
/**
* Free a memory block
*
- * @v ptr Memory allocated by alloc_memblock(), or NULL
+ * @v heap Heap
+ * @v ptr Memory allocated by heap_alloc_block(), or NULL
* @v size Size of the memory
*
* If @c ptr is NULL, no action is taken.
*/
-void free_memblock ( void *ptr, size_t size ) {
+static void heap_free_block ( struct heap *heap, void *ptr, size_t size ) {
struct memory_block *freeing;
struct memory_block *block;
struct memory_block *tmp;
+ size_t sub_offset;
size_t actual_size;
ssize_t gap_before;
ssize_t gap_after = -1;
@@ -415,29 +419,31 @@ void free_memblock ( void *ptr, size_t size ) {
VALGRIND_MAKE_MEM_NOACCESS ( ptr, size );
/* Sanity checks */
- valgrind_make_blocks_defined();
- check_blocks();
+ valgrind_make_blocks_defined ( heap );
+ check_blocks ( heap );
- /* Round up size to match actual size that alloc_memblock()
- * would have used.
+ /* Round up to match actual block that heap_alloc_block() would
+ * have allocated.
*/
assert ( size != 0 );
- actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
- ~( MIN_MEMBLOCK_SIZE - 1 ) );
- freeing = ptr;
+ sub_offset = ( virt_to_phys ( ptr ) & ( heap->align - 1 ) );
+ freeing = ( ptr - sub_offset );
+ actual_size = ( ( size + sub_offset + heap->align - 1 ) &
+ ~( heap->align - 1 ) );
+ DBGC2 ( heap, "HEAP freeing [%p,%p) within [%p,%p)\n",
+ ptr, ( ptr + size ), freeing,
+ ( ( ( void * ) freeing ) + actual_size ) );
VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
- DBGC2 ( &heap, "Freeing [%p,%p)\n",
- freeing, ( ( ( void * ) freeing ) + size ) );
/* Check that this block does not overlap the free list */
if ( ASSERTING ) {
- list_for_each_entry ( block, &free_blocks, list ) {
+ list_for_each_entry ( block, &heap->blocks, list ) {
if ( ( ( ( void * ) block ) <
( ( void * ) freeing + actual_size ) ) &&
( ( void * ) freeing <
( ( void * ) block + block->size ) ) ) {
assert ( 0 );
- DBGC ( &heap, "Double free of [%p,%p) "
+ DBGC ( heap, "HEAP double free of [%p,%p) "
"overlapping [%p,%p) detected from %p\n",
freeing,
( ( ( void * ) freeing ) + size ), block,
@@ -449,7 +455,7 @@ void free_memblock ( void *ptr, size_t size ) {
/* Insert/merge into free list */
freeing->size = actual_size;
- list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
+ list_for_each_entry_safe ( block, tmp, &heap->blocks, list ) {
/* Calculate gaps before and after the "freeing" block */
gap_before = ( ( ( void * ) freeing ) -
( ( ( void * ) block ) + block->size ) );
@@ -457,7 +463,8 @@ void free_memblock ( void *ptr, size_t size ) {
( ( ( void * ) freeing ) + freeing->size ) );
/* Merge with immediately preceding block, if possible */
if ( gap_before == 0 ) {
- DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
+ DBGC2 ( heap, "HEAP merging [%p,%p) + [%p,%p) -> "
+ "[%p,%p)\n", block,
( ( ( void * ) block ) + block->size ), freeing,
( ( ( void * ) freeing ) + freeing->size ),
block,
@@ -477,13 +484,13 @@ void free_memblock ( void *ptr, size_t size ) {
* possible, merge the following block into the "freeing"
* block.
*/
- DBGC2 ( &heap, "[%p,%p)\n",
+ DBGC2 ( heap, "HEAP freed [%p,%p)\n",
freeing, ( ( ( void * ) freeing ) + freeing->size ) );
list_add_tail ( &freeing->list, &block->list );
if ( gap_after == 0 ) {
- DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
- ( ( ( void * ) freeing ) + freeing->size ), block,
- ( ( ( void * ) block ) + block->size ), freeing,
+ DBGC2 ( heap, "HEAP merging [%p,%p) + [%p,%p) -> [%p,%p)\n",
+ freeing, ( ( ( void * ) freeing ) + freeing->size ),
+ block, ( ( ( void * ) block ) + block->size ), freeing,
( ( ( void * ) block ) + block->size ) );
freeing->size += block->size;
list_del ( &block->list );
@@ -491,17 +498,26 @@ void free_memblock ( void *ptr, size_t size ) {
}
/* Update memory usage statistics */
- freemem += actual_size;
- usedmem -= actual_size;
+ heap->freemem += actual_size;
+ heap->usedmem -= actual_size;
+
+ /* Allow heap to shrink */
+ if ( heap->shrink && heap->shrink ( freeing, freeing->size ) ) {
+ list_del ( &freeing->list );
+ heap->freemem -= freeing->size;
+ VALGRIND_MAKE_MEM_UNDEFINED ( freeing, freeing->size );
+ }
- check_blocks();
- valgrind_make_blocks_noaccess();
+ /* Sanity checks */
+ check_blocks ( heap );
+ valgrind_make_blocks_noaccess ( heap );
}
/**
* Reallocate memory
*
- * @v old_ptr Memory previously allocated by malloc(), or NULL
+ * @v heap Heap
+ * @v old_ptr Memory previously allocated by heap_realloc(), or NULL
* @v new_size Requested size
* @ret new_ptr Allocated memory, or NULL
*
@@ -515,26 +531,27 @@ void free_memblock ( void *ptr, size_t size ) {
* If allocation fails the previously allocated block is left
* untouched and NULL is returned.
*
- * Calling realloc() with a new size of zero is a valid way to free a
- * memory block.
+ * Calling heap_realloc() with a new size of zero is a valid way to
+ * free a memory block.
*/
-void * realloc ( void *old_ptr, size_t new_size ) {
+void * heap_realloc ( struct heap *heap, void *old_ptr, size_t new_size ) {
struct autosized_block *old_block;
struct autosized_block *new_block;
size_t old_total_size;
size_t new_total_size;
size_t old_size;
+ size_t offset = offsetof ( struct autosized_block, data );
void *new_ptr = NOWHERE;
/* Allocate new memory if necessary. If allocation fails,
* return without touching the old block.
*/
if ( new_size ) {
- new_total_size = ( new_size +
- offsetof ( struct autosized_block, data ) );
+ new_total_size = ( new_size + offset );
if ( new_total_size < new_size )
return NULL;
- new_block = alloc_memblock ( new_total_size, 1, 0 );
+ new_block = heap_alloc_block ( heap, new_total_size,
+ heap->ptr_align, -offset );
if ( ! new_block )
return NULL;
new_block->size = new_total_size;
@@ -542,8 +559,10 @@ void * realloc ( void *old_ptr, size_t new_size ) {
sizeof ( new_block->size ) );
new_ptr = &new_block->data;
VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
+ assert ( ( ( ( intptr_t ) new_ptr ) &
+ ( heap->ptr_align - 1 ) ) == 0 );
}
-
+
/* Copy across relevant part of the old data region (if any),
* then free it. Note that at this point either (a) new_ptr
* is valid, or (b) new_size is 0; either way, the memcpy() is
@@ -556,21 +575,40 @@ void * realloc ( void *old_ptr, size_t new_size ) {
sizeof ( old_block->size ) );
old_total_size = old_block->size;
assert ( old_total_size != 0 );
- old_size = ( old_total_size -
- offsetof ( struct autosized_block, data ) );
+ old_size = ( old_total_size - offset );
memcpy ( new_ptr, old_ptr,
( ( old_size < new_size ) ? old_size : new_size ) );
VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
- free_memblock ( old_block, old_total_size );
+ heap_free_block ( heap, old_block, old_total_size );
}
if ( ASSERTED ) {
- DBGC ( &heap, "Possible memory corruption detected from %p\n",
- __builtin_return_address ( 0 ) );
+ DBGC ( heap, "HEAP detected possible memory corruption "
+ "from %p\n", __builtin_return_address ( 0 ) );
}
return new_ptr;
}
+/** The global heap */
+static struct heap heap = {
+ .blocks = LIST_HEAD_INIT ( heap.blocks ),
+ .align = MIN_MEMBLOCK_ALIGN,
+ .ptr_align = sizeof ( void * ),
+ .grow = discard_cache,
+};
+
+/**
+ * Reallocate memory
+ *
+ * @v old_ptr Memory previously allocated by malloc(), or NULL
+ * @v new_size Requested size
+ * @ret new_ptr Allocated memory, or NULL
+ */
+void * realloc ( void *old_ptr, size_t new_size ) {
+
+ return heap_realloc ( &heap, old_ptr, new_size );
+}
+
/**
* Allocate memory
*
@@ -585,8 +623,8 @@ void * malloc ( size_t size ) {
ptr = realloc ( NULL, size );
if ( ASSERTED ) {
- DBGC ( &heap, "Possible memory corruption detected from %p\n",
- __builtin_return_address ( 0 ) );
+ DBGC ( &heap, "HEAP detected possible memory corruption "
+ "from %p\n", __builtin_return_address ( 0 ) );
}
return ptr;
}
@@ -605,8 +643,8 @@ void free ( void *ptr ) {
realloc ( ptr, 0 );
if ( ASSERTED ) {
- DBGC ( &heap, "Possible memory corruption detected from %p\n",
- __builtin_return_address ( 0 ) );
+ DBGC ( &heap, "HEAP detected possible memory corruption "
+ "from %p\n", __builtin_return_address ( 0 ) );
}
}
@@ -628,35 +666,87 @@ void * zalloc ( size_t size ) {
if ( data )
memset ( data, 0, size );
if ( ASSERTED ) {
- DBGC ( &heap, "Possible memory corruption detected from %p\n",
- __builtin_return_address ( 0 ) );
+ DBGC ( &heap, "HEAP detected possible memory corruption "
+ "from %p\n", __builtin_return_address ( 0 ) );
}
return data;
}
/**
+ * Allocate memory with specified physical alignment and offset
+ *
+ * @v size		Requested size
+ * @v phys_align	Physical alignment
+ * @v offset		Offset from physical alignment
+ * @ret ptr		Memory, or NULL
+ *
+ * @c phys_align must be a power of two.  @c size may not be zero.
+ */
+void * malloc_phys_offset ( size_t size, size_t phys_align, size_t offset ) {
+ void * ptr;
+
+ ptr = heap_alloc_block ( &heap, size, phys_align, offset );
+ if ( ptr && size ) {
+ assert ( ( phys_align == 0 ) ||
+ ( ( ( virt_to_phys ( ptr ) ^ offset ) &
+ ( phys_align - 1 ) ) == 0 ) );
+ VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 );
+ }
+ return ptr;
+}
+
+/**
+ * Allocate memory with specified physical alignment
+ *
+ * @v size		Requested size
+ * @v phys_align	Physical alignment
+ * @ret ptr		Memory, or NULL
+ *
+ * @c phys_align must be a power of two.  @c size may not be zero.
+ */
+void * malloc_phys ( size_t size, size_t phys_align ) {
+
+ return malloc_phys_offset ( size, phys_align, 0 );
+}
+
+/**
+ * Free memory allocated with malloc_phys()
+ *
+ * @v ptr Memory allocated by malloc_phys(), or NULL
+ * @v size Size of memory, as passed to malloc_phys()
+ *
+ * Memory allocated with malloc_phys() can only be freed with
+ * free_phys(); it cannot be freed with the standard free().
+ *
+ * If @c ptr is NULL, no action is taken.
+ */
+void free_phys ( void *ptr, size_t size ) {
+
+ VALGRIND_FREELIKE_BLOCK ( ptr, 0 );
+ heap_free_block ( &heap, ptr, size );
+}
+
+/**
* Add memory to allocation pool
*
+ * @v heap Heap
* @v start Start address
- * @v end End address
+ * @v len Length of memory
*
- * Adds a block of memory [start,end) to the allocation pool. This is
- * a one-way operation; there is no way to reclaim this memory.
- *
- * @c start must be aligned to at least a multiple of sizeof(void*).
+ * Adds a block of memory to the allocation pool. The memory must be
+ * aligned to the heap's required free memory block alignment.
*/
-void mpopulate ( void *start, size_t len ) {
+void heap_populate ( struct heap *heap, void *start, size_t len ) {
- /* Prevent free_memblock() from rounding up len beyond the end
- * of what we were actually given...
- */
- len &= ~( MIN_MEMBLOCK_SIZE - 1 );
+ /* Sanity checks */
+ assert ( ( virt_to_phys ( start ) & ( heap->align - 1 ) ) == 0 );
+ assert ( ( len & ( heap->align - 1 ) ) == 0 );
/* Add to allocation pool */
- free_memblock ( start, len );
+ heap_free_block ( heap, start, len );
/* Fix up memory usage statistics */
- usedmem += len;
+ heap->usedmem += len;
}
/**
@@ -664,13 +754,19 @@ void mpopulate ( void *start, size_t len ) {
*
*/
static void init_heap ( void ) {
- VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
- VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
- mpopulate ( heap, sizeof ( heap ) );
+
+ /* Sanity check */
+ build_assert ( MIN_MEMBLOCK_ALIGN >= sizeof ( struct memory_block ) );
+
+ /* Populate heap */
+ VALGRIND_MAKE_MEM_NOACCESS ( heap_area, sizeof ( heap_area ) );
+ VALGRIND_MAKE_MEM_NOACCESS ( &heap.blocks, sizeof ( heap.blocks ) );
+ heap_populate ( &heap, heap_area, sizeof ( heap_area ) );
}
/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
+ .name = "heap",
.initialise = init_heap,
};
@@ -680,7 +776,8 @@ struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
*/
static void shutdown_cache ( int booting __unused ) {
discard_all_cache();
- DBGC ( &heap, "Maximum heap usage %zdkB\n", ( maxusedmem >> 10 ) );
+ DBGC ( &heap, "HEAP maximum usage %zdkB\n",
+ ( heap.maxusedmem >> 10 ) );
}
/** Memory allocator shutdown function */
@@ -689,19 +786,17 @@ struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
.shutdown = shutdown_cache,
};
-#if 0
-#include <stdio.h>
/**
- * Dump free block list
+ * Dump free block list (for debugging)
*
*/
-void mdumpfree ( void ) {
+void heap_dump ( struct heap *heap ) {
struct memory_block *block;
- printf ( "Free block list:\n" );
- list_for_each_entry ( block, &free_blocks, list ) {
- printf ( "[%p,%p] (size %#zx)\n", block,
- ( ( ( void * ) block ) + block->size ), block->size );
+ dbg_printf ( "HEAP free block list:\n" );
+ list_for_each_entry ( block, &heap->blocks, list ) {
+ dbg_printf ( "...[%p,%p] (size %#zx)\n", block,
+ ( ( ( void * ) block ) + block->size ),
+ block->size );
}
}
-#endif