summaryrefslogtreecommitdiffstats
path: root/src/core/malloc.c
diff options
context:
space:
mode:
authorMichael Brown2006-04-25 05:30:46 +0200
committerMichael Brown2006-04-25 05:30:46 +0200
commitb601a7d355a4c24563f4f2db40ba52ae183ce493 (patch)
tree84525e5d35329776f8a4c90563ae4e435c001d8f /src/core/malloc.c
parentAdd __constant_flsl(), because it's useful for finding out the next (diff)
downloadipxe-b601a7d355a4c24563f4f2db40ba52ae183ce493.tar.gz
ipxe-b601a7d355a4c24563f4f2db40ba52ae183ce493.tar.xz
ipxe-b601a7d355a4c24563f4f2db40ba52ae183ce493.zip
Updated memory allocator to improve support for unaligned or partially
aligned blocks. Moved header to include/malloc.h, since we now also provide the POSIX-like malloc()/free() pair. Not yet tested.
Diffstat (limited to 'src/core/malloc.c')
-rw-r--r--src/core/malloc.c276
1 file changed, 158 insertions, 118 deletions
diff --git a/src/core/malloc.c b/src/core/malloc.c
index 8078406e..d086cc1e 100644
--- a/src/core/malloc.c
+++ b/src/core/malloc.c
@@ -16,189 +16,228 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <stddef.h>
#include <stdint.h>
#include <string.h>
+#include <strings.h>
#include <io.h>
#include <gpxe/list.h>
-#include <gpxe/malloc.h>
+#include <malloc.h>
/** @file
*
- * Memory allocation
+ * Dynamic memory allocation
*
*/
/** A free block of memory */
-struct free_block {
+struct memory_block {
/** List of free blocks */
struct list_head list;
/** Size of this block */
size_t size;
};
+#define MIN_MEMBLOCK_SIZE \
+ ( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
+
+/** A block of allocated memory complete with size information */
+struct autosized_block {
+ /** Size of this block */
+ size_t size;
+ /** Remaining data */
+ char data[0];
+};
+
/** List of free memory blocks */
static LIST_HEAD ( free_blocks );
/**
- * Round size up to a memory allocation block size
+ * Allocate a memory block
+ *
+ * @v size Requested size
+ * @v align Physical alignment
+ * @ret ptr Memory block, or NULL
*
- * @v requested Requested size
- * @ret obtained Obtained size
+ * Allocates a memory block @b physically aligned as requested. No
+ * guarantees are provided for the alignment of the virtual address.
*
- * The requested size is rounded up to the minimum allocation block
- * size (the size of a struct @c free_block) and then rounded up to
- * the nearest power of two.
+ * @c align must be a power of two. @c size may not be zero.
*/
-static size_t block_size ( size_t requested ) {
- size_t obtained = 1;
-
- while ( ( obtained < sizeof ( struct free_block ) ) ||
- ( obtained < requested ) ) {
- obtained <<= 1;
+void * alloc_memblock ( size_t size, size_t align ) {
+ struct memory_block *block;
+ size_t pre_size;
+ ssize_t post_size;
+ struct memory_block *pre;
+ struct memory_block *post;
+
+ /* Round up alignment and size to multiples of MIN_MEMBLOCK_SIZE */
+ align = ( align + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
+ size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
+
+ /* Search through blocks for the first one with enough space */
+ list_for_each_entry ( block, &free_blocks, list ) {
+ pre_size = ( - virt_to_phys ( block ) ) & ( align - 1 );
+ post_size = block->size - pre_size - size;
+ if ( post_size >= 0 ) {
+ /* Split block into pre-block, block, and
+ * post-block. After this split, the "pre"
+ * block is the one currently linked into the
+ * free list.
+ */
+ pre = block;
+ block = ( ( ( void * ) pre ) + pre_size );
+ post = ( ( ( void * ) block ) + size );
+ /* If there is a "post" block, add it in to
+ * the free list. Leak it if it is too small
+ * (which can happen only at the very end of
+ * the heap).
+ */
+ if ( ( size_t ) post_size > MIN_MEMBLOCK_SIZE ) {
+ post->size = post_size;
+ list_add ( &post->list, &pre->list );
+ }
+ /* Shrink "pre" block, leaving the main block
+ * isolated and no longer part of the free
+ * list.
+ */
+ pre->size = pre_size;
+ /* If there is no "pre" block, remove it from
+ * the list. Also remove it (i.e. leak it) if
+ * it is too small, which can happen only at
+ * the very start of the heap.
+ */
+ if ( pre_size < MIN_MEMBLOCK_SIZE )
+ list_del ( &pre->list );
+ /* Zero allocated memory, for calloc() */
+ memset ( block, 0, size );
+ return block;
+ }
}
- return obtained;
+ return NULL;
}
/**
- * Allocate memory
+ * Free a memory block
*
- * @v size Requested size
- * @ret ptr Allocated memory
- *
- * gmalloc() will always allocate memory in power-of-two sized blocks,
- * aligned to the corresponding power-of-two boundary. For example, a
- * request for 1500 bytes will return a 2048-byte block aligned to a
- * 2048-byte boundary.
- *
- * The alignment applies to the physical address, not the virtual
- * address. The pointer value returned by gmalloc() therefore has no
- * alignment guarantees, except as provided for by the
- * virtual-to-physical mapping. (In a PXE environment, this mapping
- * is guaranteed to be a multiple of 16 bytes.)
- *
- * Unlike traditional malloc(), the caller must remember the size of
- * the allocated block and pass the size to gfree(). This is done in
- * order to allow efficient allocation of power-of-two sized and
- * aligned blocks.
+ * @v ptr Memory allocated by alloc_memblock(), or NULL
+ * @v size Size of the memory
+ *
+ * If @c ptr is NULL, no action is taken.
*/
-void * gmalloc ( size_t size ) {
- struct free_block *block;
- struct free_block *buddy;
+void free_memblock ( void *ptr, size_t size ) {
+ struct memory_block *freeing;
+ struct memory_block *block;
+ ssize_t gap_before;
+ ssize_t gap_after;
+
+ /* Allow for ptr==NULL */
+ if ( ! ptr )
+ return;
- /* Round up block size to power of two */
- size = block_size ( size );
+ /* Round up size to match actual size that alloc_memblock()
+ * would have used.
+ */
+ size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
+ freeing = ptr;
+ freeing->size = size;
- /* Find the best available block */
+ /* Insert/merge into free list */
list_for_each_entry ( block, &free_blocks, list ) {
- if ( block->size == size ) {
+ /* Calculate gaps before and after the "freeing" block */
+ gap_before = ( ( ( void * ) freeing ) -
+ ( ( ( void * ) block ) + block->size ) );
+ gap_after = ( ( ( void * ) block ) -
+ ( ( ( void * ) freeing ) + freeing->size ) );
+ /* Merge with immediately preceding block, if possible */
+ if ( gap_before == 0 ) {
+ block->size += size;
list_del ( &block->list );
- memset ( block, 0, size );
- return block;
+ freeing = block;
}
- while ( block->size > size ) {
- block->size >>= 1;
- buddy = ( ( ( void * ) block ) + block->size );
- buddy->size = block->size;
- list_add ( &buddy->list, &block->list );
+ /* Insert before the immediately following block. If
+ * possible, merge the following block into the
+ * "freeing" block.
+ */
+ if ( gap_after >= 0 ) {
+ list_add_tail ( &freeing->list, &block->list );
+ if ( gap_after == 0 ) {
+ freeing->size += block->size;
+ list_del ( &block->list );
+ }
+ break;
}
}
-
- /* Nothing available */
- return NULL;
}
/**
- * Free memory
- *
- * @v ptr Allocated memory
- * @v size Originally requested size
+ * Allocate memory
*
- * Frees memory originally allocated by gmalloc().
+ * @v size Requested size
+ * @ret ptr Memory, or NULL
*
- * Calling gfree() with a NULL @c ptr is explicitly allowed, and
- * defined to have no effect. Code such as
+ * Allocates memory with no particular alignment requirement. @c ptr
+ * will be aligned to at least a multiple of sizeof(void*).
*
- * @code
+ * Note that malloc() is very inefficient for allocating blocks where
+ * the size is a power of two; if you have many of these
+ * (e.g. descriptor rings, data buffers) you should use malloc_dma()
+ * instead.
+ */
+void * malloc ( size_t size ) {
+ size_t total_size;
+ struct autosized_block *block;
+
+ total_size = size + offsetof ( struct autosized_block, data );
+ block = alloc_memblock ( total_size, 1 );
+ if ( ! block )
+ return NULL;
+ block->size = size;
+ return &block->data;
+}
+
+/**
+ * Free memory
*
- * if ( ! my_ptr )
- * gfree ( my_ptr, my_size )
+ * @v ptr		Memory allocated by malloc(), or NULL
*
- * @endcode
+ * Memory allocated with malloc_dma() cannot be freed with free(); it
+ * must be freed with free_dma() instead.
*
- * is perfectly valid, but should be avoided as unnecessary bloat.
+ * If @c ptr is NULL, no action is taken.
*/
-void gfree ( void *ptr, size_t size ) {
- struct free_block *freed_block = ptr;
- struct free_block *block;
-
- /* Cope with gfree(NULL,x) */
- if ( ! ptr )
- return;
+void free ( void *ptr ) {
+ struct autosized_block *block;
- /* Round up block size to power of two */
- size = block_size ( size );
- freed_block->size = size;
-
- /* Merge back into free list */
- list_for_each_entry ( block, &free_blocks, list ) {
- if ( ( ( virt_to_phys ( block ) ^
- virt_to_phys ( freed_block ) ) == size ) &&
- ( block->size == size ) ) {
- list_del ( &block->list );
- size <<= 1;
- if ( block < freed_block )
- freed_block = block;
- freed_block->size = size;
- } else if ( block->size > size ) {
- break;
- }
+ if ( ptr ) {
+ block = container_of ( ptr, struct autosized_block, data );
+ free_memblock ( block, block->size );
}
- list_add_tail ( &freed_block->list, &block->list );
}
/**
* Add memory to allocation pool
*
* @v start Start address
- * @v len Length
+ * @v len		Length of memory to add
*
- * Adds a block of memory to the allocation pool. This is a one-way
- * operation; there is no way to reclaim this memory.
+ * Adds a block of memory [start,start+len) to the allocation pool.  This is
+ * a one-way operation; there is no way to reclaim this memory.
*
- * There are no alignment requirements on either start or len.
+ * @c start must be aligned to at least a multiple of sizeof(void*).
*/
-void gmpopulate ( void *start, size_t len ) {
- size_t frag_len;
-
- /* Split region into power-of-two sized and aligned blocks,
- * and feed them to gfree().
- */
- while ( len ) {
- frag_len = 1;
- /* Find maximum allowed alignment for this address */
- while ( ( virt_to_phys ( start ) & frag_len ) == 0 ) {
- frag_len <<= 1;
- }
- /* Find maximum block size that fits in remaining space */
- while ( frag_len > len ) {
- frag_len >>= 1;
- }
- /* Skip blocks that are too small */
- if ( frag_len >= sizeof ( struct free_block ) )
- gfree ( start, frag_len );
- start += frag_len;
- len -= frag_len;
- }
+void mpopulate ( void *start, size_t len ) {
+ free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
}
-#if 0
+#if 1
#include <vsprintf.h>
/**
* Dump free block list
*
*/
-void gdumpfree ( void ) {
- struct free_block *block;
+void mdumpfree ( void ) {
+ struct memory_block *block;
printf ( "Free block list:\n" );
list_for_each_entry ( block, &free_blocks, list ) {
@@ -207,3 +246,4 @@ void gdumpfree ( void ) {
}
}
#endif
+