diff --git a/components/heap/heap_tlsf.c b/components/heap/heap_tlsf.c
index 8251796339..69a105a437 100644
--- a/components/heap/heap_tlsf.c
+++ b/components/heap/heap_tlsf.c
@@ -90,12 +90,6 @@ tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
 tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
 tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
 
-/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
-tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
-
-/* Ensure we've properly tuned our sizes. */
-tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
-
 static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align)
 {
     tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
@@ -120,7 +114,7 @@ static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr
 ** Adjust an allocation size to be aligned to word size, and no smaller
 ** than internal minimum.
 */
-static inline __attribute__((__always_inline__)) size_t adjust_request_size(size_t size, size_t align)
+static inline __attribute__((__always_inline__)) size_t adjust_request_size(tlsf_t tlsf, size_t size, size_t align)
 {
     size_t adjust = 0;
     if (size)
@@ -128,7 +122,7 @@ static inline __attribute__((__always_inline__)) size_t adjust_request_size(size
         const size_t aligned = align_up(size, align);
 
         /* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
-        if (aligned < block_size_max)
+        if (aligned < tlsf_block_size_max(tlsf))
         {
             adjust = tlsf_max(aligned, block_size_min);
         }
@@ -141,10 +135,10 @@ static inline __attribute__((__always_inline__)) size_t adjust_request_size(size
 ** the documentation found in the white paper.
 */
-static inline __attribute__((__always_inline__)) void mapping_insert(size_t size, int* fli, int* sli)
+static inline __attribute__((__always_inline__)) void mapping_insert(control_t *control, size_t size, int* fli, int* sli)
 {
     int fl, sl;
-    if (size < SMALL_BLOCK_SIZE)
+    if (size < control->small_block_size)
     {
         /* Store small blocks in first list. */
         fl = 0;
@@ -153,22 +147,22 @@ static inline __attribute__((__always_inline__)) void mapping_insert(size_t size
     else
     {
         fl = tlsf_fls(size);
-        sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
-        fl -= (FL_INDEX_SHIFT - 1);
+        sl = tlsf_cast(int, size >> (fl - control->sl_index_count_log2)) ^ (1 << control->sl_index_count_log2);
+        fl -= (control->fl_index_shift - 1);
     }
     *fli = fl;
     *sli = sl;
 }
 
 /* This version rounds up to the next block size (for allocations) */
-static inline __attribute__((__always_inline__)) void mapping_search(size_t size, int* fli, int* sli)
+static inline __attribute__((__always_inline__)) void mapping_search(control_t *control, size_t size, int* fli, int* sli)
 {
-    if (size >= SMALL_BLOCK_SIZE)
+    if (size >= control->small_block_size)
     {
-        const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
+        const size_t round = (1 << (tlsf_fls(size) - control->sl_index_count_log2)) - 1;
         size += round;
     }
-    mapping_insert(size, fli, sli);
+    mapping_insert(control, size, fli, sli);
 }
 
 static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
@@ -200,7 +194,7 @@ static inline __attribute__((__always_inline__)) block_header_t* search_suitable
     *sli = sl;
 
     /* Return the first block in the free list. */
-    return control->blocks[fl][sl];
+    return control->blocks[fl*control->sl_index_count + sl];
 }
 
 /* Remove a free block from the free list.*/
@@ -214,9 +208,9 @@ static inline __attribute__((__always_inline__)) void remove_free_block(control_
     prev->next_free = next;
 
     /* If this block is the head of the free list, set new head. */
-    if (control->blocks[fl][sl] == block)
+    if (control->blocks[fl*control->sl_index_count + sl] == block)
     {
-        control->blocks[fl][sl] = next;
+        control->blocks[fl*control->sl_index_count + sl] = next;
 
         /* If the new head is null, clear the bitmap. */
         if (next == &control->block_null)
@@ -235,7 +229,7 @@ static inline __attribute__((__always_inline__)) void remove_free_block(control_
 
 /* Insert a free block into the free block list. */
 static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
 {
-    block_header_t* current = control->blocks[fl][sl];
+    block_header_t* current = control->blocks[fl*control->sl_index_count + sl];
     tlsf_assert(current && "free list cannot have a null entry");
     tlsf_assert(block && "cannot insert a null entry into the free list");
     block->next_free = current;
@@ -248,7 +242,7 @@ static inline __attribute__((__always_inline__)) void insert_free_block(control_
    ** Insert the new block at the head of the list, and mark the first-
    ** and second-level bitmaps appropriately.
    */
-    control->blocks[fl][sl] = block;
+    control->blocks[fl*control->sl_index_count + sl] = block;
     control->fl_bitmap |= (1 << fl);
     control->sl_bitmap[fl] |= (1 << sl);
 }
@@ -257,7 +251,7 @@ static inline __attribute__((__always_inline__)) void insert_free_block(control_
 static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block)
 {
     int fl, sl;
-    mapping_insert(block_size(block), &fl, &sl);
+    mapping_insert(control, block_size(block), &fl, &sl);
     remove_free_block(control, block, fl, sl);
 }
 
@@ -265,7 +259,7 @@ static inline __attribute__((__always_inline__)) void block_remove(control_t* co
 static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block)
 {
     int fl, sl;
-    mapping_insert(block_size(block), &fl, &sl);
+    mapping_insert(control, block_size(block), &fl, &sl);
     insert_free_block(control, block, fl, sl);
 }
 
@@ -428,7 +422,7 @@ static inline __attribute__((__always_inline__)) block_header_t* block_locate_f
 
     if (size)
     {
-        mapping_search(size, &fl, &sl);
+        mapping_search(control, size, &fl, &sl);
 
         /*
        ** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
@@ -436,7 +430,7 @@ static inline __attribute__((__always_inline__)) block_header_t* block_locate_f
        ** So, we protect against that here, since this is the only callsite of mapping_search.
        ** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
        */
-        if (fl < FL_INDEX_COUNT)
+        if (fl < control->fl_index_count)
        {
            block = search_suitable_block(control, &fl, &sl);
        }
@@ -465,20 +459,44 @@ static inline __attribute__((__always_inline__)) void* block_prepare_used(contro
 }
 
 /* Clear structure and point all empty lists at the null block. */
-static void control_construct(control_t* control)
+static void control_construct(control_t* control, size_t bytes)
 {
     int i, j;
 
     control->block_null.next_free = &control->block_null;
     control->block_null.prev_free = &control->block_null;
 
+    /* Find the closest power of two above the pool size for the first level. */
+    i = (bytes - 1) / (16 * 1024);
+    control->fl_index_max = FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i);
+
+    /* Adapt the second level to the pool size. */
+    if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;
+    else if (bytes <= 256 * 1024) control->sl_index_count_log2 = 4;
+    else control->sl_index_count_log2 = 5;
+
+    control->fl_index_shift = (control->sl_index_count_log2 + ALIGN_SIZE_LOG2);
+    control->sl_index_count = 1 << control->sl_index_count_log2;
+    control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;
+    control->small_block_size = 1 << control->fl_index_shift;
     control->fl_bitmap = 0;
-    for (i = 0; i < FL_INDEX_COUNT; ++i)
+
+    control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));
+    control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));
+    control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control;
+
+    /* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
+    tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");
+
+    /* Ensure we've properly tuned our sizes. */
+    tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");
+
+    for (i = 0; i < control->fl_index_count; ++i)
     {
         control->sl_bitmap[i] = 0;
-        for (j = 0; j < SL_INDEX_COUNT; ++j)
+        for (j = 0; j < control->sl_index_count; ++j)
         {
-            control->blocks[i][j] = &control->block_null;
+            control->blocks[i*control->sl_index_count + j] = &control->block_null;
         }
     }
 }
@@ -524,14 +542,14 @@ int tlsf_check(tlsf_t tlsf)
     int status = 0;
 
     /* Check that the free lists and bitmaps are accurate. */
-    for (i = 0; i < FL_INDEX_COUNT; ++i)
+    for (i = 0; i < control->fl_index_count; ++i)
     {
-        for (j = 0; j < SL_INDEX_COUNT; ++j)
+        for (j = 0; j < control->sl_index_count; ++j)
         {
             const int fl_map = control->fl_bitmap & (1 << i);
             const int sl_list = control->sl_bitmap[i];
             const int sl_map = sl_list & (1 << j);
-            const block_header_t* block = control->blocks[i][j];
+            const block_header_t* block = control->blocks[i*control->sl_index_count + j];
 
             /* Check that first- and second-level lists agree. */
            if (!fl_map)
@@ -559,7 +577,7 @@ int tlsf_check(tlsf_t tlsf)
                 tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
                 tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
 
-                mapping_insert(block_size(block), &fli, &sli);
+                mapping_insert(control, block_size(block), &fli, &sli);
                 tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
 
 #ifdef MULTI_HEAP_POISONING
@@ -631,13 +649,37 @@ int tlsf_check_pool(pool_t pool)
 
     return integ.status;
 }
 
+size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
+{
+    /* Because the allocator is good-fit, the allocatable size is one second-level range lower. */
+    if (size)
+    {
+        size_t sl_interval;
+        control_t* control = tlsf_cast(control_t*, tlsf);
+        sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count;
+        return size & ~(sl_interval - 1);
+    }
+
+    return 0;
+}
+
+
 /*
 ** Size of the TLSF structures in a given memory block passed to
 ** tlsf_create, equal to the size of a control_t
 */
-size_t tlsf_size(void)
+size_t tlsf_size(tlsf_t tlsf)
 {
-    return sizeof(control_t);
+    if (tlsf)
+    {
+        control_t* control = tlsf_cast(control_t*, tlsf);
+        return control->size;
+    }
+
+    /* No tlsf instance yet, so just return a minimum size. */
+    return sizeof(control_t) +
+           sizeof(int) * SL_INDEX_COUNT_MIN +
+           sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN;
 }
 
 size_t tlsf_align_size(void)
@@ -650,9 +692,10 @@
     return block_size_min;
 }
 
-size_t tlsf_block_size_max(void)
+size_t tlsf_block_size_max(tlsf_t tlsf)
 {
-    return block_size_max;
+    control_t* control = tlsf_cast(control_t*, tlsf);
+    return tlsf_cast(size_t, 1) << control->fl_index_max;
 }
 
 /*
@@ -685,16 +728,16 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
         return 0;
     }
 
-    if (pool_bytes < block_size_min || pool_bytes > block_size_max)
+    if (pool_bytes < block_size_min || pool_bytes > tlsf_block_size_max(tlsf))
     {
 #if defined (TLSF_64BIT)
         printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
             (unsigned int)(pool_overhead + block_size_min),
-            (unsigned int)((pool_overhead + block_size_max) / 256));
+            (unsigned int)((pool_overhead + tlsf_block_size_max(tlsf)) / 256));
 #else
         printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
             (unsigned int)(pool_overhead + block_size_min),
-            (unsigned int)(pool_overhead + block_size_max));
+            (unsigned int)(pool_overhead + tlsf_block_size_max(tlsf)));
 #endif
         return 0;
     }
@@ -730,7 +773,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
     tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
     tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");
 
-    mapping_insert(block_size(block), &fl, &sl);
+    mapping_insert(control, block_size(block), &fl, &sl);
     remove_free_block(control, block, fl, sl);
 }
 
@@ -739,7 +782,7 @@
 */
-tlsf_t tlsf_create(void* mem)
+tlsf_t tlsf_create(void* mem, size_t bytes)
 {
 #if _DEBUG
     if (test_ffs_fls())
@@ -755,27 +798,27 @@ tlsf_t tlsf_create(void* mem)
         return 0;
     }
 
-    control_construct(tlsf_cast(control_t*, mem));
+    control_construct(tlsf_cast(control_t*, mem), bytes);
 
     return tlsf_cast(tlsf_t, mem);
 }
 
 pool_t tlsf_get_pool(tlsf_t tlsf)
 {
-    return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
+    return tlsf_cast(pool_t, (char*)tlsf + tlsf_size(tlsf));
 }
 
-tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
+tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes)
 {
-    tlsf_t tlsf = tlsf_create(mem);
-    tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
+    tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);
+    tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
     return tlsf;
 }
 
 void* tlsf_malloc(tlsf_t tlsf, size_t size)
 {
     control_t* control = tlsf_cast(control_t*, tlsf);
-    size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+    size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
     block_header_t* block = block_locate_free(control, adjust);
     return block_prepare_used(control, block, adjust);
 }
@@ -806,7 +849,7 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
 void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset)
 {
     control_t* control = tlsf_cast(control_t*, tlsf);
-    const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+    const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
     const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);
 
     /*
@@ -821,7 +864,7 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off
     /* The offset is included in both `adjust` and `gap_minimum`, so we
     ** need to subtract it once.
     */
-    const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum - off_adjust, align);
+    const size_t size_with_gap = adjust_request_size(tlsf, adjust + align + gap_minimum - off_adjust, align);
 
     /*
     ** If alignment is less than or equal to base alignment, we're done, because
@@ -934,7 +977,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
         const size_t cursize = block_size(block);
         const size_t combined = cursize + block_size(next) + block_header_overhead;
-        const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+        const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
 
         tlsf_assert(!block_is_free(block) && "block already marked as free");
 
diff --git a/components/heap/heap_tlsf.h b/components/heap/heap_tlsf.h
index 58212dbb3c..4347a0d0c8 100644
--- a/components/heap/heap_tlsf.h
+++ b/components/heap/heap_tlsf.h
@@ -78,12 +78,21 @@ typedef struct control_t
     /* Empty lists point at this block to indicate they are free. */
     block_header_t block_null;
 
+    /* Per-pool runtime parameters. */
+    unsigned int fl_index_count;
+    unsigned int fl_index_shift;
+    unsigned int fl_index_max;
+    unsigned int sl_index_count;
+    unsigned int sl_index_count_log2;
+    unsigned int small_block_size;
+    size_t size;
+
     /* Bitmaps for free lists. */
     unsigned int fl_bitmap;
-    unsigned int sl_bitmap[FL_INDEX_COUNT];
+    unsigned int *sl_bitmap;
 
     /* Head of free lists. */
-    block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
+    block_header_t** blocks;
 } control_t;
 
 #include "heap_tlsf_block_functions.h"
@@ -94,8 +103,8 @@ typedef void* tlsf_t;
 typedef void* pool_t;
 
 /* Create/destroy a memory pool. */
-tlsf_t tlsf_create(void* mem);
-tlsf_t tlsf_create_with_pool(void* mem, size_t bytes);
+tlsf_t tlsf_create(void* mem, size_t bytes);
+tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes);
 pool_t tlsf_get_pool(tlsf_t tlsf);
 
 /* Add/remove memory pools. */
@@ -113,12 +122,13 @@ void tlsf_free(tlsf_t tlsf, void* ptr);
 size_t tlsf_block_size(void* ptr);
 
 /* Overheads/limits of internal structures. */
-size_t tlsf_size(void);
+size_t tlsf_size(tlsf_t tlsf);
 size_t tlsf_align_size(void);
 size_t tlsf_block_size_min(void);
-size_t tlsf_block_size_max(void);
+size_t tlsf_block_size_max(tlsf_t tlsf);
 size_t tlsf_pool_overhead(void);
 size_t tlsf_alloc_overhead(void);
+size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
 
 /* Debugging. */
 typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
diff --git a/components/heap/heap_tlsf_block_functions.h b/components/heap/heap_tlsf_block_functions.h
index 77119e39a2..efc92c9c28 100644
--- a/components/heap/heap_tlsf_block_functions.h
+++ b/components/heap/heap_tlsf_block_functions.h
@@ -63,9 +63,11 @@
 ** A free block must be large enough to store its header minus the size of
 ** the prev_phys_block field, and no larger than the number of addressable
 ** bits for FL_INDEX.
+** The block_size_max macro returns the maximum block size for the minimum
+** pool configuration; use tlsf_block_size_max() for a value specific to the pool.
 */
 #define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
-#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX)
+#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN)
 
 /*
 ** block_header_t member functions.
diff --git a/components/heap/heap_tlsf_config.h b/components/heap/heap_tlsf_config.h
index f26daf81f6..bb378e8a2a 100644
--- a/components/heap/heap_tlsf_config.h
+++ b/components/heap/heap_tlsf_config.h
@@ -37,23 +37,13 @@
 
 #pragma once
 
-#ifdef ESP_PLATFORM
-
-#include "soc/soc.h"
-
-#if !CONFIG_SPIRAM
-#define TLSF_MAX_POOL_SIZE (SOC_DIRAM_DRAM_HIGH - SOC_DIRAM_DRAM_LOW)
-#else
-#define TLSF_MAX_POOL_SIZE SOC_EXTRAM_DATA_SIZE
-#endif
-
 enum tlsf_config
 {
     /* log2 of number of linear subdivisions of block sizes. Larger
     ** values require more memory in the control structure. Values of
-    ** 4 or 5 are typical.
+    ** 4 or 5 are typical; 3 is for very small pools.
     */
-    SL_INDEX_COUNT_LOG2 = 5,
+    SL_INDEX_COUNT_LOG2_MIN = 3,
 
     /* All allocation sizes and addresses are aligned to 4 bytes. */
     ALIGN_SIZE_LOG2 = 2,
@@ -68,59 +58,9 @@ enum tlsf_config
    ** trying to split size ranges into more slots than we have available.
    ** Instead, we calculate the minimum threshold size, and place all
    ** blocks below that size into the 0th first-level list.
+   ** The values below are the absolute minimum needed to accept a pool addition.
    */
-
-    /* Tunning the first level, we can reduce TLSF pool overhead
-     * in exchange of manage a pool smaller than 4GB
-     */
-    #if (TLSF_MAX_POOL_SIZE <= (256 * 1024))
-    FL_INDEX_MAX = 18, //Each pool can have up 256KB
-    #elif (TLSF_MAX_POOL_SIZE <= (512 * 1024))
-    FL_INDEX_MAX = 19, //Each pool can have up 512KB
-    #elif (TLSF_MAX_POOL_SIZE <= (1 * 1024 * 1024))
-    FL_INDEX_MAX = 20, //Each pool can have up 1MB
-    #elif (TLSF_MAX_POOL_SIZE <= (2 * 1024 * 1024))
-    FL_INDEX_MAX = 21, //Each pool can have up 2MB
-    #elif (TLSF_MAX_POOL_SIZE <= (4 * 1024 * 1024))
-    FL_INDEX_MAX = 22, //Each pool can have up 4MB
-    #elif (TLSF_MAX_POOL_SIZE <= (8 * 1024 * 1024))
-    FL_INDEX_MAX = 23, //Each pool can have up 8MB
-    #elif (TLSF_MAX_POOL_SIZE <= (16 * 1024 * 1024))
-    FL_INDEX_MAX = 24, //Each pool can have up 16MB
-    #else
-    #error "Higher TLSF pool sizes should be added for this new config"
-    #endif
-
-    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
-    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
-    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
-
-    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
+    FL_INDEX_MAX_MIN = 14,  // For a pool smaller than 16 kB
+    SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN),
+    FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1),
 };
-
-#else
-enum tlsf_config
-{
-    //Specific configuration for host test.
-
-    /* log2 of number of linear subdivisions of block sizes. Larger
-    ** values require more memory in the control structure. Values of
-    ** 4 or 5 are typical.
-    */
-    SL_INDEX_COUNT_LOG2 = 5,
-
-    /* All allocation sizes and addresses are aligned to 4 bytes. */
-    ALIGN_SIZE_LOG2 = 2,
-    ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
-
-    /* Tunning the first level, we can reduce TLSF pool overhead
-     * in exchange of manage a pool smaller than 4GB
-     */
-    FL_INDEX_MAX = 30,
-
-    SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
-    FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
-    FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
-
-    SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
-};
-#endif
diff --git a/components/heap/multi_heap.c b/components/heap/multi_heap.c
index 01775d98a7..a72b31e629 100644
--- a/components/heap/multi_heap.c
+++ b/components/heap/multi_heap.c
@@ -122,7 +122,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
 multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
 {
     assert(start_ptr);
-    if(size < (tlsf_size() + tlsf_block_size_min() + sizeof(heap_t))) {
+    if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) {
         //Region too small to be a heap.
         return NULL;
     }
@@ -130,13 +130,13 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
     heap_t *result = (heap_t *)start_ptr;
     size -= sizeof(heap_t);
 
-    result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size);
+    result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0);
     if(!result->heap_data) {
         return NULL;
     }
 
     result->lock = NULL;
-    result->free_bytes = size - tlsf_size();
+    result->free_bytes = size - tlsf_size(result->heap_data);
     result->pool_size = size;
     result->minimum_free_bytes = result->free_bytes;
     return result;
@@ -399,9 +399,7 @@ static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* use
 
 void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
 {
-    uint32_t sl_interval;
     uint32_t overhead;
-
     memset(info, 0, sizeof(multi_heap_info_t));
 
     if (heap == NULL) {
@@ -413,12 +411,9 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
     /* TLSF has an overhead per block. Calculate the total amount of overhead, it shall not be
      * part of the allocated bytes */
     overhead = info->allocated_blocks * tlsf_alloc_overhead();
-    info->total_allocated_bytes = (heap->pool_size - tlsf_size()) - heap->free_bytes - overhead;
+    info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead;
     info->minimum_free_bytes = heap->minimum_free_bytes;
     info->total_free_bytes = heap->free_bytes;
-    if (info->largest_free_block) {
-        sl_interval = (1 << (31 - __builtin_clz(info->largest_free_block))) / SL_INDEX_COUNT;
-        info->largest_free_block = info->largest_free_block & ~(sl_interval - 1);
-    }
+    info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
 
     multi_heap_internal_unlock(heap);
 }
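
Usage note (editorial addition, not part of the patch): with this change the TLSF control structure is sized at runtime from the pool size, so tlsf_size() now takes the handle and both create functions take byte counts. A minimal sketch of the new calling sequence follows; the buffer `buf` and the function `tlsf_usage_example` are illustrative names only.

#include "heap_tlsf.h"

/* Illustrative backing store; tlsf_create() rejects misaligned memory,
 * so force ALIGN_SIZE (4-byte) alignment. */
static char buf[64 * 1024] __attribute__((aligned(4)));

void tlsf_usage_example(void)
{
    /* Passing 0 as max_bytes sizes the metadata for pool_bytes itself,
     * exactly as multi_heap_register_impl() does above. */
    tlsf_t tlsf = tlsf_create_with_pool(buf, sizeof(buf), 0);

    /* The metadata overhead now depends on the pool size, so query it
     * through the handle instead of assuming sizeof(control_t). */
    size_t overhead = tlsf_size(tlsf);
    (void) overhead;

    void *p = tlsf_malloc(tlsf, 128);
    if (p) {
        tlsf_free(tlsf, p);
    }
}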