Mirror of https://github.com/espressif/esp-idf
Merge branch 'feature/tlsf-dynamic-control-size_v5.0' into 'release/v5.0'
heap: Update to the new tlsf implementation of dynamic metadata size (backport v5.0)
See merge request espressif/esp-idf!20774
commit c173845ff3
@@ -38,5 +38,4 @@ def test_cxx_stack_smash(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('\"stack smashing protection CXX\"')
dut.expect_exact('Stack smashing protect failure!')
dut.expect_exact('abort() was called')
dut.expect_exact('Rebooting...')

@@ -23,10 +23,9 @@ __esp_stack_guard_setup (void)
__stack_chk_guard = (void *)esp_random();
}

void __stack_chk_fail (void)
IRAM_ATTR void __stack_chk_fail (void)
{
esp_rom_printf("\r\nStack smashing protect failure!\r\n\r\n");
abort();
esp_system_abort(DRAM_STR("Stack smashing protect failure!"));
}

#endif

@@ -8,11 +8,6 @@ set(includes "include")
if(NOT CONFIG_HEAP_TLSF_USE_ROM_IMPL)
set(priv_includes "tlsf")
list(APPEND srcs "tlsf/tlsf.c")
if(NOT CMAKE_BUILD_EARLY_EXPANSION)
set_source_files_properties(tlsf/tlsf.c
PROPERTIES COMPILE_FLAGS
"-include ../tlsf_platform.h")
endif()
endif()

if(NOT CONFIG_HEAP_POISONING_DISABLED)

@@ -15,7 +15,6 @@
#include "heap_private.h"
#include "esp_system.h"

/* Forward declaration for base function, put in IRAM.
* These functions don't check for errors after trying to allocate memory. */
static void *heap_caps_realloc_base( void *ptr, size_t size, uint32_t caps );
@@ -56,7 +55,7 @@ IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
}

static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
IRAM_ATTR NOINLINE_ATTR static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
{
if (alloc_failed_callback) {
alloc_failed_callback(requested_size, caps, function_name);

@@ -43,7 +43,7 @@ extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;
bool heap_caps_match(const heap_t *heap, uint32_t caps);

/* return all possible capabilities (across all priorities) for a given heap */
inline static IRAM_ATTR uint32_t get_all_caps(const heap_t *heap)
inline static __attribute__((always_inline)) uint32_t get_all_caps(const heap_t *heap)
{
if (heap->heap == NULL) {
return 0;

@@ -0,0 +1,9 @@
# Function placement in IRAM section

The heap component is compiled and linked in a way that minimizes the utilization of the IRAM section of memory without impacting the performance of its core functionalities. For this reason, the heap component API provided through [esp_heap_caps.h](./include/esp_heap_caps.h) and [esp_heap_caps_init.h](./include/esp_heap_caps_init.h) can be divided into two sets of functions.

1. The performance-related functions, placed in IRAM using the `IRAM_ATTR` attribute defined in [esp_attr.h](./../../components/esp_common/include/esp_attr.h) (e.g., `heap_caps_malloc`, `heap_caps_free`, `heap_caps_realloc`, etc.)

2. The functions that do not require the best performance, placed in flash (e.g., `heap_caps_print_heap_info`, `heap_caps_dump`, `heap_caps_dump_all`, etc.)

With that in mind, all the functions defined in [multi_heap.c](./multi_heap.c), [multi_heap_poisoning.c](./multi_heap_poisoning.c) and [tlsf.c](./tlsf/tlsf.c) that are directly or indirectly called from one of the heap component API functions placed in IRAM also have to be placed in IRAM. Symmetrically, the functions directly or indirectly called from one of the heap component API functions placed in flash will also be placed in flash.
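To make the split concrete, here is a minimal, hypothetical sketch (not part of this merge request) showing how a function is pinned to IRAM with `IRAM_ATTR` while an untagged function stays in flash; the helper names are invented for illustration:

```c
#include <inttypes.h>
#include <stdio.h>
#include "esp_attr.h"   // provides IRAM_ATTR, which places the function in IRAM

// Hypothetical fast-path helper: tagged so it stays executable even while the
// flash cache is disabled, at the cost of some IRAM.
IRAM_ATTR static void fast_path_update(volatile uint32_t *counter)
{
    (*counter)++;
}

// Hypothetical slow-path helper: left untagged, so the linker keeps it in flash.
static void slow_path_report(uint32_t value)
{
    printf("counter=%" PRIu32 "\n", value);
}
```
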
@@ -2,7 +2,53 @@
archive: libheap.a
entries:
if HEAP_TLSF_USE_ROM_IMPL = n:
tlsf (noflash)
multi_heap (noflash)
tlsf:tlsf_block_size (noflash)
tlsf:tlsf_size (noflash)
tlsf:tlsf_align_size (noflash)
tlsf:tlsf_block_size_min (noflash)
tlsf:tlsf_block_size_max (noflash)
tlsf:tlsf_alloc_overhead (noflash)
tlsf:tlsf_get_pool (noflash)
tlsf:tlsf_malloc (noflash)
tlsf:tlsf_memalign_offs (noflash)
tlsf:tlsf_memalign (noflash)
tlsf:tlsf_free (noflash)
tlsf:tlsf_realloc (noflash)

multi_heap:multi_heap_get_block_address_impl (noflash)
multi_heap:multi_heap_get_allocated_size_impl (noflash)
multi_heap:multi_heap_set_lock (noflash)
multi_heap:multi_heap_get_first_block (noflash)
multi_heap:multi_heap_get_next_block (noflash)
multi_heap:multi_heap_is_free (noflash)
multi_heap:multi_heap_malloc_impl (noflash)
multi_heap:multi_heap_free_impl (noflash)
multi_heap:multi_heap_realloc_impl (noflash)
multi_heap:multi_heap_aligned_alloc_impl_offs (noflash)
multi_heap:multi_heap_aligned_alloc_impl (noflash)
multi_heap:multi_heap_internal_lock (noflash)
multi_heap:multi_heap_internal_unlock (noflash)
multi_heap:assert_valid_block (noflash)

if HEAP_TLSF_USE_ROM_IMPL = y:
multi_heap:_multi_heap_lock (noflash)
multi_heap:_multi_heap_unlock (noflash)
multi_heap:multi_heap_in_rom_init (noflash)

if HEAP_POISONING_DISABLED = n:
multi_heap_poisoning (noflash)
multi_heap_poisoning:poison_allocated_region (noflash)
multi_heap_poisoning:verify_allocated_region (noflash)
multi_heap_poisoning:multi_heap_aligned_alloc (noflash)
multi_heap_poisoning:multi_heap_malloc (noflash)
multi_heap_poisoning:multi_heap_free (noflash)
multi_heap_poisoning:multi_heap_aligned_free (noflash)
multi_heap_poisoning:multi_heap_realloc (noflash)
multi_heap_poisoning:multi_heap_get_block_address (noflash)
multi_heap_poisoning:multi_heap_get_block_owner (noflash)
multi_heap_poisoning:multi_heap_get_allocated_size (noflash)
multi_heap_poisoning:multi_heap_internal_check_block_poisoning (noflash)
multi_heap_poisoning:multi_heap_internal_poison_fill_region (noflash)

if HEAP_POISONING_COMPREHENSIVE = y:
multi_heap_poisoning:verify_fill_pattern (noflash)
multi_heap_poisoning:block_absorb_post_hook (noflash)

@@ -105,20 +105,8 @@ void multi_heap_in_rom_init(void)

#else // CONFIG_HEAP_TLSF_USE_ROM_IMPL

/* Return true if this block is free. */
static inline bool is_free(const block_header_t *block)
{
return ((block->size & 0x01) != 0);
}

/* Data size of the block (excludes this block's header) */
static inline size_t block_data_size(const block_header_t *block)
{
return (block->size & ~0x03);
}

/* Check a block is valid for this heap. Used to verify parameters. */
static void assert_valid_block(const heap_t *heap, const block_header_t *block)
__attribute__((noinline)) NOCLONE_ATTR static void assert_valid_block(const heap_t *heap, const block_header_t *block)
{
pool_t pool = tlsf_get_pool(heap->heap_data);
void *ptr = block_to_ptr(block);
@@ -130,8 +118,7 @@ static void assert_valid_block(const heap_t *heap, const block_header_t *block)

void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block)
{
void *ptr = block_to_ptr(block);
return (ptr);
return block_to_ptr(block);
}

size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
@@ -142,7 +129,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
{
assert(start_ptr);
if(size < (tlsf_size() + tlsf_block_size_min() + sizeof(heap_t))) {
if(size < (sizeof(heap_t))) {
//Region too small to be a heap.
return NULL;
}
@@ -150,13 +137,16 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
heap_t *result = (heap_t *)start_ptr;
size -= sizeof(heap_t);

result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size);
/* Do not specify any maximum size for the allocations so that the default configuration is used */
const size_t max_bytes = 0;

result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, max_bytes);
if(!result->heap_data) {
return NULL;
}

result->lock = NULL;
result->free_bytes = size - tlsf_size();
result->free_bytes = size - tlsf_size(result->heap_data);
result->pool_size = size;
result->minimum_free_bytes = result->free_bytes;
return result;
@@ -167,12 +157,12 @@ void multi_heap_set_lock(multi_heap_handle_t heap, void *lock)
heap->lock = lock;
}

void inline multi_heap_internal_lock(multi_heap_handle_t heap)
void multi_heap_internal_lock(multi_heap_handle_t heap)
{
MULTI_HEAP_LOCK(heap->lock);
}

void inline multi_heap_internal_unlock(multi_heap_handle_t heap)
void multi_heap_internal_unlock(multi_heap_handle_t heap)
{
MULTI_HEAP_UNLOCK(heap->lock);
}
@@ -192,7 +182,7 @@ multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, mu
assert_valid_block(heap, block);
block_header_t* next = block_next(block);

if(block_data_size(next) == 0) {
if(block_size(next) == 0) {
//Last block:
return NULL;
} else {
@@ -203,7 +193,7 @@ multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, mu

bool multi_heap_is_free(multi_heap_block_handle_t block)
{
return is_free(block);
return block_is_free(block);
}

void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size)
@@ -361,7 +351,7 @@ bool multi_heap_check(multi_heap_handle_t heap, bool print_errors)
return valid;
}

static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
__attribute__((noinline)) static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
{
(void)user;
MULTI_HEAP_STDERR_PRINTF("Block %p data, size: %d bytes, Free: %s \n",
@@ -398,7 +388,7 @@ size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap)
return heap->minimum_free_bytes;
}

static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
__attribute__((noinline)) static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
{
multi_heap_info_t *info = user;

@@ -417,7 +407,6 @@ static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* use

void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
{
uint32_t sl_interval;
uint32_t overhead;

memset(info, 0, sizeof(multi_heap_info_t));
@@ -431,13 +420,10 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
/* TLSF has an overhead per block. Calculate the total amount of overhead, it shall not be
* part of the allocated bytes */
overhead = info->allocated_blocks * tlsf_alloc_overhead();
info->total_allocated_bytes = (heap->pool_size - tlsf_size()) - heap->free_bytes - overhead;
info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead;
info->minimum_free_bytes = heap->minimum_free_bytes;
info->total_free_bytes = heap->free_bytes;
if (info->largest_free_block) {
sl_interval = (1 << (31 - __builtin_clz(info->largest_free_block))) / SL_INDEX_COUNT;
info->largest_free_block = info->largest_free_block & ~(sl_interval - 1);
}
info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
multi_heap_internal_unlock(heap);
}
#endif

@@ -5,6 +5,14 @@
*/
#pragma once

/* Define a noclone attribute when compiled with GCC as certain functions
* in the heap component should not be cloned by the compiler */
#if defined __has_attribute && __has_attribute(noclone)
#define NOCLONE_ATTR __attribute((noclone))
#else
#define NOCLONE_ATTR
#endif

/* Define a structure that contains some function pointers that point to OS-related functions.
An instance of this structure will be provided to the heap in ROM for use if needed.
*/
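For illustration only, here is a hypothetical helper (not taken from this merge request) showing how the `NOCLONE_ATTR` macro defined above is typically combined with `noinline`, so that GCC neither inlines the function nor emits an argument-specialized clone of it:

```c
#include <stddef.h>
#include <string.h>
#include "multi_heap_platform.h"   // assumed to provide the NOCLONE_ATTR definition shown above

// Hypothetical helper: the NULL check must survive exactly as written, so both
// cloning and inlining are disabled.
__attribute__((noinline)) NOCLONE_ATTR static size_t checked_length(const char *s)
{
    if (s == NULL) {
        return 0;
    }
    return strlen(s);
}
```
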
@@ -66,7 +66,7 @@ typedef struct {

Returns the pointer to the actual usable data buffer (ie after 'head')
*/
static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
__attribute__((noinline)) static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
{
uint8_t *data = (uint8_t *)(&head[1]); /* start of data ie 'real' allocated buffer */
poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
@@ -90,7 +90,7 @@ static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)

Returns a pointer to the poison header structure, or NULL if the poison structures are corrupt.
*/
static poison_head_t *verify_allocated_region(void *data, bool print_errors)
__attribute__((noinline)) static poison_head_t *verify_allocated_region(void *data, bool print_errors)
{
poison_head_t *head = (poison_head_t *)((intptr_t)data - sizeof(poison_head_t));
poison_tail_t *tail = (poison_tail_t *)((intptr_t)data + head->alloc_size);
@@ -132,8 +132,12 @@ static poison_head_t *verify_allocated_region(void *data, bool print_errors)
if swap_pattern is true, swap patterns in the buffer (ie replace MALLOC_FILL_PATTERN with FREE_FILL_PATTERN, and vice versa.)

Returns true if verification checks out.

This function has the attribute noclone to prevent the compiler from creating a clone on flash where expect_free is removed (as this
function is called only with expect_free == true throughout the component).
*/
static bool verify_fill_pattern(void *data, size_t size, bool print_errors, bool expect_free, bool swap_pattern)
__attribute__((noinline)) NOCLONE_ATTR
static bool verify_fill_pattern(void *data, size_t size, const bool print_errors, const bool expect_free, bool swap_pattern)
{
const uint32_t FREE_FILL_WORD = (FREE_FILL_PATTERN << 24) | (FREE_FILL_PATTERN << 16) | (FREE_FILL_PATTERN << 8) | FREE_FILL_PATTERN;
const uint32_t MALLOC_FILL_WORD = (MALLOC_FILL_PATTERN << 24) | (MALLOC_FILL_PATTERN << 16) | (MALLOC_FILL_PATTERN << 8) | MALLOC_FILL_PATTERN;
@@ -259,7 +263,9 @@ void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
return data;
}

void multi_heap_free(multi_heap_handle_t heap, void *p)
/* This function has the noclone attribute to prevent the compiler from optimizing out the
* check for p == NULL and creating a clone function placed in flash. */
NOCLONE_ATTR void multi_heap_free(multi_heap_handle_t heap, void *p)
{
if (p == NULL) {
return;

@@ -2,3 +2,18 @@ idf_component_register(SRC_DIRS "."
PRIV_INCLUDE_DIRS "."
PRIV_REQUIRES cmock test_utils heap spi_flash)
target_compile_options(${COMPONENT_LIB} PRIVATE "-Wno-format")

if(CONFIG_COMPILER_DUMP_RTL_FILES)
idf_build_get_property(elf_file_name EXECUTABLE GENERATOR_EXPRESSION)
add_custom_target(check_test_app_sections ALL
COMMAND ${PYTHON} $ENV{IDF_PATH}/tools/ci/check_callgraph.py
--rtl-dir ${CMAKE_BINARY_DIR}/esp-idf/heap/
--elf-file ${CMAKE_BINARY_DIR}/${elf_file_name}
find-refs
--from-sections=.iram0.text
--to-sections=.flash.text,.flash.rodata
--ignore-symbols=__func__/__assert_func,__func__/heap_caps_alloc_failed
--exit-code
DEPENDS ${elf_file_name}
)
endif()

@@ -2,12 +2,30 @@
#include "multi_heap.h"

#include "../multi_heap_config.h"
#include "../tlsf/tlsf.h"
#include "../tlsf/tlsf_common.h"
#include "../tlsf/tlsf_block_functions.h"

#include <string.h>
#include <assert.h>

/* The functions __malloc__ and __free__ are used to call the libc
* malloc and free and allocate memory from the host heap. Since the test
* `TEST_CASE("multi_heap many random allocations", "[multi_heap]")`
* calls multi_heap_allocation_impl() with sizes that can go up to 8MB,
* an allocation on the heap will be preferred rather than the stack which
* might not have the necessary memory.
*/
static void *__malloc__(size_t bytes)
{
return malloc(bytes);
}

static void __free__(void *ptr)
{
free(ptr);
}

/* Insurance against accidentally using libc heap functions in tests */
#undef free
#define free #error
@@ -61,10 +79,11 @@ TEST_CASE("multi_heap simple allocations", "[multi_heap]")

TEST_CASE("multi_heap fragmentation", "[multi_heap]")
{
uint8_t small_heap[4 * 1024];
const size_t HEAP_SIZE = 4 * 1024;
uint8_t small_heap[HEAP_SIZE];
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));

const size_t alloc_size = 128;
const size_t alloc_size = 500;

void *p[4];
for (int i = 0; i < 4; i++) {
@@ -204,20 +223,22 @@ TEST_CASE("multi_heap defrag realloc", "[multi_heap]")
#endif

TEST_CASE("multi_heap many random allocations", "[multi_heap]")
void multi_heap_allocation_impl(int heap_size)
{
uint8_t big_heap[8 * 1024];
uint8_t *big_heap = (uint8_t *) __malloc__(heap_size);
const int NUM_POINTERS = 64;

printf("Running multi-allocation test...\n");
printf("Running multi-allocation test with heap_size %d...\n", heap_size);

REQUIRE( big_heap );
multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size);

void *p[NUM_POINTERS] = { 0 };
size_t s[NUM_POINTERS] = { 0 };
multi_heap_handle_t heap = multi_heap_register(big_heap, sizeof(big_heap));

const size_t initial_free = multi_heap_free_size(heap);

const int ITERATIONS = 10000;
const int ITERATIONS = 5000;

for (int i = 0; i < ITERATIONS; i++) {
/* check all pointers allocated so far are valid inside big_heap */
@@ -228,11 +249,11 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")

uint8_t n = rand() % NUM_POINTERS;

if (rand() % 4 == 0) {
if (i % 4 == 0) {
/* 1 in 4 iterations, try to realloc the buffer instead
of using malloc/free
*/
size_t new_size = rand() % 1024;
size_t new_size = (rand() % 1023) + 1;
void *new_p = multi_heap_realloc(heap, p[n], new_size);
printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size);
multi_heap_check(heap, true);
@@ -241,13 +262,12 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
s[n] = new_size;
if (new_size > 0) {
REQUIRE( p[n] >= big_heap );
REQUIRE( p[n] < big_heap + sizeof(big_heap) );
REQUIRE( p[n] < big_heap + heap_size );
memset(p[n], n, new_size);
}
}
continue;
}

if (p[n] != NULL) {
if (s[n] > 0) {
/* Verify pre-existing contents of p[n] */
@@ -271,14 +291,13 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
printf("malloc %p (%zu)\n", p[n], s[n]);
if (p[n] != NULL) {
REQUIRE( p[n] >= big_heap );
REQUIRE( p[n] < big_heap + sizeof(big_heap) );
REQUIRE( p[n] < big_heap + heap_size );
}
if (!multi_heap_check(heap, true)) {
printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
multi_heap_dump(heap);
REQUIRE(0);
}

if (p[n] != NULL) {
memset(p[n], n, s[n]);
}
@@ -294,6 +313,15 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
}

REQUIRE( initial_free == multi_heap_free_size(heap) );
__free__(big_heap);
}

TEST_CASE("multi_heap many random allocations", "[multi_heap]")
{
size_t poolsize[] = { 15, 255, 4095, 8191 };
for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) {
multi_heap_allocation_impl(poolsize[i] * 1024);
}
}

TEST_CASE("multi_heap_get_info() function", "[multi_heap]")
@@ -393,8 +421,9 @@ TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]")

TEST_CASE("multi_heap_realloc()", "[multi_heap]")
{
const size_t HEAP_SIZE = 4 * 1024;
const uint32_t PATTERN = 0xABABDADA;
uint8_t small_heap[4 * 1024];
uint8_t small_heap[HEAP_SIZE];
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));

uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64);
@@ -404,7 +433,6 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
REQUIRE( b > a); /* 'b' takes the block after 'a' */

*a = PATTERN;

uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72);
REQUIRE( multi_heap_check(heap, true));
REQUIRE( c != NULL );
@@ -414,13 +442,12 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
#ifndef MULTI_HEAP_POISONING_SLOW
// "Slow" poisoning implementation doesn't reallocate in place, so these
// tests will fail...

uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36);
REQUIRE( multi_heap_check(heap, true) );
REQUIRE( c == d ); /* 'c' block should be shrunk in-place */
REQUIRE( *d == PATTERN);

uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64);
// biggest allocation possible to completely fill the block left free after it was reallocated
uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 60);
REQUIRE( multi_heap_check(heap, true));
REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */

@@ -429,11 +456,7 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
REQUIRE( multi_heap_check(heap, true) );
REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */

#ifdef MULTI_HEAP_POISONING
#define TOO_MUCH 7420 + 1
#else
#define TOO_MUCH 7420 + 1
#endif
#define TOO_MUCH HEAP_SIZE + 1
/* not enough contiguous space left in the heap */
uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH);
REQUIRE( g == NULL );
@@ -443,7 +466,8 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
g = (uint32_t *)multi_heap_realloc(heap, e, 128);
REQUIRE( multi_heap_check(heap, true) );
REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */
#endif

#endif // MULTI_HEAP_POISONING_SLOW
}

// TLSF only accepts heaps aligned to 4-byte boundary so
@@ -542,8 +566,12 @@ TEST_CASE("multi_heap poisoning detection", "[multi_heap]")
/* register the heap memory. One free block only will be available */
multi_heap_handle_t heap = multi_heap_register(heap_mem, HEAP_SIZE);

control_t *tlsf_ptr = (control_t*)(heap_mem + 20);
const size_t control_t_size = tlsf_ptr->size;
const size_t heap_t_size = 20;

/* offset in memory at which to find the first free memory byte */
const size_t free_memory_offset = sizeof(multi_heap_info_t) + sizeof(control_t) + block_header_overhead;
const size_t free_memory_offset = heap_t_size + control_t_size + sizeof(block_header_t) - block_header_overhead;

/* block header of the free block under test in the heap () */
const block_header_t* block = (block_header_t*)(heap_mem + free_memory_offset - sizeof(block_header_t));

@@ -1 +1 @@
Subproject commit ab17d6798d1561758827b6553d56d57f19aa4d66
Subproject commit 8c9cd0517adf99e363812e9a295dfe3898fdd345

@@ -1,51 +0,0 @@
/*
* SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once

#include <stddef.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef ESP_PLATFORM
#include "soc/soc.h"

#if !CONFIG_SPIRAM
#define TLSF_MAX_POOL_SIZE (SOC_DIRAM_DRAM_HIGH - SOC_DIRAM_DRAM_LOW)
#else
#define TLSF_MAX_POOL_SIZE SOC_EXTRAM_DATA_SIZE
#endif
#endif

#if (TLSF_MAX_POOL_SIZE <= (256 * 1024))
#define FL_INDEX_MAX_PLATFORM 18 //Each pool can have up 256KB
#elif (TLSF_MAX_POOL_SIZE <= (512 * 1024))
#define FL_INDEX_MAX_PLATFORM 19 //Each pool can have up 512KB
#elif (TLSF_MAX_POOL_SIZE <= (1 * 1024 * 1024))
#define FL_INDEX_MAX_PLATFORM 20 //Each pool can have up 1MB
#elif (TLSF_MAX_POOL_SIZE <= (2 * 1024 * 1024))
#define FL_INDEX_MAX_PLATFORM 21 //Each pool can have up 2MB
#elif (TLSF_MAX_POOL_SIZE <= (4 * 1024 * 1024))
#define FL_INDEX_MAX_PLATFORM 22 //Each pool can have up 4MB
#elif (TLSF_MAX_POOL_SIZE <= (8 * 1024 * 1024))
#define FL_INDEX_MAX_PLATFORM 23 //Each pool can have up 8MB
#elif (TLSF_MAX_POOL_SIZE <= (16 * 1024 * 1024))
#define FL_INDEX_MAX_PLATFORM 24 //Each pool can have up 16MB
#elif (TLSF_MAX_POOL_SIZE <= (32 * 1024 * 1024))
#define FL_INDEX_MAX_PLATFORM 25 //Each pool can have up 32MB
#else
#error "Higher TLSF pool sizes should be added for this new config"
#endif

/* Include from the TLSF submodule to force TLSF_INDEX_MAX_PLATFORM to be defined
* when the TLSF repository is compiled in the IDF environment. */
#include "tlsf_common.h"

#ifdef __cplusplus
}
#endif

@@ -126,7 +126,28 @@ Thread Safety

Heap functions are thread safe, meaning they can be called from different tasks simultaneously without any limitations.

It is technically possible to call ``malloc``, ``free``, and related functions from interrupt handler (ISR) context. However this is not recommended, as heap function calls may delay other interrupts. It is strongly recommended to refactor applications so that any buffers used by an ISR are pre-allocated outside of the ISR. Support for calling heap functions from ISRs may be removed in a future update.
It is technically possible to call ``malloc``, ``free``, and related functions from interrupt handler (ISR) context (see :ref:`calling-heap-related-functions-from-isr`). However this is not recommended, as heap function calls may delay other interrupts. It is strongly recommended to refactor applications so that any buffers used by an ISR are pre-allocated outside of the ISR. Support for calling heap functions from ISRs may be removed in a future update.

.. _calling-heap-related-functions-from-isr:

Calling heap related functions from ISR
---------------------------------------

The following functions from the heap component can be called from an interrupt handler (ISR):

* :cpp:func:`heap_caps_malloc`
* :cpp:func:`heap_caps_malloc_default`
* :cpp:func:`heap_caps_realloc_default`
* :cpp:func:`heap_caps_malloc_prefer`
* :cpp:func:`heap_caps_realloc_prefer`
* :cpp:func:`heap_caps_calloc_prefer`
* :cpp:func:`heap_caps_free`
* :cpp:func:`heap_caps_realloc`
* :cpp:func:`heap_caps_calloc`
* :cpp:func:`heap_caps_aligned_alloc`
* :cpp:func:`heap_caps_aligned_free`

Note, however, that this practice is strongly discouraged.
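As a hedged illustration of the recommendation above (the buffer name and size are hypothetical, not taken from this merge request), the usual alternative is to allocate in task context and only use the buffer inside the ISR:

```c
#include <assert.h>
#include <stdint.h>
#include "esp_attr.h"
#include "esp_heap_caps.h"

// Hypothetical scratch buffer, allocated once in task context.
static uint8_t *s_isr_scratch;

void example_setup(void)
{
    // Allocate from internal RAM before the interrupt is ever enabled.
    s_isr_scratch = heap_caps_malloc(256, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    assert(s_isr_scratch != NULL);
}

// Hypothetical handler: uses the pre-allocated buffer instead of calling
// heap_caps_malloc()/heap_caps_free() from interrupt context, even though
// those calls are technically allowed.
void IRAM_ATTR example_isr_handler(void *arg)
{
    s_isr_scratch[0] = 0xA5;
}
```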

Heap Tracing & Debugging
------------------------

@@ -95,6 +95,11 @@ class Reference(object):
)


class IgnorePair():
def __init__(self, pair: str) -> None:
self.symbol, self.function_call = pair.split('/')


class ElfInfo(object):
def __init__(self, elf_file: BinaryIO) -> None:
self.elf_file = elf_file
@@ -159,7 +164,7 @@ class ElfInfo(object):
return None


def load_rtl_file(rtl_filename: str, tu_filename: str, functions: List[RtlFunction]) -> None:
def load_rtl_file(rtl_filename: str, tu_filename: str, functions: List[RtlFunction], ignore_pairs: List[IgnorePair]) -> None:
last_function: Optional[RtlFunction] = None
for line in open(rtl_filename):
# Find function definition
@@ -175,6 +180,17 @@ def load_rtl_file(rtl_filename: str, tu_filename: str, functions: List[RtlFuncti
match = re.match(CALL_REGEX, line)
if match:
target = match.group('target')

# if target matches one of the IgnorePair function_call attributes, remove
# the last occurrence of the associated symbol from the last_function.refs list.
call_matching_pairs = [pair for pair in ignore_pairs if pair.function_call == target]
if call_matching_pairs and last_function and last_function.refs:
for pair in call_matching_pairs:
ignored_symbols = [ref for ref in last_function.refs if pair.symbol in ref]
if ignored_symbols:
last_ref = ignored_symbols.pop()
last_function.refs = [ref for ref in last_function.refs if last_ref != ref]

if target not in last_function.calls:
last_function.calls.append(target)
continue
@@ -304,12 +320,12 @@ def match_rtl_funcs_to_symbols(rtl_functions: List[RtlFunction], elfinfo: ElfInf
return symbols, refs


def get_symbols_and_refs(rtl_list: List[str], elf_file: BinaryIO) -> Tuple[List[Symbol], List[Reference]]:
def get_symbols_and_refs(rtl_list: List[str], elf_file: BinaryIO, ignore_pairs: List[IgnorePair]) -> Tuple[List[Symbol], List[Reference]]:
elfinfo = ElfInfo(elf_file)

rtl_functions: List[RtlFunction] = []
for file_name in rtl_list:
load_rtl_file(file_name, file_name, rtl_functions)
load_rtl_file(file_name, file_name, rtl_functions, ignore_pairs)

return match_rtl_funcs_to_symbols(rtl_functions, elfinfo)

@@ -361,6 +377,10 @@ def main() -> None:
find_refs_parser.add_argument(
'--to-sections', help='comma-separated list of target sections'
)
find_refs_parser.add_argument(
'--ignore-symbols', help='comma-separated list of symbol/function_name pairs. \
This will force the parser to ignore the symbol preceding the call to function_name'
)
find_refs_parser.add_argument(
'--exit-code',
action='store_true',
@@ -384,7 +404,11 @@ def main() -> None:
if not rtl_list:
raise RuntimeError('No RTL files specified')

_, refs = get_symbols_and_refs(rtl_list, args.elf_file)
ignore_pairs = []
for pair in args.ignore_symbols.split(',') if args.ignore_symbols else []:
ignore_pairs.append(IgnorePair(pair))

_, refs = get_symbols_and_refs(rtl_list, args.elf_file, ignore_pairs)

if args.action == 'find-refs':
from_sections = args.from_sections.split(',') if args.from_sections else []

@@ -31,3 +31,4 @@ CONFIG_MQTT_TEST_BROKER_URI="mqtt://${EXAMPLE_MQTT_BROKER_TCP}"
# See esp_spiffs_gc description for more info.
CONFIG_SPIFFS_GC_MAX_RUNS=132
CONFIG_NVS_ASSERT_ERROR_CHECK=y
CONFIG_COMPILER_DUMP_RTL_FILES=y