heap: fix unaligned memory bug when poisoning is enabled.

Poisoned memory is now aligned as requested by the user.
Closes IDF-2653
pull/7261/head
Omar Chebib 2021-01-22 16:50:19 +08:00
parent c4346abfb7
commit d902b4e7db
8 changed files with 110 additions and 54 deletions
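To illustrate the user-facing guarantee this commit restores, here is a minimal sketch (not part of the commit) of the behaviour being fixed, assuming an ESP-IDF application built with CONFIG_HEAP_POISONING_COMPREHENSIVE; `heap_caps_aligned_alloc()` is the public entry point that reaches the modified code path:

#include <assert.h>
#include <stdint.h>
#include "esp_heap_caps.h"

void aligned_alloc_demo(void)
{
    /* With poisoning enabled, a poison header is placed in front of the user data.
     * After this fix, the pointer returned to the caller (i.e. the byte right after
     * that header) is the one that honours the requested alignment. */
    void *p = heap_caps_aligned_alloc(64, 1024, MALLOC_CAP_8BIT);
    assert(p != NULL);
    assert(((uintptr_t)p & (64 - 1)) == 0);  /* 64 is a power of two, so the mask test is valid */
    heap_caps_free(p);                       /* TLSF-based heap frees aligned allocations like any other */
}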

View file

@@ -277,10 +277,15 @@ static inline __attribute__((__always_inline__)) int block_can_split(block_heade
/* Split a block into two, the second of which is free. */
static inline __attribute__((__always_inline__)) block_header_t* block_split(block_header_t* block, size_t size)
{
/* Calculate the amount of space left in the remaining block. */
/* Calculate the amount of space left in the remaining block.
* REMINDER: remaining pointer's first field is `prev_phys_block` but this field is part of the
* previous physical block. */
block_header_t* remaining =
offset_to_block(block_to_ptr(block), size - block_header_overhead);
/* `size` passed as an argument is the first block's new size, thus, the remaining block's size
* is `block_size(block) - size`. However, the block's data must be preceded by the data size.
* This field is NOT part of the size, so it has to be subtracted from the calculation. */
const size_t remain_size = block_size(block) - (size + block_header_overhead);
tlsf_assert(block_to_ptr(remaining) == align_ptr(block_to_ptr(remaining), ALIGN_SIZE)
@@ -293,6 +298,29 @@ static inline __attribute__((__always_inline__)) block_header_t* block_split(blo
block_set_size(block, size);
block_mark_as_free(remaining);
/**
* Here is the final outcome of this function:
*
* block                             remaining (block_ptr + size - BHO)
* +                                 +
* |                                 |
* v                                 v
* +----------------------------------------------------------------------+
* |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
* |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
* |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
* |0000|    |xxxxxxxxxxxxxxxxxxxxxx|xxxx|    |###########################|
* +----------------------------------------------------------------------+
*      |    |                           |    |
*      +    +<------------------------->+    +<------------------------->
*       BHO    `size` (argument) bytes   BHO      `remain_size` bytes
*
* Where BHO = block_header_overhead,
* 0: part of the memory owned by a `block`'s previous neighbour,
* x: part of the memory owned by `block`.
* #: part of the memory owned by `remaining`.
*/
return remaining;
}
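The `remain_size` bookkeeping above is easiest to see with numbers. A small self-contained check with hypothetical values (chosen only for illustration; `block_header_overhead` is target dependent):

#include <assert.h>

int main(void)
{
    /* Splitting a 128-byte block so that the first part keeps 48 bytes:
     * the remaining block loses one block_header_overhead (assumed 4 bytes here)
     * for its own size field, exactly as computed in block_split(). */
    const unsigned block_size_before     = 128;  /* block_size(block), assumed     */
    const unsigned size                  = 48;   /* first block's new size         */
    const unsigned block_header_overhead = 4;    /* assumed value, target specific */
    const unsigned remain_size = block_size_before - (size + block_header_overhead);
    assert(remain_size == 76);
    return 0;
}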
@@ -376,11 +404,17 @@ static inline __attribute__((__always_inline__)) block_header_t* block_trim_free
block_header_t* remaining_block = block;
if (block_can_split(block, size))
{
/* We want the 2nd block. */
/* We want to split `block` in two: the first block will be freed and the
* second block will be returned. */
remaining_block = block_split(block, size - block_header_overhead);
/* `remaining_block` is the second block, mark its predecessor (first
* block) as free. */
block_set_prev_free(remaining_block);
block_link_next(block);
/* Put the first block back into the free list. */
block_insert(control, block);
}
@@ -724,10 +758,34 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
return block_prepare_used(control, block, adjust);
}
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
/**
* @brief Allocate memory of at least `size` bytes, where the byte at `data_offset` will be aligned to `align`.
*
* This function will allocate memory pointed to by `ptr`. However, the byte at `data_offset` of
* this piece of memory (i.e., the byte at `ptr` + `data_offset`) will be aligned to `align`.
* This is useful for allocating memory that will internally have a header, where the
* usable memory following the header (i.e., `ptr` + `data_offset`) must be aligned.
*
* For example, a call to `multi_heap_aligned_alloc_impl_offs(heap, 64, 256, 20)` will return a
* pointer `ptr` to free memory of minimum 64 bytes, where `ptr + 20` is aligned on `256`.
* So `(ptr + 20) % 256` equals 0.
*
* @param tlsf TLSF structure to allocate memory from.
* @param align Alignment required for the byte at `data_offset` of the returned pointer.
* @param size Minimum size, in bytes, of the memory to allocate INCLUDING
* `data_offset` bytes.
* @param data_offset Offset to be aligned on `align`. This can be 0; in that
* case, the returned pointer will be aligned on `align`. If it is not a
* multiple of the CPU word size, it will be rounded up to the closest
* multiple of it.
*
* @return pointer to free memory.
*/
void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset)
{
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);
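/* e.g. (hypothetical values): with ALIGN_SIZE == 4, a `data_offset` of 20 is kept as 20,
** while a `data_offset` of 18 would be rounded up to 20 by `align_up()`. */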
/*
** We must allocate an additional minimum block size bytes so that if
@@ -737,8 +795,11 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
** the prev_phys_block field is not valid, and we can't simply adjust
** the size of that block.
*/
const size_t gap_minimum = sizeof(block_header_t);
const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum, align);
const size_t gap_minimum = sizeof(block_header_t) + off_adjust;
/* The offset is included in both `adjust` and `gap_minimum`, so we
** need to subtract it once.
*/
const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum - off_adjust, align);
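/*
** Hypothetical worked example (values not taken from this change): for a call with
** size = 100 (which already includes the 20 offset bytes), data_offset = 20,
** align = 256 and ALIGN_SIZE = 4, we get off_adjust = 20, adjust = 100 and
** gap_minimum = sizeof(block_header_t) + 20, so the offset bytes are counted once:
** size_with_gap = adjust_request_size(100 + 256 + gap_minimum - 20, 256).
*/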
/*
** If alignment is less than or equal to the base alignment, we're done.
@@ -758,8 +819,11 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
size_t gap = tlsf_cast(size_t,
tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
/* If gap size is too small, offset to next aligned boundary. */
if (gap && gap < gap_minimum)
/*
** If the gap size is too small, or if there is no gap but we need one,
** offset to next aligned boundary.
*/
if ((gap && gap < gap_minimum) || (!gap && off_adjust))
{
const size_t gap_remain = gap_minimum - gap;
const size_t offset = tlsf_max(gap_remain, align);
@@ -774,13 +838,24 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
if (gap)
{
tlsf_assert(gap >= gap_minimum && "gap size too small");
block = block_trim_free_leading(control, block, gap);
block = block_trim_free_leading(control, block, gap - off_adjust);
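/* Only `gap - off_adjust` bytes are returned to the free list: the last
* `off_adjust` bytes of the gap stay inside the allocated block, so the
* byte at `data_offset` is the one that lands on the requested alignment. */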
}
}
/* Preparing the block will also trim the trailing free memory. */
return block_prepare_used(control, block, adjust);
}
/**
* @brief Same as `tlsf_memalign_offs` function but with a 0 offset.
* The pointer returned is aligned on `align`.
*/
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
return tlsf_memalign_offs(tlsf, align, size, 0);
}
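As a usage illustration of the new offset-aware entry point, here is a hedged sketch (not code from this commit: `demo_head_t`, `alloc_with_header`, the 64-byte alignment and the "tlsf.h" header name are made up, and the `tlsf_t` handle is assumed to be already initialised):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include "tlsf.h"                /* assumed header exposing tlsf_memalign_offs() */

typedef struct {                 /* hypothetical 16-byte bookkeeping header */
    uint32_t canary;
    uint32_t alloc_size;
    uint64_t reserved;
} demo_head_t;

static void *alloc_with_header(tlsf_t tlsf, size_t payload_size)
{
    /* Ask TLSF to align the byte *after* the header rather than the block start. */
    void *raw = tlsf_memalign_offs(tlsf, 64, sizeof(demo_head_t) + payload_size,
                                   sizeof(demo_head_t));
    if (raw == NULL) {
        return NULL;
    }
    uint8_t *payload = (uint8_t *)raw + sizeof(demo_head_t);
    assert(((uintptr_t)payload % 64) == 0);   /* guaranteed by the data_offset argument */
    return payload;
}

This mirrors how the heap poisoning layer further down passes `sizeof(poison_head_t)` as the offset.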
void tlsf_free(tlsf_t tlsf, void* ptr)
{
/* Don't attempt to free a NULL pointer. */

View file

@@ -105,6 +105,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool);
/* malloc/memalign/realloc/free replacements. */
void* tlsf_malloc(tlsf_t tlsf, size_t size);
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size);
void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t offset);
void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size);
void tlsf_free(tlsf_t tlsf, void* ptr);

View file

@@ -251,7 +251,7 @@ void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size)
return result;
}
void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment)
void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset)
{
if(heap == NULL) {
return NULL;
@@ -267,7 +267,7 @@ void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_
}
multi_heap_internal_lock(heap);
void *result = tlsf_memalign(heap->heap_data, alignment, size);
void *result = tlsf_memalign_offs(heap->heap_data, alignment, size, offset);
if(result) {
heap->free_bytes -= tlsf_block_size(result);
if(heap->free_bytes < heap->minimum_free_bytes) {
@@ -279,6 +279,12 @@ void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_
return result;
}
void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment)
{
return multi_heap_aligned_alloc_impl_offs(heap, size, alignment, 0);
}
bool multi_heap_check(multi_heap_handle_t heap, bool print_errors)
{
(void)print_errors;

View file

@@ -25,10 +25,15 @@ typedef const struct block_header_t *multi_heap_block_handle_t;
*/
void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size);
/* Allocate a memory region of minimum `size` bytes, aligned on `alignment`. */
void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment);
/* Allocate a memory region of minimum `size` bytes, where the byte at `offset` is aligned on `alignment`. */
void *multi_heap_aligned_alloc_impl_offs(multi_heap_handle_t heap, size_t size, size_t alignment, size_t offset);
void multi_heap_free_impl(multi_heap_handle_t heap, void *p);
void *multi_heap_realloc_impl(multi_heap_handle_t heap, void *p, size_t size);
void *multi_heap_aligned_alloc_impl(multi_heap_handle_t heap, size_t size, size_t alignment);
multi_heap_handle_t multi_heap_register_impl(void *start, size_t size);
void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info);
size_t multi_heap_free_size_impl(multi_heap_handle_t heap);

View file

@@ -196,7 +196,8 @@ void *multi_heap_aligned_alloc(multi_heap_handle_t heap, size_t size, size_t ali
}
multi_heap_internal_lock(heap);
poison_head_t *head = multi_heap_aligned_alloc_impl(heap, size + POISON_OVERHEAD, alignment);
poison_head_t *head = multi_heap_aligned_alloc_impl_offs(heap, size + POISON_OVERHEAD,
alignment, sizeof(poison_head_t));
uint8_t *data = NULL;
if (head != NULL) {
data = poison_allocated_region(head, size);
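/* Why the offset matters (hypothetical numbers): if sizeof(poison_head_t) were 12
* and the caller asked for 64-byte alignment, aligning `head` itself (the old
* behaviour) would leave `data` at head + 12, i.e. 12 bytes past a 64-byte
* boundary. Passing sizeof(poison_head_t) as the offset makes TLSF align
* head + 12 instead, so the pointer handed back to the caller is aligned. */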

View file

@@ -29,13 +29,7 @@ TEST_CASE("Capabilities aligned allocator test", "[heap]")
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
//Address of obtained block must be aligned with selected value
if((alignments & 0x03) == 0) {
//Alignment is a multiple of four:
TEST_ASSERT(((intptr_t)buf & 0x03) == 0);
} else {
//Exotic alignments:
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
}
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
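//With a power-of-two alignment this single mask check is sufficient:
//e.g. alignments == 256 gives mask 0xFF, and the assert passes only when
//the low 8 bits of buf are zero (non-power-of-two alignments are rejected
//by the allocator and never reach this branch).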
//Write some data, if it corrupts memory probably the heap
//canary verification will fail:
@@ -64,14 +58,7 @@ TEST_CASE("Capabilities aligned allocator test", "[heap]")
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
//Address of obtained block must be aligned with selected value
if((alignments & 0x03) == 0) {
//Alignment is a multiple of four:
TEST_ASSERT(((intptr_t)buf & 0x03) == 0);
} else {
//Exotic alignments:
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
}
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
//Write some data, if it corrupts memory probably the heap
//canary verification will fail:
@@ -99,13 +86,7 @@ TEST_CASE("Capabilities aligned calloc test", "[heap]")
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
//Address of obtained block must be aligned with selected value
if((alignments & 0x03) == 0) {
//Alignment is a multiple of four:
TEST_ASSERT(((intptr_t)buf & 0x03) == 0);
} else {
//Exotic alignments:
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
}
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
//Write some data, if it corrupts memory probably the heap
//canary verification will fail:
@@ -137,7 +118,7 @@ TEST_CASE("Capabilities aligned calloc test", "[heap]")
for(;alignments <= 1024 * 1024; alignments++) {
//Now try to take aligned memory from IRAM:
uint8_t *buf = (uint8_t *)(uint8_t *)heap_caps_aligned_calloc(alignments, 1, 10*1024, MALLOC_CAP_SPIRAM);;
uint8_t *buf = (uint8_t *)(uint8_t *)heap_caps_aligned_calloc(alignments, 1, 10*1024, MALLOC_CAP_SPIRAM);
if(((alignments & (alignments - 1)) != 0) || (!alignments)) {
TEST_ASSERT( buf == NULL );
//printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
@@ -146,13 +127,8 @@ TEST_CASE("Capabilities aligned calloc test", "[heap]")
printf("[ALIGNED_ALLOC] alignment required: %u \n", alignments);
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
//Address of obtained block must be aligned with selected value
if((alignments & 0x03) == 0) {
//Alignment is a multiple of four:
TEST_ASSERT(((intptr_t)buf & 0x03) == 0);
} else {
//Exotic alignments:
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
}
TEST_ASSERT(((intptr_t)buf & (alignments - 1)) == 0);
//Write some data, if it corrupts memory probably the heap
//canary verification will fail:
memset(buf, 0xA5, (10*1024));

View file

@@ -17,7 +17,7 @@ INCLUDE_FLAGS = -I../include -I../../../tools/catch
GCOV ?= gcov
CPPFLAGS += $(INCLUDE_FLAGS) -D CONFIG_LOG_DEFAULT_LEVEL -g -fstack-protector-all -m32
CPPFLAGS += $(INCLUDE_FLAGS) -D CONFIG_LOG_DEFAULT_LEVEL -g -fstack-protector-all -m32 -DCONFIG_HEAP_POISONING_COMPREHENSIVE
CFLAGS += -Wall -Werror -fprofile-arcs -ftest-coverage
CXXFLAGS += -std=c++11 -Wall -Werror -fprofile-arcs -ftest-coverage
LDFLAGS += -lstdc++ -fprofile-arcs -ftest-coverage -m32

View file

@@ -466,23 +466,15 @@ TEST_CASE("multi_heap aligned allocations", "[multi_heap]")
uint8_t *buf = (uint8_t *)multi_heap_aligned_alloc(heap, (aligments + 137), aligments );
if(((aligments & (aligments - 1)) != 0) || (!aligments)) {
REQUIRE( buf == NULL );
//printf("[ALIGNED_ALLOC] alignment: %u is not a power of two, don't allow allocation \n", aligments);
} else {
REQUIRE( buf != NULL );
REQUIRE((intptr_t)buf >= (intptr_t)test_heap);
REQUIRE((intptr_t)buf < (intptr_t)(test_heap + sizeof(test_heap)));
printf("[ALIGNED_ALLOC] alignment required: %u \n", aligments);
//printf("[ALIGNED_ALLOC] allocated size: %d \n", multi_heap_get_allocated_size(heap, buf));
printf("[ALIGNED_ALLOC] address of allocated memory: %p \n\n", (void *)buf);
//Address of obtained block must be aligned with selected value
if((aligments & 0x03) == 0) {
//Alignment is a multiple of four:
REQUIRE(((intptr_t)buf & 0x03) == 0);
} else {
//Exotic alignments:
REQUIRE(((intptr_t)buf & (aligments - 1)) == 0);
}
REQUIRE(((intptr_t)buf & (aligments - 1)) == 0);
//Write some data, if it corrupts memory probably the heap
//canary verification will fail: