Mirror of https://github.com/espressif/esp-idf

Merge branch 'feature/heap_task_tracking_pr1498' into 'master'

heap: Add task tracking features (PR 1498)

See merge request idf/esp-idf!1960

commit 2abba13172
@@ -37,4 +37,13 @@ config HEAP_TRACING_STACK_DEPTH
        More stack frames use more memory in the heap trace buffer (and slow down allocation), but
        can provide useful information.

config HEAP_TASK_TRACKING
    bool "Enable heap task tracking"
    depends on !HEAP_POISONING_DISABLED
    help
        Enables tracking the task responsible for each heap allocation.

        This feature depends on heap poisoning being enabled and adds four more bytes of overhead
        for each block allocated.

endmenu
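For context, a minimal sketch (not part of this change; the HEAP_DIAG_PER_TASK name is purely illustrative) of how application code can detect the option at compile time once it is enabled in menuconfig. The task-tracking API itself is declared in the new esp_heap_task_info.h further below.

    #include "sdkconfig.h"

    #ifdef CONFIG_HEAP_TASK_TRACKING
    // Per-task heap accounting is compiled in; each allocated block carries an
    // extra owner field (a TaskHandle_t, 4 bytes on ESP32).
    #define HEAP_DIAG_PER_TASK 1
    #else
    #define HEAP_DIAG_PER_TASK 0
    #endif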
@@ -6,6 +6,10 @@ COMPONENT_OBJS := heap_caps_init.o heap_caps.o multi_heap.o heap_trace.o

ifndef CONFIG_HEAP_POISONING_DISABLED
COMPONENT_OBJS += multi_heap_poisoning.o

ifdef CONFIG_HEAP_TASK_TRACKING
COMPONENT_OBJS += heap_task_info.o
endif
endif

ifdef CONFIG_HEAP_TRACING

@@ -55,19 +55,6 @@ IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
    return (void *)(iptr + 1);
}

/* return all possible capabilities (across all priorities) for a given heap */
inline static uint32_t get_all_caps(const heap_t *heap)
{
    if (heap->heap == NULL) {
        return 0;
    }
    uint32_t all_caps = 0;
    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
        all_caps |= heap->caps[prio];
    }
    return all_caps;
}

bool heap_caps_match(const heap_t *heap, uint32_t caps)
{
    return heap->heap != NULL && ((get_all_caps(heap) & caps) == caps);

@@ -48,6 +48,18 @@ extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;

bool heap_caps_match(const heap_t *heap, uint32_t caps);

/* return all possible capabilities (across all priorities) for a given heap */
inline static uint32_t get_all_caps(const heap_t *heap)
{
    if (heap->heap == NULL) {
        return 0;
    }
    uint32_t all_caps = 0;
    for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
        all_caps |= heap->caps[prio];
    }
    return all_caps;
}

/*
 Because we don't want to add _another_ known allocation method to the stack of functions to trace wrt memory tracing,
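A minimal sketch (not part of the commit; the helper name is hypothetical) of how these two helpers combine: get_all_caps() ORs a region's capability words across all priority levels, and heap_caps_match() then requires every requested bit to be present in that union.

    #include <stdbool.h>
    #include "heap_private.h"
    #include "esp_heap_caps.h"

    // Could an allocation that must be DMA-capable and byte-addressable be
    // served from region 'h'?  heap_caps_match() checks the request against
    // the union of the region's caps across all priority levels.
    static bool region_is_dma_usable(const heap_t *h)
    {
        return heap_caps_match(h, MALLOC_CAP_DMA | MALLOC_CAP_8BIT);
    }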
@@ -0,0 +1,129 @@
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <multi_heap.h>
#include "multi_heap_internal.h"
#include "heap_private.h"
#include "esp_heap_task_info.h"

#ifdef CONFIG_HEAP_TASK_TRACKING

/*
 * Return per-task heap allocation totals and lists of blocks.
 *
 * For each task that has allocated memory from the heap, return totals for
 * allocations within regions matching one or more sets of capabilities.
 *
 * Optionally also return an array of structs providing details about each
 * block allocated by one or more requested tasks, or by all tasks.
 *
 * Returns the number of block detail structs returned.
 */
size_t heap_caps_get_per_task_info(heap_task_info_params_t *params)
{
    heap_t *reg;
    heap_task_block_t *blocks = params->blocks;
    size_t count = *params->num_totals;
    size_t remaining = params->max_blocks;

    // Clear out totals for any prepopulated tasks.
    if (params->totals) {
        for (size_t i = 0; i < count; ++i) {
            for (size_t type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
                params->totals[i].size[type] = 0;
                params->totals[i].count[type] = 0;
            }
        }
    }

    SLIST_FOREACH(reg, &registered_heaps, next) {
        multi_heap_handle_t heap = reg->heap;
        if (heap == NULL) {
            continue;
        }

        // Find if the capabilities of this heap region match one of the
        // desired sets of capabilities.
        uint32_t caps = get_all_caps(reg);
        uint32_t type;
        for (type = 0; type < NUM_HEAP_TASK_CAPS; ++type) {
            if ((caps & params->mask[type]) == params->caps[type]) {
                break;
            }
        }
        if (type == NUM_HEAP_TASK_CAPS) {
            continue;
        }

        multi_heap_block_handle_t b = multi_heap_get_first_block(heap);
        multi_heap_internal_lock(heap);
        for ( ; b ; b = multi_heap_get_next_block(heap, b)) {
            if (multi_heap_is_free(b)) {
                continue;
            }
            void *p = multi_heap_get_block_address(b);              // Safe, only arithmetic
            size_t bsize = multi_heap_get_allocated_size(heap, p);  // Validates
            TaskHandle_t btask = (TaskHandle_t)multi_heap_get_block_owner(b);

            // Accumulate per-task allocation totals.
            if (params->totals) {
                size_t i;
                for (i = 0; i < count; ++i) {
                    if (params->totals[i].task == btask) {
                        break;
                    }
                }
                if (i < count) {
                    params->totals[i].size[type] += bsize;
                    params->totals[i].count[type] += 1;
                }
                else {
                    if (count < params->max_totals) {
                        params->totals[count].task = btask;
                        params->totals[count].size[type] = bsize;
                        params->totals[count].count[type] = 1;
                        ++count;
                    }
                }
            }

            // Return details about allocated blocks for selected tasks.
            if (blocks && remaining > 0) {
                if (params->tasks) {
                    size_t i;
                    for (i = 0; i < params->num_tasks; ++i) {
                        if (btask == params->tasks[i]) {
                            break;
                        }
                    }
                    if (i == params->num_tasks) {
                        continue;
                    }
                }
                blocks->task = btask;
                blocks->address = p;
                blocks->size = bsize;
                ++blocks;
                --remaining;
            }
        }
        multi_heap_internal_unlock(heap);
    }
    *params->num_totals = count;
    return params->max_blocks - remaining;
}

#endif // CONFIG_HEAP_TASK_TRACKING
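To make the region-matching loop above concrete, here is a standalone sketch (not part of the commit; the function names and the two-partition setup are illustrative). It applies the same rule: the first index i where (region_caps & mask[i]) == caps[i] wins, and an all-zero mask/caps pair acts as a catch-all.

    #include <stdint.h>
    #include <stdio.h>
    #include "esp_heap_caps.h"

    #define NUM_PARTS 2

    // Same selection rule as heap_caps_get_per_task_info().
    static int partition_for(uint32_t region_caps, const uint32_t mask[], const uint32_t caps[])
    {
        for (int i = 0; i < NUM_PARTS; ++i) {
            if ((region_caps & mask[i]) == caps[i]) {
                return i;
            }
        }
        return -1;  // region not counted at all
    }

    void partition_example(void)
    {
        const uint32_t mask[NUM_PARTS] = { MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL, 0 };
        const uint32_t caps[NUM_PARTS] = { MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL, 0 };

        // Internal, byte-accessible DRAM region -> partition 0.
        printf("%d\n", partition_for(MALLOC_CAP_8BIT | MALLOC_CAP_32BIT | MALLOC_CAP_INTERNAL, mask, caps));
        // 32-bit-only region -> falls through to the catch-all, partition 1.
        printf("%d\n", partition_for(MALLOC_CAP_32BIT | MALLOC_CAP_INTERNAL, mask, caps));
    }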
@@ -0,0 +1,98 @@
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#ifdef CONFIG_HEAP_TASK_TRACKING

#ifdef __cplusplus
extern "C" {
#endif

// This macro controls how much space is provided for partitioning the per-task
// heap allocation info according to one or more sets of heap capabilities.
#define NUM_HEAP_TASK_CAPS 4

/** @brief Structure to collect per-task heap allocation totals partitioned by selected caps */
typedef struct {
    TaskHandle_t task;                 ///< Task to which these totals belong
    size_t size[NUM_HEAP_TASK_CAPS];   ///< Total allocations partitioned by selected caps
    size_t count[NUM_HEAP_TASK_CAPS];  ///< Number of blocks partitioned by selected caps
} heap_task_totals_t;

/** @brief Structure providing details about a block allocated by a task */
typedef struct {
    TaskHandle_t task;                 ///< Task that allocated the block
    void *address;                     ///< User address of allocated block
    uint32_t size;                     ///< Size of the allocated block
} heap_task_block_t;

/** @brief Structure to provide parameters to heap_caps_get_per_task_info
 *
 * The 'caps' and 'mask' arrays allow partitioning the per-task heap allocation
 * totals by selected sets of heap region capabilities so that totals for
 * multiple regions can be accumulated in one scan. The capabilities flags for
 * each region ANDed with mask[i] are compared to caps[i] in order; the
 * allocations in that region are added to totals->size[i] and totals->count[i]
 * for the first i that matches. To collect the totals without any
 * partitioning, set mask[0] and caps[0] both to zero. The allocation totals
 * are returned in the 'totals' array of heap_task_totals_t structs. To allow
 * easily comparing the totals array between consecutive calls, that array can
 * be left populated from one call to the next so the order of tasks is the
 * same even if some tasks have freed their blocks or have been deleted. The
 * number of prepopulated entries is given by num_totals, which is updated upon
 * return. If there are more tasks with allocations than the capacity of the
 * totals array (given by max_totals), information for the excess tasks will
 * not be collected. The totals array pointer can be NULL if the totals are
 * not desired.
 *
 * The 'tasks' array holds a list of handles for tasks whose block details are
 * to be returned in the 'blocks' array of heap_task_block_t structs. If the
 * tasks array pointer is NULL, block details for all tasks will be returned up
 * to the capacity of the blocks array, given by max_blocks. The function
 * return value gives the number of blocks filled into the array. The blocks
 * array pointer can be NULL if block details are not desired, or max_blocks
 * can be set to zero.
 */
typedef struct {
    int32_t caps[NUM_HEAP_TASK_CAPS];  ///< Array of caps for partitioning task totals
    int32_t mask[NUM_HEAP_TASK_CAPS];  ///< Array of masks under which caps must match
    TaskHandle_t *tasks;               ///< Array of tasks whose block info is returned
    size_t num_tasks;                  ///< Length of tasks array
    heap_task_totals_t *totals;        ///< Array of structs to collect task totals
    size_t *num_totals;                ///< Number of task structs currently in array
    size_t max_totals;                 ///< Capacity of array of task totals structs
    heap_task_block_t *blocks;         ///< Array of task block details structs
    size_t max_blocks;                 ///< Capacity of array of task block info structs
} heap_task_info_params_t;

/**
 * @brief Return per-task heap allocation totals and lists of blocks.
 *
 * For each task that has allocated memory from the heap, return totals for
 * allocations within regions matching one or more sets of capabilities.
 *
 * Optionally also return an array of structs providing details about each
 * block allocated by one or more requested tasks, or by all tasks.
 *
 * @param params Structure to hold all the parameters for the function
 * (@see heap_task_info_params_t).
 * @return Number of block detail structs returned (@see heap_task_block_t).
 */
extern size_t heap_caps_get_per_task_info(heap_task_info_params_t *params);

#ifdef __cplusplus
}
#endif

#endif // CONFIG_HEAP_TASK_TRACKING
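A usage sketch of this API (not part of the commit; the buffer sizes and the dump_per_task_heap_info helper are illustrative). It splits each task's totals into "internal 8-bit heap" versus "everything else" and also collects block details for all tasks; the totals array and counter are static so the task ordering stays stable across calls, as described above.

    #include <stdio.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"
    #include "esp_heap_caps.h"
    #include "esp_heap_task_info.h"

    #define MAX_TASK_NUM  20   // illustrative capacities, not defined by the API
    #define MAX_BLOCK_NUM 20

    static heap_task_totals_t s_totals[MAX_TASK_NUM];
    static heap_task_block_t  s_blocks[MAX_BLOCK_NUM];
    static size_t s_num_totals = 0;   // left populated between calls

    static void dump_per_task_heap_info(void)
    {
        heap_task_info_params_t params = { 0 };
        params.caps[0] = MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL;  // partition 0: internal 8-bit heap
        params.mask[0] = MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL;
        params.caps[1] = 0;                                      // partition 1: everything else
        params.mask[1] = 0;
        params.tasks = NULL;                                     // block details for all tasks
        params.num_tasks = 0;
        params.totals = s_totals;
        params.num_totals = &s_num_totals;
        params.max_totals = MAX_TASK_NUM;
        params.blocks = s_blocks;
        params.max_blocks = MAX_BLOCK_NUM;

        size_t num_blocks = heap_caps_get_per_task_info(&params);

        for (size_t i = 0; i < s_num_totals; i++) {
            printf("Task %p: internal %u bytes, other %u bytes\n",
                   (void *)s_totals[i].task,
                   (unsigned)s_totals[i].size[0],
                   (unsigned)s_totals[i].size[1]);
        }
        printf("%u blocks reported\n", (unsigned)num_blocks);
    }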
@@ -54,6 +54,14 @@ size_t multi_heap_free_size(multi_heap_handle_t heap)
size_t multi_heap_minimum_free_size(multi_heap_handle_t heap)
    __attribute__((alias("multi_heap_minimum_free_size_impl")));

void *multi_heap_get_block_address(multi_heap_block_handle_t block)
    __attribute__((alias("multi_heap_get_block_address_impl")));

void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
{
    return NULL;
}

#endif

#define ALIGN(X) ((X) & ~(sizeof(void *)-1))

@@ -279,12 +287,17 @@ static void split_if_necessary(heap_t *heap, heap_block_t *block, size_t size, h
    heap->free_bytes += block_data_size(new_block);
}

void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block)
{
    return ((char *)block + offsetof(heap_block_t, data));
}

size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
{
    heap_block_t *pb = get_block(p);

    assert_valid_block(heap, pb);
    MULTI_HEAP_ASSERT(!is_free(pb), pb); // block shouldn't be free
    return block_data_size(pb);
}

@@ -339,6 +352,27 @@ void inline multi_heap_internal_unlock(multi_heap_handle_t heap)
    MULTI_HEAP_UNLOCK(heap->lock);
}

multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap)
{
    return &heap->first_block;
}

multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block)
{
    heap_block_t *next = get_next_block(block);
    /* check for valid free last block to avoid assert in assert_valid_block */
    if (next == heap->last_block && is_last_block(next) && is_free(next)) {
        return NULL;
    }
    assert_valid_block(heap, next);
    return next;
}

bool multi_heap_is_free(multi_heap_block_handle_t block)
{
    return is_free(block);
}

void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size)
{
    heap_block_t *best_block = NULL;
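A minimal sketch (not part of the commit; count_used_blocks is a hypothetical helper) of how these internal accessors are meant to be combined by debugging code such as heap_task_info.c: start from the fixed first block, advance with multi_heap_get_next_block() until it returns NULL, and hold the heap's internal lock for the duration of the walk.

    #include <stddef.h>
    #include <multi_heap.h>
    #include "multi_heap_internal.h"

    // Count the allocated (non-free) blocks in one heap.  The walk must happen
    // under the heap's internal lock so the block list cannot change underneath.
    static size_t count_used_blocks(multi_heap_handle_t heap)
    {
        size_t used = 0;
        multi_heap_internal_lock(heap);
        for (multi_heap_block_handle_t b = multi_heap_get_first_block(heap);
             b != NULL;
             b = multi_heap_get_next_block(heap, b)) {
            if (!multi_heap_is_free(b)) {
                used++;
            }
        }
        multi_heap_internal_unlock(heap);
        return used;
    }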
@@ -13,6 +13,9 @@
// limitations under the License.
#pragma once

/* Opaque handle to a heap block */
typedef const struct heap_block *multi_heap_block_handle_t;

/* Internal definitions for the "implementation" of the multi_heap API,
   as defined in multi_heap.c.

@@ -28,6 +31,7 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
size_t multi_heap_free_size_impl(multi_heap_handle_t heap);
size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap);
size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p);
void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block);

/* Some internal functions for heap poisoning use */

@@ -45,3 +49,20 @@ void multi_heap_internal_poison_fill_region(void *start, size_t size, bool is_fr
void multi_heap_internal_lock(multi_heap_handle_t heap);

void multi_heap_internal_unlock(multi_heap_handle_t heap);

/* Some internal functions for heap debugging code to use */

/* Get the handle to the first (fixed free) block in a heap */
multi_heap_block_handle_t multi_heap_get_first_block(multi_heap_handle_t heap);

/* Get the handle to the next block in a heap, with validation */
multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, multi_heap_block_handle_t block);

/* Test if a heap block is free */
bool multi_heap_is_free(const multi_heap_block_handle_t block);

/* Get the data address of a heap block */
void *multi_heap_get_block_address(multi_heap_block_handle_t block);

/* Get the owner identification for a heap block */
void *multi_heap_get_block_owner(multi_heap_block_handle_t block);
@@ -63,6 +63,16 @@ inline static void multi_heap_assert(bool condition, const char *format, int lin
    multi_heap_assert((CONDITION), "CORRUPT HEAP: multi_heap.c:%d detected at 0x%08x\n", \
                      __LINE__, (intptr_t)(ADDRESS))

#ifdef CONFIG_HEAP_TASK_TRACKING
#define MULTI_HEAP_BLOCK_OWNER TaskHandle_t task;
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) (HEAD)->task = xTaskGetCurrentTaskHandle()
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) ((HEAD)->task)
#else
#define MULTI_HEAP_BLOCK_OWNER
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)
#endif

#else // ESP_PLATFORM

#include <assert.h>

@@ -73,4 +83,9 @@ inline static void multi_heap_assert(bool condition, const char *format, int lin
#define MULTI_HEAP_UNLOCK(PLOCK)

#define MULTI_HEAP_ASSERT(CONDITION, ADDRESS) assert((CONDITION) && "Heap corrupt")

#define MULTI_HEAP_BLOCK_OWNER
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD)
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) (NULL)

#endif
@@ -47,6 +47,7 @@
typedef struct {
    uint32_t head_canary;
    MULTI_HEAP_BLOCK_OWNER
    size_t alloc_size;
} poison_head_t;
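To visualize the four-byte overhead mentioned in the Kconfig help, a sketch (not part of the commit; the struct names are hypothetical) of what this poison header effectively compiles to on ESP32, where TaskHandle_t is a 4-byte pointer:

    #include <stddef.h>
    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    // With CONFIG_HEAP_TASK_TRACKING enabled, MULTI_HEAP_BLOCK_OWNER expands to
    // a TaskHandle_t member, so each allocated block's header grows by 4 bytes:
    typedef struct {
        uint32_t head_canary;
        TaskHandle_t task;     // owner recorded by MULTI_HEAP_SET_BLOCK_OWNER(head)
        size_t alloc_size;
    } poison_head_with_tracking_t;

    // With the option disabled, the macro expands to nothing and the header
    // keeps its original two fields:
    typedef struct {
        uint32_t head_canary;
        size_t alloc_size;
    } poison_head_without_tracking_t;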
@@ -67,6 +68,7 @@ static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
    poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
    head->alloc_size = alloc_size;
    head->head_canary = HEAD_CANARY_PATTERN;
    MULTI_HEAP_SET_BLOCK_OWNER(head);

    uint32_t tail_canary = TAIL_CANARY_PATTERN;
    if ((intptr_t)tail % sizeof(void *) == 0) {

@@ -258,6 +260,12 @@ void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size)
    return result;
}

void *multi_heap_get_block_address(multi_heap_block_handle_t block)
{
    char *head = multi_heap_get_block_address_impl(block);
    return head + sizeof(poison_head_t);
}

size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
{
    poison_head_t *head = verify_allocated_region(p, true);

@@ -269,6 +277,11 @@ size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p)
    return 0;
}

void *multi_heap_get_block_owner(multi_heap_block_handle_t block)
{
    return MULTI_HEAP_GET_BLOCK_OWNER((poison_head_t*)multi_heap_get_block_address_impl(block));
}

multi_heap_handle_t multi_heap_register(void *start, size_t size)
{
    if (start != NULL) {