feat(coredump): improve the probability of accessing healthy TCBs

pull/13090/head
Erhan Kurubas 2023-12-24 21:37:45 +01:00 zatwierdzone przez BOT
rodzic 261651fc19
commit 029a0582e1
16 zmienionych plików z 295 dodań i 206 usunięć

Wyświetl plik

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -7,6 +7,7 @@
#define ESP_CORE_DUMP_COMMON_H_ #define ESP_CORE_DUMP_COMMON_H_
#include "freertos/FreeRTOS.h" #include "freertos/FreeRTOS.h"
#include "esp_private/freertos_debug.h"
#include "esp_app_format.h" #include "esp_app_format.h"
#include "esp_core_dump_types.h" #include "esp_core_dump_types.h"
@ -30,22 +31,6 @@ typedef enum {
COREDUMP_MEMORY_START = COREDUMP_MEMORY_DRAM COREDUMP_MEMORY_START = COREDUMP_MEMORY_DRAM
} coredump_region_t; } coredump_region_t;
/**
* @brief Get the (FreeRTOS) task handle for the current task.
*
* @return Task handle of the current task.
*/
core_dump_task_handle_t esp_core_dump_get_current_task_handle(void);
/**
* @brief Get next task handle of a given handle.
*
* @param handle Task handle to get the next handle from.
*
* @return Next task handle.
*/
core_dump_task_handle_t esp_core_dump_get_next_task(core_dump_task_handle_t handle);
/** /**
* @brief Get a task snapshot from a given handle. * @brief Get a task snapshot from a given handle.
* *
@ -123,6 +108,16 @@ static inline uint32_t esp_core_dump_get_tcb_len(void)
sizeof(StaticTask_t); sizeof(StaticTask_t);
} }
/**
* @brief Get the (FreeRTOS) task handle for the current task.
*
* @return Task handle of the current task.
*/
static inline core_dump_task_handle_t esp_core_dump_get_current_task_handle(void)
{
return (core_dump_task_handle_t) xTaskGetCurrentTaskHandleForCore(xPortGetCoreID());
}
/** /**
* @brief Get the length, in bytes, of a given memory location. Padding is * @brief Get the length, in bytes, of a given memory location. Padding is
* taken into account in this calculation. * taken into account in this calculation.
@ -139,6 +134,31 @@ static inline uint32_t esp_core_dump_get_memory_len(uint32_t start, uint32_t end
return (len + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1); return (len + sizeof(uint32_t) - 1) & ~(sizeof(uint32_t) - 1);
} }
/**
* @brief Initialize the task iterator to start traversing task lists.
*/
static inline void esp_core_dump_task_iterator_init(TaskIterator_t *iter)
{
if (iter) {
iter->uxCurrentListIndex = 0;
iter->pxNextListItem = NULL;
iter->pxTaskHandle = NULL;
}
}
/**
* @brief Get the next task using the task iterator
*
* This function retrieves the next task in the traversal sequence.
*
* @param task_iterator Pointer to the task iterator structure.
*
* @return The index of the current task list. Returns -1 if all tasks have been traversed.
*/
static inline int esp_core_dump_task_iterator_next(TaskIterator_t *task_iterator)
{
return xTaskGetNext(task_iterator);
}
#ifdef __cplusplus #ifdef __cplusplus
} }

Wyświetl plik

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -91,19 +91,20 @@ esp_err_t esp_core_dump_write_binary(core_dump_write_config_t *write_cfg)
core_dump_header_t hdr = { 0 }; core_dump_header_t hdr = { 0 };
core_dump_task_header_t task_hdr = { 0 }; core_dump_task_header_t task_hdr = { 0 };
core_dump_mem_seg_header_t mem_seg = { 0 }; core_dump_mem_seg_header_t mem_seg = { 0 };
void *task = NULL; TaskIterator_t task_iter;
void *cur_task = NULL; void *cur_task = NULL;
// Verifies all tasks in the snapshot // Verifies all tasks in the snapshot
esp_core_dump_reset_tasks_snapshots_iter(); esp_core_dump_reset_tasks_snapshots_iter();
while ((task = esp_core_dump_get_next_task(task))) { esp_core_dump_task_iterator_init(&task_iter);
if (!esp_core_dump_get_task_snapshot(task, &task_hdr, &mem_seg)) { while (esp_core_dump_task_iterator_next(&task_iter) != -1) {
if (!esp_core_dump_get_task_snapshot(task_iter.pxTaskHandle, &task_hdr, &mem_seg)) {
bad_tasks_num++; bad_tasks_num++;
continue; continue;
} }
hdr.tasks_num++; hdr.tasks_num++;
if (task == esp_core_dump_get_current_task_handle()) { if (task_iter.pxTaskHandle == esp_core_dump_get_current_task_handle()) {
cur_task = task; cur_task = task_iter.pxTaskHandle;
ESP_COREDUMP_LOG_PROCESS("Task %x %x is first crashed task.", cur_task, task_hdr.tcb_addr); ESP_COREDUMP_LOG_PROCESS("Task %x %x is first crashed task.", cur_task, task_hdr.tcb_addr);
} }
ESP_COREDUMP_LOG_PROCESS("Stack len = %lu (%x %x)", task_hdr.stack_end-task_hdr.stack_start, ESP_COREDUMP_LOG_PROCESS("Stack len = %lu (%x %x)", task_hdr.stack_end-task_hdr.stack_start,
@ -128,11 +129,17 @@ esp_err_t esp_core_dump_write_binary(core_dump_write_config_t *write_cfg)
// Check if current task TCB is broken // Check if current task TCB is broken
if (cur_task == NULL) { if (cur_task == NULL) {
ESP_COREDUMP_LOG_PROCESS("The current crashed task is broken."); ESP_COREDUMP_LOG_PROCESS("The current crashed task is broken.");
cur_task = esp_core_dump_get_next_task(NULL); esp_core_dump_task_iterator_init(&task_iter);
if (cur_task == NULL) { while (esp_core_dump_task_iterator_next(&task_iter) != -1) {
ESP_COREDUMP_LOGE("No valid tasks in the system!"); if (task_iter.pxTaskHandle != NULL) {
return ESP_FAIL; cur_task = task_iter.pxTaskHandle;
} break;
}
}
if (cur_task == NULL) {
ESP_COREDUMP_LOGE("No valid tasks in the system!");
return ESP_FAIL;
}
} }
// Add user memory regions data size // Add user memory regions data size
@ -196,15 +203,16 @@ esp_err_t esp_core_dump_write_binary(core_dump_write_config_t *write_cfg)
} }
} }
// Write all other tasks in the snapshot // Write all other tasks in the snapshot
task = NULL; esp_core_dump_task_iterator_init(&task_iter);
while ((task = esp_core_dump_get_next_task(task))) { while (esp_core_dump_task_iterator_next(&task_iter) != -1) {
if (!esp_core_dump_get_task_snapshot(task, &task_hdr, NULL)) if (!esp_core_dump_get_task_snapshot(task_iter.pxTaskHandle, &task_hdr, NULL))
continue; continue;
// Skip first crashed task // Skip first crashed task
if (task == cur_task) { if (task_iter.pxTaskHandle == cur_task) {
continue; continue;
} }
ESP_COREDUMP_LOGD("Save task %x (TCB:%x, stack:%x..%x)", task, task_hdr.tcb_addr, task_hdr.stack_start, task_hdr.stack_end); ESP_COREDUMP_LOGD("Save task %x (TCB:%x, stack:%x..%x)",
task_iter.pxTaskHandle, task_hdr.tcb_addr, task_hdr.stack_start, task_hdr.stack_end);
err = esp_core_dump_save_task(write_cfg, &task_hdr); err = esp_core_dump_save_task(write_cfg, &task_hdr);
if (err != ESP_OK) { if (err != ESP_OK) {
ESP_COREDUMP_LOGE("Failed to save core dump task %x, error=%d!", ESP_COREDUMP_LOGE("Failed to save core dump task %x, error=%d!",
@ -215,10 +223,10 @@ esp_err_t esp_core_dump_write_binary(core_dump_write_config_t *write_cfg)
// Save interrupted stacks of the tasks // Save interrupted stacks of the tasks
// Actually there can be tasks interrupted at the same time, one on every core including the crashed one. // Actually there can be tasks interrupted at the same time, one on every core including the crashed one.
task = NULL;
esp_core_dump_reset_tasks_snapshots_iter(); esp_core_dump_reset_tasks_snapshots_iter();
while ((task = esp_core_dump_get_next_task(task))) { esp_core_dump_task_iterator_init(&task_iter);
if (!esp_core_dump_get_task_snapshot(task, &task_hdr, &mem_seg)) while (esp_core_dump_task_iterator_next(&task_iter) != -1) {
if (!esp_core_dump_get_task_snapshot(task_iter.pxTaskHandle, &task_hdr, &mem_seg))
continue; continue;
if (mem_seg.size > 0) { if (mem_seg.size > 0) {
ESP_COREDUMP_LOG_PROCESS("Save interrupted task stack %lu bytes @ %x", ESP_COREDUMP_LOG_PROCESS("Save interrupted task stack %lu bytes @ %x",

Wyświetl plik

@ -194,11 +194,6 @@ inline void esp_core_dump_reset_tasks_snapshots_iter(void)
esp_core_dump_reset_fake_stacks(); esp_core_dump_reset_fake_stacks();
} }
inline void *esp_core_dump_get_next_task(void *handle)
{
return pxTaskGetNext(handle);
}
bool esp_core_dump_get_task_snapshot(void *handle, core_dump_task_header_t *task, bool esp_core_dump_get_task_snapshot(void *handle, core_dump_task_header_t *task,
core_dump_mem_seg_header_t *interrupted_stack) core_dump_mem_seg_header_t *interrupted_stack)
{ {
@ -319,9 +314,4 @@ inline bool esp_core_dump_in_isr_context(void)
#endif // CONFIG_ESP_TASK_WDT_EN #endif // CONFIG_ESP_TASK_WDT_EN
} }
inline core_dump_task_handle_t esp_core_dump_get_current_task_handle()
{
return (core_dump_task_handle_t) xTaskGetCurrentTaskHandleForCore(xPortGetCoreID());
}
#endif #endif

Wyświetl plik

@ -393,6 +393,7 @@ static int elf_process_note_segment(core_dump_elf_t *self, int notes_size)
static int elf_process_tasks_regs(core_dump_elf_t *self) static int elf_process_tasks_regs(core_dump_elf_t *self)
{ {
core_dump_task_header_t task_hdr = { 0 }; core_dump_task_header_t task_hdr = { 0 };
TaskIterator_t task_iter;
void *task = NULL; void *task = NULL;
int len = 0; int len = 0;
int ret = 0; int ret = 0;
@ -413,11 +414,11 @@ static int elf_process_tasks_regs(core_dump_elf_t *self)
// processes PR_STATUS and register dump for each task // processes PR_STATUS and register dump for each task
// each call to the processing function appends PR_STATUS note into note segment // each call to the processing function appends PR_STATUS note into note segment
// and writes data or updates the segment note header accordingly (if phdr is set) // and writes data or updates the segment note header accordingly (if phdr is set)
task = NULL; esp_core_dump_task_iterator_init(&task_iter);
while ((task = esp_core_dump_get_next_task(task))) { while (esp_core_dump_task_iterator_next(&task_iter) != -1) {
if (task == esp_core_dump_get_current_task_handle()) { task = task_iter.pxTaskHandle;
continue; // skip current task (already processed) if (!task || task == esp_core_dump_get_current_task_handle()) // skip current task (already processed)
} continue;
if (esp_core_dump_get_task_snapshot(task, &task_hdr, NULL)) { if (esp_core_dump_get_task_snapshot(task, &task_hdr, NULL)) {
ret = elf_add_regs(self, &task_hdr); ret = elf_add_regs(self, &task_hdr);
if (self->elf_stage == ELF_STAGE_PLACE_HEADERS) { if (self->elf_stage == ELF_STAGE_PLACE_HEADERS) {
@ -453,9 +454,9 @@ static int elf_save_task(core_dump_elf_t *self, core_dump_task_header_t *task)
static int elf_write_tasks_data(core_dump_elf_t *self) static int elf_write_tasks_data(core_dump_elf_t *self)
{ {
int elf_len = 0; int elf_len = 0;
void *task = NULL;
core_dump_task_header_t task_hdr = { 0 }; core_dump_task_header_t task_hdr = { 0 };
core_dump_mem_seg_header_t interrupted_stack = { 0 }; core_dump_mem_seg_header_t interrupted_stack = { 0 };
TaskIterator_t task_iter;
int ret = ELF_PROC_ERR_OTHER; int ret = ELF_PROC_ERR_OTHER;
uint16_t tasks_num = 0; uint16_t tasks_num = 0;
uint16_t bad_tasks_num = 0; uint16_t bad_tasks_num = 0;
@ -468,17 +469,17 @@ static int elf_write_tasks_data(core_dump_elf_t *self)
ESP_COREDUMP_LOG_PROCESS("================ Processing task data ================"); ESP_COREDUMP_LOG_PROCESS("================ Processing task data ================");
// processes all task's stack data and writes segment data into partition // processes all task's stack data and writes segment data into partition
// if flash configuration is set // if flash configuration is set
task = NULL;
esp_core_dump_reset_tasks_snapshots_iter(); esp_core_dump_reset_tasks_snapshots_iter();
while ((task = esp_core_dump_get_next_task(task))) { esp_core_dump_task_iterator_init(&task_iter);
while (esp_core_dump_task_iterator_next(&task_iter) != -1) {
tasks_num++; tasks_num++;
if (!esp_core_dump_get_task_snapshot(task, &task_hdr, &interrupted_stack)) { if (!esp_core_dump_get_task_snapshot(task_iter.pxTaskHandle, &task_hdr, &interrupted_stack)) {
bad_tasks_num++; bad_tasks_num++;
continue; continue;
} }
ret = elf_save_task(self, &task_hdr); ret = elf_save_task(self, &task_hdr);
ELF_CHECK_ERR((ret > 0), ret, ELF_CHECK_ERR((ret > 0), ret,
"Task %x, TCB write failed, return (%d).", task, ret); "Task %x, TCB write failed, return (%d).", task_iter.pxTaskHandle, ret);
elf_len += ret; elf_len += ret;
if (interrupted_stack.size > 0) { if (interrupted_stack.size > 0) {
ESP_COREDUMP_LOG_PROCESS("Add interrupted task stack %lu bytes @ %x", ESP_COREDUMP_LOG_PROCESS("Add interrupted task stack %lu bytes @ %x",

Wyświetl plik

@ -291,6 +291,7 @@ void vPortExitCriticalIDF(void);
// ----------------------- System -------------------------- // ----------------------- System --------------------------
bool portVALID_LIST_MEM(const void *ptr);
bool portVALID_TCB_MEM(const void *ptr); bool portVALID_TCB_MEM(const void *ptr);
bool portVALID_STACK_MEM(const void *ptr); bool portVALID_STACK_MEM(const void *ptr);

Wyświetl plik

@ -600,6 +600,10 @@ unsigned long ulPortGetRunTime( void )
return ( unsigned long ) xTimes.tms_utime; return ( unsigned long ) xTimes.tms_utime;
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
bool portVALID_LIST_MEM(const void *ptr)
{
return true;
}
bool portVALID_TCB_MEM(const void *ptr) bool portVALID_TCB_MEM(const void *ptr)
{ {

Wyświetl plik

@ -323,6 +323,17 @@ void vPortSetStackWatchpoint(void *pxStackStart);
// -------------------- Heap Related ----------------------- // -------------------- Heap Related -----------------------
/**
* @brief Checks if a given piece of memory can be used to store a FreeRTOS list
*
* - Defined in heap_idf.c
*
* @param ptr Pointer to memory
* @return true Memory can be used to store a List
* @return false Otherwise
*/
bool xPortCheckValidListMem(const void *ptr);
/** /**
* @brief Checks if a given piece of memory can be used to store a task's TCB * @brief Checks if a given piece of memory can be used to store a task's TCB
* *
@ -345,6 +356,7 @@ bool xPortCheckValidTCBMem(const void *ptr);
*/ */
bool xPortcheckValidStackMem(const void *ptr); bool xPortcheckValidStackMem(const void *ptr);
#define portVALID_LIST_MEM(ptr) xPortCheckValidListMem(ptr)
#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr) #define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr) #define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)

Wyświetl plik

@ -365,6 +365,17 @@ void vPortSetStackWatchpoint(void *pxStackStart);
// -------------------- Heap Related ----------------------- // -------------------- Heap Related -----------------------
/**
* @brief Checks if a given piece of memory can be used to store a FreeRTOS list
*
* - Defined in heap_idf.c
*
* @param ptr Pointer to memory
* @return true Memory can be used to store a List
* @return false Otherwise
*/
bool xPortCheckValidListMem(const void *ptr);
/** /**
* @brief Checks if a given piece of memory can be used to store a task's TCB * @brief Checks if a given piece of memory can be used to store a task's TCB
* *
@ -387,6 +398,7 @@ bool xPortCheckValidTCBMem(const void *ptr);
*/ */
bool xPortcheckValidStackMem(const void *ptr); bool xPortcheckValidStackMem(const void *ptr);
#define portVALID_LIST_MEM(ptr) xPortCheckValidListMem(ptr)
#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr) #define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr) #define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)

Wyświetl plik

@ -48,6 +48,17 @@ static inline BaseType_t IRAM_ATTR xPortGetCoreID(void)
return (BaseType_t) 0; return (BaseType_t) 0;
} }
/**
* @brief Checks if a given piece of memory can be used to store a FreeRTOS list
*
* - Defined in heap_idf.c
*
* @param ptr Pointer to memory
* @return true Memory can be used to store a List
* @return false Otherwise
*/
bool xPortCheckValidListMem(const void *ptr);
/** /**
* @brief Checks if a given piece of memory can be used to store a task's TCB * @brief Checks if a given piece of memory can be used to store a task's TCB
* *
@ -70,6 +81,7 @@ bool xPortCheckValidTCBMem(const void *ptr);
*/ */
bool xPortcheckValidStackMem(const void *ptr); bool xPortcheckValidStackMem(const void *ptr);
#define portVALID_LIST_MEM(ptr) xPortCheckValidListMem(ptr)
#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr) #define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr) #define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)

Wyświetl plik

@ -692,6 +692,17 @@ FORCE_INLINE_ATTR bool xPortCanYield(void)
// -------------------- Heap Related ----------------------- // -------------------- Heap Related -----------------------
/**
* @brief Checks if a given piece of memory can be used to store a FreeRTOS list
*
* - Defined in heap_idf.c
*
* @param ptr Pointer to memory
* @return true Memory can be used to store a List
* @return false Otherwise
*/
bool xPortCheckValidListMem(const void *ptr);
/** /**
* @brief Checks if a given piece of memory can be used to store a task's TCB * @brief Checks if a given piece of memory can be used to store a task's TCB
* *
@ -714,6 +725,7 @@ bool xPortCheckValidTCBMem(const void *ptr);
*/ */
bool xPortcheckValidStackMem(const void *ptr); bool xPortcheckValidStackMem(const void *ptr);
#define portVALID_LIST_MEM(ptr) xPortCheckValidListMem(ptr)
#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr) #define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr) #define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)

Wyświetl plik

@ -636,6 +636,17 @@ FORCE_INLINE_ATTR BaseType_t xPortGetCoreID(void)
// -------------------- Heap Related ----------------------- // -------------------- Heap Related -----------------------
/**
* @brief Checks if a given piece of memory can be used to store a FreeRTOS list
*
* - Defined in heap_idf.c
*
* @param ptr Pointer to memory
* @return true Memory can be used to store a List
* @return false Otherwise
*/
bool xPortCheckValidListMem(const void *ptr);
/** /**
* @brief Checks if a given piece of memory can be used to store a task's TCB * @brief Checks if a given piece of memory can be used to store a task's TCB
* *
@ -658,6 +669,7 @@ bool xPortCheckValidTCBMem(const void *ptr);
*/ */
bool xPortcheckValidStackMem(const void *ptr); bool xPortcheckValidStackMem(const void *ptr);
#define portVALID_LIST_MEM(ptr) xPortCheckValidListMem(ptr)
#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr) #define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr) #define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)

Wyświetl plik

@ -895,10 +895,10 @@ static List_t * non_ready_task_lists[] =
/*----------------------------------------------------------*/ /*----------------------------------------------------------*/
/** /**
* @brief Get the next task list to traverse * @brief Get the task list from state lists by index
* *
* - Given a particular task list, this function returns the next task to traverse. * - This function returns the task list based on the specified index.
* - The task lists are returned in the following precedence * - The index is relative to the below order of the task state lists
* - Ready lists (highest to lowers priority) * - Ready lists (highest to lowers priority)
* - Pending ready list(s) * - Pending ready list(s)
* - Delayed list 1 * - Delayed list 1
@ -906,132 +906,133 @@ static List_t * non_ready_task_lists[] =
* - Waiting termination list * - Waiting termination list
* - Suspended list * - Suspended list
* *
* @param pxCurTaskList Previously traversed task list (or NULL if obtaining the first task list) * @param uxListIndex The index of the desired task list.
* @return List_t* The next task list to traverse (or NULL of all task lists have been traversed) * @return A pointer to the task list at the specified index.
* Returns NULL if the index is out of bounds or list is corrupted.
*/ */
static List_t * pxGetNextTaskList( List_t * pxCurTaskList ) static List_t * pxGetTaskListByIndex( UBaseType_t uxListIndex )
{ {
List_t * pxNextTaskList = NULL; List_t * pxTaskList;
const size_t xNonReadyTaskListsCnt = ( sizeof( non_ready_task_lists ) / sizeof( List_t * ) );
/* No Current List. Start from the highest priority ready task list */ if( uxListIndex < configMAX_PRIORITIES )
if( pxCurTaskList == NULL )
{ {
pxNextTaskList = &pxReadyTasksLists[ configMAX_PRIORITIES - 1 ]; pxTaskList = &pxReadyTasksLists[ configMAX_PRIORITIES - 1 - uxListIndex ];
} }
/* Current list is one of the ready task lists. Find the current priority, and return the next lower priority ready task list */ else if( uxListIndex < configMAX_PRIORITIES + xNonReadyTaskListsCnt + 1 )
else if( ( pxCurTaskList >= &pxReadyTasksLists[ 0 ] ) && ( pxCurTaskList <= &pxReadyTasksLists[ configMAX_PRIORITIES - 1 ] ) )
{ {
/* Find the current priority */ pxTaskList = non_ready_task_lists[ uxListIndex - configMAX_PRIORITIES ];
int cur_priority; }
else
{
pxTaskList = NULL;
}
for( cur_priority = configMAX_PRIORITIES - 1; cur_priority >= 0; cur_priority-- ) /* sanity check */
if( pxTaskList )
{
if( !portVALID_LIST_MEM( pxTaskList ) )
{ {
if( pxCurTaskList == &pxReadyTasksLists[ cur_priority ] ) pxTaskList = NULL;
{
break;
}
}
/* Return the ready task list at (cur_priority - 1), or the pending ready task list */
if( cur_priority > 0 )
{
pxNextTaskList = &pxReadyTasksLists[ cur_priority - 1 ];
}
/* We've reached the end of the Ready Task Lists. We get the next list from the non-ready task lists */
else if( cur_priority == 0 )
{
pxNextTaskList = non_ready_task_lists[ 0 ];
}
else
{
abort(); /* This should never occur */
} }
} }
/* Current list is one of the non-ready task lists. Fetch the next non-ready task list */ return pxTaskList;
if( pxNextTaskList == NULL )
{
int cur_list_idx;
const int num_non_ready_task_lists = ( sizeof( non_ready_task_lists ) / sizeof( List_t * ) );
/* Note: - 1 so that if the current list is the last on non_ready_task_lists[], the next list will return NULL */
for( cur_list_idx = 0; cur_list_idx < num_non_ready_task_lists - 1; cur_list_idx++ )
{
if( pxCurTaskList == non_ready_task_lists[ cur_list_idx ] )
{
pxNextTaskList = non_ready_task_lists[ cur_list_idx + 1 ];
break;
}
}
}
return pxNextTaskList;
} }
/*----------------------------------------------------------*/ /*----------------------------------------------------------*/
TaskHandle_t pxTaskGetNext( TaskHandle_t pxTask ) /**
* @brief Get the total count of task lists.
*
* The count includes both the ready task lists (based on priority) and non-ready task lists.
*
* @return The total count of task lists.
*
*/
static inline UBaseType_t pxGetTaskListCount( void )
{ {
TCB_t * pxTCB = ( TCB_t * ) pxTask; return configMAX_PRIORITIES + ( sizeof( non_ready_task_lists ) / sizeof( List_t * ) );
}
/*----------------------------------------------------------*/
/* Check current task is valid */ /**
if( ( pxTCB != NULL ) && !portVALID_TCB_MEM( pxTCB ) ) * @brief Get the next task using the task iterator.
*
* This function retrieves the next task in the traversal sequence.
*
* @param xIterator Pointer to the task iterator structure.
*
* @return Index of the current task list. Returns -1 if all tasks have been traversed.
*
* @note The task iterator keeps track of the current state during task traversal,
* including the index of the current task list and the pointer of the next task list item.
* When all tasks have been traversed, this function returns -1.
* If a broken or corrupted task is encountered, the task handle is set to NULL.
*/
int xTaskGetNext( TaskIterator_t * xIterator )
{
if( !xIterator )
{ {
return NULL; return -1;
} }
List_t * pxCurTaskList; ListItem_t * pxNextListItem = xIterator->pxNextListItem;
const ListItem_t * pxCurListItem; UBaseType_t uxCurListIdx = xIterator->uxCurrentListIndex;
UBaseType_t uxMaxListIdx = pxGetTaskListCount();
if( pxTCB == NULL ) for( ; uxCurListIdx < uxMaxListIdx; ++uxCurListIdx )
{ {
/* Starting traversal for the first time */ List_t * pxCurrentTaskList = pxGetTaskListByIndex( uxCurListIdx );
pxCurTaskList = pxGetNextTaskList( NULL );
pxCurListItem = listGET_END_MARKER( pxCurTaskList );
}
else
{
/* Continuing traversal */
pxCurTaskList = listLIST_ITEM_CONTAINER( &pxTCB->xStateListItem );
pxCurListItem = &pxTCB->xStateListItem;
}
ListItem_t * pxNextListItem = NULL; if( !pxCurrentTaskList || ( listCURRENT_LIST_LENGTH( pxCurrentTaskList ) == 0 ) )
if( pxCurListItem->pxNext == listGET_END_MARKER( pxCurTaskList ) )
{
List_t * pxNextTaskList = pxGetNextTaskList( pxCurTaskList );
while( pxNextTaskList != NULL )
{ {
if( !listLIST_IS_EMPTY( pxNextTaskList ) ) continue;
{
/* Get the first item in the next task list */
pxNextListItem = listGET_HEAD_ENTRY( pxNextTaskList );
break;
}
/* Task list is empty. Get the next task list */
pxNextTaskList = pxGetNextTaskList( pxNextTaskList );
} }
}
else const ListItem_t * pxCurrListItem = listGET_END_MARKER( pxCurrentTaskList );
{
/*There are still more items in the current task list. Get the next item */ if( !pxNextListItem )
pxNextListItem = listGET_NEXT( pxCurListItem ); {
/* We are here if the traversal starts from the beginning or when we finish traversing
* for one of the state lists
*/
pxNextListItem = listGET_NEXT( pxCurrListItem );
}
if( !portVALID_LIST_MEM( pxNextListItem ) )
{
/* Nothing to do with the corrupted list item. We will skip to the next task state list.
* pxNextListItem should be NULL at the beginning of each task list.
*/
pxNextListItem = NULL;
continue;
}
TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxNextListItem );
if( !portVALID_TCB_MEM( pxTCB ) )
{
pxTCB = NULL;
}
xIterator->pxTaskHandle = pxTCB;
xIterator->uxCurrentListIndex = uxCurListIdx;
if( pxCurrListItem->pxPrevious == pxNextListItem )
{
/* If this is the last item of the current state list */
xIterator->uxCurrentListIndex++;
xIterator->pxNextListItem = NULL;
}
else
{
xIterator->pxNextListItem = listGET_NEXT( pxNextListItem );
}
return uxCurListIdx;
} }
TCB_t * pxNextTCB; return -1; /* end of the task list */
if( pxNextListItem == NULL )
{
pxNextTCB = NULL;
}
else
{
pxNextTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxNextListItem );
}
return pxNextTCB;
} }
/*----------------------------------------------------------*/ /*----------------------------------------------------------*/
@ -1057,34 +1058,13 @@ UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray,
{ {
UBaseType_t uxArrayNumFilled = 0; UBaseType_t uxArrayNumFilled = 0;
/*Traverse all of the tasks lists */ /* Traverse all of the tasks lists */
List_t * pxCurTaskList = pxGetNextTaskList( NULL ); /*Get the first task list */ TaskIterator_t xTaskIter = { 0 }; /* Point to the first task list */
while( pxCurTaskList != NULL && uxArrayNumFilled < uxArrayLength ) while( xTaskGetNext( &xTaskIter ) != -1 && uxArrayNumFilled < uxArrayLength )
{ {
if( !listLIST_IS_EMPTY( pxCurTaskList ) ) vTaskGetSnapshot( xTaskIter.pxTaskHandle, &pxTaskSnapshotArray[ uxArrayNumFilled ] );
{ uxArrayNumFilled++;
const ListItem_t * pxCurListItem;
/*Walk each task on the current task list */
pxCurListItem = listGET_HEAD_ENTRY( pxCurTaskList );
while( pxCurListItem != listGET_END_MARKER( pxCurTaskList ) )
{
TCB_t * pxTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxCurListItem );
vTaskGetSnapshot( ( TaskHandle_t ) pxTCB, &pxTaskSnapshotArray[ uxArrayNumFilled ] );
uxArrayNumFilled++;
if( !( uxArrayNumFilled < uxArrayLength ) )
{
break;
}
pxCurListItem = listGET_NEXT( pxCurListItem );
}
}
/*Get the next task list */
pxCurTaskList = pxGetNextTaskList( pxCurTaskList );
} }
if( pxTCBSize != NULL ) if( pxTCBSize != NULL )

Wyświetl plik

@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@ -37,18 +37,33 @@ typedef struct xTASK_SNAPSHOT
} TaskSnapshot_t; } TaskSnapshot_t;
/** /**
* @brief Iterate over all tasks in the system * @brief Task Snapshot iterator
* *
* - This function can be used to iterate over every task in the system * Used in xTaskGetNext(). Must be zero/null initialized on the first call.
* - The first call to this function must set pxTask to NULL
* - When all functions have been iterated, this function will return NULL.
*
* @note This function should only be called when FreeRTOS is no longer running (e.g., during a panic) as this function
* does not acquire any locks.
* @param pxTask Handle of the previous task (or NULL on the first call of this function)
* @return TaskHandle_t Handle of the next task (or NULL when all tasks have been iterated over)
*/ */
TaskHandle_t pxTaskGetNext( TaskHandle_t pxTask ); typedef struct TaskIterator
{
UBaseType_t uxCurrentListIndex; /**< Current task list index being traversed. */
ListItem_t * pxNextListItem; /**< Next task list item to be traversed. */
TaskHandle_t pxTaskHandle; /**< Current task handle being traversed. */
} TaskIterator_t;
/**
* @brief Get the next task using the task iterator.
*
* This function retrieves the next task in the traversal sequence.
*
* @param xIterator Pointer to the task iterator structure.
*
* @return Index of the current task list. Returns -1 if all tasks have been traversed.
*
* @note The task iterator keeps track of the current state during task traversal,
* including the index of the current task list and the pointer of the next task list item.
* When all tasks have been traversed, this function returns -1.
* If a broken or corrupted task is encountered, the task handle is set to NULL.
*
*/
int xTaskGetNext( TaskIterator_t * xIterator );
/** /**
* @brief Fill a TaskSnapshot_t structure for specified task. * @brief Fill a TaskSnapshot_t structure for specified task.

Wyświetl plik

@ -76,6 +76,15 @@ size_t xPortGetMinimumEverFreeHeapSize( void )
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
bool xPortCheckValidListMem( const void * ptr )
{
#if CONFIG_IDF_TARGET_LINUX
return true;
#else /* CONFIG_IDF_TARGET_LINUX */
return esp_ptr_internal( ptr ) && esp_ptr_byte_accessible( ptr );
#endif /* CONFIG_IDF_TARGET_LINUX */
}
bool xPortCheckValidTCBMem( const void * ptr ) bool xPortCheckValidTCBMem( const void * ptr )
{ {
#if CONFIG_IDF_TARGET_LINUX #if CONFIG_IDF_TARGET_LINUX

Wyświetl plik

@ -39,8 +39,8 @@ entries:
tasks:vTaskSetThreadLocalStoragePointerAndDelCallback (default) tasks:vTaskSetThreadLocalStoragePointerAndDelCallback (default)
# Task Snapshot # Task Snapshot
if FREERTOS_PLACE_SNAPSHOT_FUNS_INTO_FLASH = y: if FREERTOS_PLACE_SNAPSHOT_FUNS_INTO_FLASH = y:
tasks:pxGetNextTaskList (default) tasks:pxGetTaskListByIndex (default)
tasks:pxTaskGetNext (default) tasks:xTaskGetNext (default)
tasks:uxTaskGetSnapshotAll (default) tasks:uxTaskGetSnapshotAll (default)
# ------------------------------------------------------------------------------------------------------------------ # ------------------------------------------------------------------------------------------------------------------

Wyświetl plik

@ -169,16 +169,17 @@ TEST_CASE("Task snapshot: Iterate", "[freertos]")
UBaseType_t old_priority; UBaseType_t old_priority;
setup(task_list, &num_tasks, &old_priority); setup(task_list, &num_tasks, &old_priority);
// Get task snapshots using pxTaskGetNext() and vTaskGetSnapshot() // Get task snapshots using xTaskGetNext() and vTaskGetSnapshot()
TaskSnapshot_t task_snapshots[TEST_MAX_TASKS_NUM]; TaskSnapshot_t task_snapshots[TEST_MAX_TASKS_NUM];
UBaseType_t num_snapshots = 0; UBaseType_t num_snapshots = 0;
TaskHandle_t cur_task_handle = pxTaskGetNext(NULL); TaskIterator_t task_iterator = {0};
while (cur_task_handle != NULL) { while (xTaskGetNext(&task_iterator) != -1) {
// Get the task's snapshot if (task_iterator.pxTaskHandle != NULL) {
BaseType_t Result = vTaskGetSnapshot(cur_task_handle, &task_snapshots[num_snapshots]); // Get the task's snapshot
TEST_ASSERT_EQUAL(pdTRUE, Result); BaseType_t Result = vTaskGetSnapshot(task_iterator.pxTaskHandle, &task_snapshots[num_snapshots]);
num_snapshots++; TEST_ASSERT_EQUAL(pdTRUE, Result);
cur_task_handle = pxTaskGetNext(cur_task_handle); num_snapshots++;
}
} }
TEST_ASSERT_LESS_OR_EQUAL(TEST_MAX_TASKS_NUM, num_snapshots); TEST_ASSERT_LESS_OR_EQUAL(TEST_MAX_TASKS_NUM, num_snapshots);