apptrace: Refactor apptrace to better support various tracing HW

pull/7307/head
Alexey Gerenkov 2020-12-21 20:17:42 +03:00
parent 792461aa99
commit 821869d98d
36 changed files with 1591 additions and 1309 deletions

View file

@ -6,7 +6,19 @@ set(srcs
set(include_dirs "include")
if(CONFIG_SYSVIEW_ENABLE)
set(priv_include_dirs "private_include" "port/include")
if(CONFIG_APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE)
list(APPEND srcs
"app_trace_membufs_proto.c")
if(CONFIG_IDF_TARGET_ARCH_XTENSA)
list(APPEND srcs
"port/xtensa/port.c")
endif()
endif()
if(CONFIG_APPTRACE_SV_ENABLE)
list(APPEND include_dirs
sys_view/Config
sys_view/SEGGER
@ -16,7 +28,7 @@ if(CONFIG_SYSVIEW_ENABLE)
"sys_view/SEGGER/SEGGER_SYSVIEW.c"
"sys_view/Sample/Config/SEGGER_SYSVIEW_Config_FreeRTOS.c"
"sys_view/Sample/OS/SEGGER_SYSVIEW_FreeRTOS.c"
"sys_view/esp32/SEGGER_RTT_esp32.c"
"sys_view/esp/SEGGER_RTT_esp.c"
"sys_view/ext/heap_trace_module.c"
"sys_view/ext/logging.c")
endif()
@ -30,6 +42,7 @@ endif()
idf_component_register(SRCS "${srcs}"
INCLUDE_DIRS "${include_dirs}"
PRIV_INCLUDE_DIRS "${priv_include_dirs}"
PRIV_REQUIRES soc
LDFRAGMENTS linker.lf)

View file

@ -4,29 +4,44 @@ menu "Application Level Tracing"
prompt "Data Destination"
default APPTRACE_DEST_NONE
help
Select destination for application trace: trace memory or none (to disable).
Select destination for application trace: JTAG or none (to disable).
config APPTRACE_DEST_TRAX
bool "Trace memory"
config APPTRACE_DEST_JTAG
bool "JTAG"
select APPTRACE_DEST_TRAX if IDF_TARGET_ARCH_XTENSA
select APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
select APPTRACE_ENABLE
config APPTRACE_DEST_NONE
bool "None"
endchoice
config APPTRACE_ENABLE
config APPTRACE_DEST_TRAX
bool
depends on !ESP32_TRAX && !ESP32S2_TRAX
depends on IDF_TARGET_ARCH_XTENSA && !ESP32_TRAX && !ESP32S2_TRAX
select ESP32_MEMMAP_TRACEMEM
select ESP32S2_MEMMAP_TRACEMEM
select ESP32_MEMMAP_TRACEMEM_TWOBANKS
select ESP32S2_MEMMAP_TRACEMEM_TWOBANKS
default n
help
Enables/disables the TRAX tracing HW.
config APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
bool
default n
help
Enables/disables the swapping memory buffers tracing protocol.
config APPTRACE_ENABLE
bool
default n
help
Enables/disables the application tracing module.
config APPTRACE_LOCK_ENABLE
bool
default !SYSVIEW_ENABLE
default !APPTRACE_SV_ENABLE
help
Enables/disables the application tracing module's internal sync lock.
@ -41,16 +56,23 @@ menu "Application Level Tracing"
config APPTRACE_POSTMORTEM_FLUSH_THRESH
int "Threshold for flushing last trace data to host on panic"
depends on APPTRACE_DEST_TRAX
depends on APPTRACE_ENABLE
range 0 16384
default 0
help
Threshold for flushing the last trace data to the host on panic in post-mortem mode.
This is the minimal amount of data needed to perform a flush, in bytes.
config APPTRACE_BUF_SIZE
int "Size of the apptrace buffer"
depends on APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE && !APPTRACE_DEST_TRAX
default 16384
help
Size of the memory buffer for trace data, in bytes.
config APPTRACE_PENDING_DATA_SIZE_MAX
int "Size of the pending data buffer"
depends on APPTRACE_DEST_TRAX
depends on APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
default 0
help
Size of the buffer for events in bytes. It is useful for buffering events from
@ -59,151 +81,151 @@ menu "Application Level Tracing"
menu "FreeRTOS SystemView Tracing"
depends on APPTRACE_ENABLE
config SYSVIEW_ENABLE
config APPTRACE_SV_ENABLE
bool "SystemView Tracing Enable"
depends on APPTRACE_ENABLE
default n
help
Enables support for SEGGER SystemView tracing functionality.
choice SYSVIEW_TS_SOURCE
choice APPTRACE_SV_TS_SOURCE
prompt "Timer to use as timestamp source"
depends on SYSVIEW_ENABLE
default SYSVIEW_TS_SOURCE_CCOUNT if FREERTOS_UNICORE && !PM_ENABLE
default SYSVIEW_TS_SOURCE_TIMER_00 if !FREERTOS_UNICORE && !PM_ENABLE
default SYSVIEW_TS_SOURCE_ESP_TIMER if PM_ENABLE
depends on APPTRACE_SV_ENABLE
default APPTRACE_SV_TS_SOURCE_CCOUNT if FREERTOS_UNICORE && !PM_ENABLE && !IDF_TARGET_ESP32C3
default APPTRACE_SV_TS_SOURCE_TIMER_00 if !FREERTOS_UNICORE && !PM_ENABLE && !IDF_TARGET_ESP32C3
default APPTRACE_SV_TS_SOURCE_ESP_TIMER if PM_ENABLE || IDF_TARGET_ESP32C3
help
SystemView needs to use a hardware timer as the source of timestamps
when tracing. This option selects the timer for it.
config SYSVIEW_TS_SOURCE_CCOUNT
config APPTRACE_SV_TS_SOURCE_CCOUNT
bool "CPU cycle counter (CCOUNT)"
depends on FREERTOS_UNICORE && !PM_ENABLE
depends on FREERTOS_UNICORE && !PM_ENABLE && !IDF_TARGET_ESP32C3
config SYSVIEW_TS_SOURCE_TIMER_00
config APPTRACE_SV_TS_SOURCE_TIMER_00
bool "Timer 0, Group 0"
depends on !PM_ENABLE
depends on !PM_ENABLE && !IDF_TARGET_ESP32C3
config SYSVIEW_TS_SOURCE_TIMER_01
config APPTRACE_SV_TS_SOURCE_TIMER_01
bool "Timer 1, Group 0"
depends on !PM_ENABLE
depends on !PM_ENABLE && !IDF_TARGET_ESP32C3
config SYSVIEW_TS_SOURCE_TIMER_10
config APPTRACE_SV_TS_SOURCE_TIMER_10
bool "Timer 0, Group 1"
depends on !PM_ENABLE
depends on !PM_ENABLE && !IDF_TARGET_ESP32C3
config SYSVIEW_TS_SOURCE_TIMER_11
config APPTRACE_SV_TS_SOURCE_TIMER_11
bool "Timer 1, Group 1"
depends on !PM_ENABLE
depends on !PM_ENABLE && !IDF_TARGET_ESP32C3
config SYSVIEW_TS_SOURCE_ESP_TIMER
config APPTRACE_SV_TS_SOURCE_ESP_TIMER
bool "esp_timer high resolution timer"
endchoice
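# Example (illustrative): a typical sdkconfig fragment enabling SystemView tracing over JTAG with the
# esp_timer timestamp source would contain:
#   CONFIG_APPTRACE_DEST_JTAG=y
#   CONFIG_APPTRACE_ENABLE=y
#   CONFIG_APPTRACE_SV_ENABLE=y
#   CONFIG_APPTRACE_SV_TS_SOURCE_ESP_TIMER=y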
config SYSVIEW_MAX_TASKS
config APPTRACE_SV_MAX_TASKS
int "Maximum supported tasks"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
range 1 64
default 16
help
Configures the maximum number of tasks supported by SystemView debug tracing.
config SYSVIEW_BUF_WAIT_TMO
config APPTRACE_SV_BUF_WAIT_TMO
int "Trace buffer wait timeout"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default 500
help
Configures timeout (in us) to wait for free space in trace buffer.
Set to -1 to wait forever and avoid lost events.
config SYSVIEW_EVT_OVERFLOW_ENABLE
config APPTRACE_SV_EVT_OVERFLOW_ENABLE
bool "Trace Buffer Overflow Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Trace Buffer Overflow" event.
config SYSVIEW_EVT_ISR_ENTER_ENABLE
config APPTRACE_SV_EVT_ISR_ENTER_ENABLE
bool "ISR Enter Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "ISR Enter" event.
config SYSVIEW_EVT_ISR_EXIT_ENABLE
config APPTRACE_SV_EVT_ISR_EXIT_ENABLE
bool "ISR Exit Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "ISR Exit" event.
config SYSVIEW_EVT_ISR_TO_SCHEDULER_ENABLE
config APPTRACE_SV_EVT_ISR_TO_SCHED_ENABLE
bool "ISR Exit to Scheduler Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "ISR to Scheduler" event.
config SYSVIEW_EVT_TASK_START_EXEC_ENABLE
config APPTRACE_SV_EVT_TASK_START_EXEC_ENABLE
bool "Task Start Execution Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Task Start Execution" event.
config SYSVIEW_EVT_TASK_STOP_EXEC_ENABLE
config APPTRACE_SV_EVT_TASK_STOP_EXEC_ENABLE
bool "Task Stop Execution Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Task Stop Execution" event.
config SYSVIEW_EVT_TASK_START_READY_ENABLE
config APPTRACE_SV_EVT_TASK_START_READY_ENABLE
bool "Task Start Ready State Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Task Start Ready State" event.
config SYSVIEW_EVT_TASK_STOP_READY_ENABLE
config APPTRACE_SV_EVT_TASK_STOP_READY_ENABLE
bool "Task Stop Ready State Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Task Stop Ready State" event.
config SYSVIEW_EVT_TASK_CREATE_ENABLE
config APPTRACE_SV_EVT_TASK_CREATE_ENABLE
bool "Task Create Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Task Create" event.
config SYSVIEW_EVT_TASK_TERMINATE_ENABLE
config APPTRACE_SV_EVT_TASK_TERMINATE_ENABLE
bool "Task Terminate Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Task Terminate" event.
config SYSVIEW_EVT_IDLE_ENABLE
config APPTRACE_SV_EVT_IDLE_ENABLE
bool "System Idle Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "System Idle" event.
config SYSVIEW_EVT_TIMER_ENTER_ENABLE
config APPTRACE_SV_EVT_TIMER_ENTER_ENABLE
bool "Timer Enter Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Timer Enter" event.
config SYSVIEW_EVT_TIMER_EXIT_ENABLE
config APPTRACE_SV_EVT_TIMER_EXIT_ENABLE
bool "Timer Exit Event"
depends on SYSVIEW_ENABLE
depends on APPTRACE_SV_ENABLE
default y
help
Enables "Timer Exit" event.
@ -212,7 +234,7 @@ menu "Application Level Tracing"
config APPTRACE_GCOV_ENABLE
bool "GCOV to Host Enable"
depends on APPTRACE_ENABLE && !SYSVIEW_ENABLE
depends on APPTRACE_ENABLE && !APPTRACE_SV_ENABLE
select ESP_DEBUG_STUBS_ENABLE
default n
help

View file

@ -0,0 +1,364 @@
#include <sys/param.h>
#include <string.h>
#include "sdkconfig.h"
#include "esp_log.h"
#include "esp_app_trace_membufs_proto.h"
/** Trace data header. Every user data chunk is prepended with this header.
 * A user allocates a block with esp_apptrace_buffer_get and then fills it with data.
 * In a multithreading environment it can happen that a task gets a buffer and then gets interrupted,
 * so it is possible that the user data are incomplete when the memory block is exposed to the host.
 * In this case the host SW will see that wr_sz < block_sz and will report an error.
 */
typedef struct {
#if CONFIG_APPTRACE_SV_ENABLE
uint8_t block_sz; // size of allocated block for user data
uint8_t wr_sz; // size of actually written data
#else
uint16_t block_sz; // size of allocated block for user data
uint16_t wr_sz; // size of actually written data
#endif
} esp_tracedata_hdr_t;
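/* Illustrative helper (not used by this module): esp_apptrace_membufs_pkt_end() below copies block_sz into
 * wr_sz only after the user data have been written, so a reader can detect chunks whose writer was interrupted. */
static inline int esp_apptrace_usr_block_is_complete(const esp_tracedata_hdr_t *hdr)
{
    return hdr->wr_sz == hdr->block_sz;
}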
/** TODO: docs
*/
typedef struct {
uint16_t block_sz; // size of allocated block for user data
} esp_hostdata_hdr_t;
#if CONFIG_APPTRACE_SV_ENABLE
#define ESP_APPTRACE_USR_BLOCK_CORE(_cid_) (0)
#define ESP_APPTRACE_USR_BLOCK_LEN(_v_) (_v_)
#define ESP_APPTRACE_USR_DATA_LEN_MAX(_hw_data_) 255UL
#else
#define ESP_APPTRACE_USR_BLOCK_CORE(_cid_) ((_cid_) << 15)
#define ESP_APPTRACE_USR_BLOCK_LEN(_v_) (~(1 << 15) & (_v_))
#define ESP_APPTRACE_USR_DATA_LEN_MAX(_hw_data_) (ESP_APPTRACE_INBLOCK(_hw_data_)->sz - sizeof(esp_tracedata_hdr_t))
#endif
#define ESP_APPTRACE_USR_BLOCK_RAW_SZ(_s_) ((_s_) + sizeof(esp_tracedata_hdr_t))
#define ESP_APPTRACE_INBLOCK_MARKER(_hw_data_) ((_hw_data_)->state.markers[(_hw_data_)->state.in_block % 2])
#define ESP_APPTRACE_INBLOCK_MARKER_UPD(_hw_data_, _v_) do {(_hw_data_)->state.markers[(_hw_data_)->state.in_block % 2] += (_v_);}while(0)
#define ESP_APPTRACE_INBLOCK(_hw_data_) (&(_hw_data_)->blocks[(_hw_data_)->state.in_block % 2])
const static char *TAG = "esp_apptrace";
static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size);
esp_err_t esp_apptrace_membufs_init(esp_apptrace_membufs_proto_data_t *proto, const esp_apptrace_mem_block_t blocks_cfg[2])
{
// the down buffer is disabled by default; it is configured later via esp_apptrace_membufs_down_buffer_config()
esp_apptrace_rb_init(&proto->rb_down, NULL, 0);
// membufs proto init
for (unsigned i = 0; i < 2; i++) {
proto->blocks[i].start = blocks_cfg[i].start;
proto->blocks[i].sz = blocks_cfg[i].sz;
proto->state.markers[i] = 0;
}
proto->state.in_block = 0;
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
esp_apptrace_rb_init(&proto->rb_pend, proto->pending_data,
sizeof(proto->pending_data));
#endif
return ESP_OK;
}
void esp_apptrace_membufs_down_buffer_config(esp_apptrace_membufs_proto_data_t *data, uint8_t *buf, uint32_t size)
{
esp_apptrace_rb_init(&data->rb_down, buf, size);
}
// assumed to be protected by caller from multi-core/thread access
static esp_err_t esp_apptrace_membufs_swap(esp_apptrace_membufs_proto_data_t *proto)
{
int prev_block_num = proto->state.in_block % 2;
int new_block_num = prev_block_num ? (0) : (1);
esp_err_t res = ESP_OK;
res = proto->hw->swap_start(proto->state.in_block);
if (res != ESP_OK) {
return res;
}
proto->state.markers[new_block_num] = 0;
// switch to new block
proto->state.in_block++;
proto->hw->swap(new_block_num);
// handle data from host
esp_hostdata_hdr_t *hdr = (esp_hostdata_hdr_t *)proto->blocks[new_block_num].start;
// ESP_APPTRACE_LOGV("Host data %d, sz %d @ %p", proto->hw->host_data_pending(), hdr->block_sz, hdr);
if (proto->hw->host_data_pending() && hdr->block_sz > 0) {
// TODO: add support for multiple blocks from host, currently there is no need for that
uint8_t *p = proto->blocks[new_block_num].start + proto->blocks[new_block_num].sz;
ESP_APPTRACE_LOGD("Recvd %d bytes from host [%x %x %x %x %x %x %x %x .. %x %x %x %x %x %x %x %x]", hdr->block_sz,
*(proto->blocks[new_block_num].start+0), *(proto->blocks[new_block_num].start+1),
*(proto->blocks[new_block_num].start+2), *(proto->blocks[new_block_num].start+3),
*(proto->blocks[new_block_num].start+4), *(proto->blocks[new_block_num].start+5),
*(proto->blocks[new_block_num].start+6), *(proto->blocks[new_block_num].start+7),
*(p-8), *(p-7), *(p-6), *(p-5), *(p-4), *(p-3), *(p-2), *(p-1));
uint32_t sz = esp_apptrace_membufs_down_buffer_write_nolock(proto, (uint8_t *)(hdr+1), hdr->block_sz);
if (sz != hdr->block_sz) {
ESP_APPTRACE_LOGE("Failed to write %d bytes to down buffer (%d %d)!", hdr->block_sz - sz, hdr->block_sz, sz);
}
hdr->block_sz = 0;
}
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
// copy pending data to block if any
while (proto->state.markers[new_block_num] < proto->blocks[new_block_num].sz) {
uint32_t read_sz = esp_apptrace_rb_read_size_get(&proto->rb_pend);
if (read_sz == 0) {
break; // no more data in pending buffer
}
if (read_sz > proto->blocks[new_block_num].sz - proto->state.markers[new_block_num]) {
read_sz = proto->blocks[new_block_num].sz - proto->state.markers[new_block_num];
}
uint8_t *ptr = esp_apptrace_rb_consume(&proto->rb_pend, read_sz);
if (!ptr) {
assert(false && "Failed to consume pended bytes!!");
break;
}
ESP_APPTRACE_LOGD("Pump %d pend bytes [%x %x %x %x : %x %x %x %x : %x %x %x %x : %x %x...%x %x]",
read_sz, *(ptr+0), *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
*(ptr+5), *(ptr+6), *(ptr+7), *(ptr+8), *(ptr+9), *(ptr+10), *(ptr+11), *(ptr+12), *(ptr+13), *(ptr+read_sz-2), *(ptr+read_sz-1));
memcpy(proto->blocks[new_block_num].start + proto->state.markers[new_block_num], ptr, read_sz);
proto->state.markers[new_block_num] += read_sz;
}
#endif
proto->hw->swap_end(proto->state.in_block, proto->state.markers[prev_block_num]);
return res;
}
static esp_err_t esp_apptrace_membufs_swap_waitus(esp_apptrace_membufs_proto_data_t *proto, esp_apptrace_tmo_t *tmo)
{
int res;
while ((res = esp_apptrace_membufs_swap(proto)) != ESP_OK) {
res = esp_apptrace_tmo_check(tmo);
if (res != ESP_OK) {
break;
}
}
return res;
}
uint8_t *esp_apptrace_membufs_down_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t *size, esp_apptrace_tmo_t *tmo)
{
uint8_t *ptr = NULL;
while (1) {
uint32_t sz = esp_apptrace_rb_read_size_get(&proto->rb_down);
if (sz != 0) {
*size = MIN(*size, sz);
ptr = esp_apptrace_rb_consume(&proto->rb_down, *size);
if (!ptr) {
assert(false && "Failed to consume bytes from down buffer!");
}
break;
}
// may need to flush
if (proto->hw->host_data_pending()) {
ESP_APPTRACE_LOGD("force flush");
int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
if (res != ESP_OK) {
ESP_APPTRACE_LOGE("Failed to switch to another block to recv data from host!");
/*do not return error because data can be in down buffer already*/
}
} else {
// check tmo only if there is no data from host
int res = esp_apptrace_tmo_check(tmo);
if (res != ESP_OK) {
return NULL;
}
}
}
return ptr;
}
esp_err_t esp_apptrace_membufs_down_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
/* nothing to do */
return ESP_OK;
}
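// For context, an illustrative application-level sketch of the host-to-target path that ends up in this
// down buffer (assumes the public esp_app_trace.h API; buffer sizes and the helper name are examples only):
//
//     #include "esp_app_trace.h"
//
//     static uint8_t s_down_buf[32];
//
//     void example_poll_host_data(void)
//     {
//         // give apptrace a buffer for data sent by the host (typically done once at startup)
//         esp_apptrace_down_buffer_config(s_down_buf, sizeof(s_down_buf));
//         uint8_t cmd[16];
//         uint32_t sz = sizeof(cmd);
//         // ESP_OK means up to 'sz' bytes of host data were copied out of the down buffer
//         if (esp_apptrace_read(ESP_APPTRACE_DEST_JTAG, cmd, &sz, 100 /* us */) == ESP_OK) {
//             // ... handle 'sz' bytes received from the host ...
//         }
//     }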
static uint32_t esp_apptrace_membufs_down_buffer_write_nolock(esp_apptrace_membufs_proto_data_t *proto, uint8_t *data, uint32_t size)
{
uint32_t total_sz = 0;
while (total_sz < size) {
ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock WRS %d-%d-%d %d", proto->rb_down.wr, proto->rb_down.rd,
proto->rb_down.cur_size, size);
uint32_t wr_sz = esp_apptrace_rb_write_size_get(&proto->rb_down);
if (wr_sz == 0) {
break;
}
if (wr_sz > size - total_sz) {
wr_sz = size - total_sz;
}
ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock wr %d", wr_sz);
uint8_t *ptr = esp_apptrace_rb_produce(&proto->rb_down, wr_sz);
if (!ptr) {
assert(false && "Failed to produce bytes to down buffer!");
}
ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock wr %d to 0x%x from 0x%x", wr_sz, ptr, data + total_sz + wr_sz);
memcpy(ptr, data + total_sz, wr_sz);
total_sz += wr_sz;
ESP_APPTRACE_LOGD("esp_apptrace_trax_down_buffer_write_nolock wr %d/%d", wr_sz, total_sz);
}
return total_sz;
}
static inline uint8_t *esp_apptrace_membufs_wait4buf(esp_apptrace_membufs_proto_data_t *proto, uint16_t size, esp_apptrace_tmo_t *tmo, int *pended)
{
uint8_t *ptr = NULL;
int res = esp_apptrace_membufs_swap_waitus(proto, tmo);
if (res != ESP_OK) {
return NULL;
}
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
// check if we still have pending data
if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
// if after block switch we still have pending data (not all pending data have been pumped to block)
// alloc new pending buffer
*pended = 1;
ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
if (!ptr) {
ESP_APPTRACE_LOGE("Failed to alloc pend buf 1: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
}
} else
#endif
{
// update block pointers
if (ESP_APPTRACE_INBLOCK_MARKER(proto) + size > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
*pended = 1;
ptr = esp_apptrace_rb_produce(&proto->rb_pend, size);
if (ptr == NULL) {
ESP_APPTRACE_LOGE("Failed to alloc pend buf 2: w-r-s %d-%d-%d!", proto->rb_pend.wr, proto->rb_pend.rd, proto->rb_pend.cur_size);
}
#endif
} else {
*pended = 0;
ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
}
}
return ptr;
}
static inline uint8_t *esp_apptrace_membufs_pkt_start(uint8_t *ptr, uint16_t size)
{
// it is safe to use cpu_hal_get_core_id() in macro call because arg is used only once inside it
((esp_tracedata_hdr_t *)ptr)->block_sz = ESP_APPTRACE_USR_BLOCK_CORE(cpu_hal_get_core_id()) | size;
((esp_tracedata_hdr_t *)ptr)->wr_sz = 0;
return ptr + sizeof(esp_tracedata_hdr_t);
}
static inline void esp_apptrace_membufs_pkt_end(uint8_t *ptr)
{
esp_tracedata_hdr_t *hdr = (esp_tracedata_hdr_t *)(ptr - sizeof(esp_tracedata_hdr_t));
// update written size
hdr->wr_sz = hdr->block_sz;
}
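// Worked example (illustrative, non-SystemView case): a 10-byte chunk allocated on core 1 gets
// block_sz = ESP_APPTRACE_USR_BLOCK_CORE(1) | 10 = (1 << 15) | 10 = 0x800A, while wr_sz stays 0 until
// esp_apptrace_membufs_pkt_end() copies block_sz into it; this is how the host distinguishes complete
// chunks from interrupted ones.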
uint8_t *esp_apptrace_membufs_up_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t size, esp_apptrace_tmo_t *tmo)
{
uint8_t *buf_ptr = NULL;
if (size > ESP_APPTRACE_USR_DATA_LEN_MAX(proto)) {
ESP_APPTRACE_LOGE("Too large user data size %d!", size);
return NULL;
}
// check for data in the pending buffer
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
// if we have buffered data try to switch block
esp_apptrace_membufs_swap(proto);
// if switch was successful, part or all pended data have been copied to block
}
if (esp_apptrace_rb_read_size_get(&proto->rb_pend) > 0) {
// if we have buffered data alloc new pending buffer
ESP_APPTRACE_LOGD("Get %d bytes from PEND buffer", size);
buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
if (buf_ptr == NULL) {
int pended_buf;
buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
if (buf_ptr && !pended_buf) {
ESP_APPTRACE_LOGD("Get %d bytes from block", size);
// update cur block marker
ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
}
}
} else {
#else
if (1) {
#endif
if (ESP_APPTRACE_INBLOCK_MARKER(proto) + ESP_APPTRACE_USR_BLOCK_RAW_SZ(size) > ESP_APPTRACE_INBLOCK(proto)->sz) {
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
ESP_APPTRACE_LOGD("Block full. Get %d bytes from PEND buffer", size);
buf_ptr = esp_apptrace_rb_produce(&proto->rb_pend, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
#endif
if (buf_ptr == NULL) {
int pended_buf;
ESP_APPTRACE_LOGD(" full. Get %d bytes from pend buffer", size);
buf_ptr = esp_apptrace_membufs_wait4buf(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size), tmo, &pended_buf);
if (buf_ptr && !pended_buf) {
ESP_APPTRACE_LOGD("Got %d bytes from block", size);
// update cur block marker
ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
}
}
} else {
ESP_APPTRACE_LOGD("Get %d bytes from buffer", size);
// data fits into the current block
buf_ptr = ESP_APPTRACE_INBLOCK(proto)->start + ESP_APPTRACE_INBLOCK_MARKER(proto);
// update cur block marker
ESP_APPTRACE_INBLOCK_MARKER_UPD(proto, ESP_APPTRACE_USR_BLOCK_RAW_SZ(size));
}
}
if (buf_ptr) {
buf_ptr = esp_apptrace_membufs_pkt_start(buf_ptr, size);
}
return buf_ptr;
}
esp_err_t esp_apptrace_membufs_up_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
esp_apptrace_membufs_pkt_end(ptr);
// TODO: mark block as busy in order not to re-use it for other tracing calls until it is completely written
// TODO: avoid the potential situation when all memory is consumed by low prio tasks which can not complete writing due to
// higher prio tasks, and the latter can not allocate buffers at all;
// this abnormal situation can be detected on the host, which will receive only incomplete buffers;
// workaround: use own memcpy which will kick off dead tracing calls
return ESP_OK;
}
esp_err_t esp_apptrace_membufs_flush_nolock(esp_apptrace_membufs_proto_data_t *proto, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
{
int res = ESP_OK;
if (ESP_APPTRACE_INBLOCK_MARKER(proto) < min_sz) {
ESP_APPTRACE_LOGI("Ignore flush request for min %d bytes. Bytes in block: %d.", min_sz, ESP_APPTRACE_INBLOCK_MARKER(proto));
return ESP_OK;
}
// switch block while size of data (including that in pending buffer) is more than min size
while (ESP_APPTRACE_INBLOCK_MARKER(proto) > min_sz) {
ESP_APPTRACE_LOGD("Try to flush %d bytes. Wait until block switch for %lld us", ESP_APPTRACE_INBLOCK_MARKER(proto), tmo->tmo);
res = esp_apptrace_membufs_swap_waitus(proto, tmo);
if (res != ESP_OK) {
if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE)
ESP_APPTRACE_LOGW("Failed to switch to another block in %lld us!", tmo->tmo);
else
ESP_APPTRACE_LOGE("Failed to switch to another block in %lld us!", tmo->tmo);
return res;
}
}
return res;
}
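// For context, an illustrative sketch of how a panic/flush path may drive this routine through the public
// esp_app_trace.h API (esp_apptrace_flush_nolock()); the threshold and timeout values below are examples only:
//
//     // flush whatever is in the current block, but only if at least the post-mortem threshold is buffered
//     esp_err_t err = esp_apptrace_flush_nolock(ESP_APPTRACE_DEST_JTAG,
//                                               CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH,
//                                               100000 /* us */);
//     if (err != ESP_OK) {
//         // trace data could not be delivered to the host within the timeout
//     }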

View file

@ -8,33 +8,42 @@
#include "freertos/task.h"
#include "esp_app_trace_util.h"
#include "sdkconfig.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/clk.h"
#elif CONFIG_IDF_TARGET_ESP32C3
#include "esp32c3/clk.h"
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Locks /////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
#if ESP_APPTRACE_PRINT_LOCK
static esp_apptrace_lock_t s_log_lock = {.irq_stat = 0, .portmux = portMUX_INITIALIZER_UNLOCKED};
#endif
int esp_apptrace_log_lock(void)
{
#if ESP_APPTRACE_PRINT_LOCK
esp_apptrace_tmo_t tmo;
esp_apptrace_tmo_init(&tmo, ESP_APPTRACE_TMO_INFINITE);
int ret = esp_apptrace_lock_take(&s_log_lock, &tmo);
return ret;
#else
return 0;
#endif
}
void esp_apptrace_log_unlock(void)
{
#if ESP_APPTRACE_PRINT_LOCK
esp_apptrace_lock_give(&s_log_lock);
#endif
}
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////// TIMEOUT /////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
#define ESP_APPTRACE_CPUTICKS2US(_t_, _cpu_freq_) ((_t_)/(_cpu_freq_/1000000))
#define ESP_APPTRACE_US2CPUTICKS(_t_, _cpu_freq_) ((_t_)*(_cpu_freq_/1000000))
esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo)
{
int cpu_freq = esp_clk_cpu_freq();
if (tmo->tmo != ESP_APPTRACE_TMO_INFINITE) {
unsigned cur = portGET_RUN_TIME_COUNTER_VALUE();
if (tmo->start <= cur) {
tmo->elapsed = ESP_APPTRACE_CPUTICKS2US(cur - tmo->start, cpu_freq);
} else {
tmo->elapsed = ESP_APPTRACE_CPUTICKS2US(0xFFFFFFFF - tmo->start + cur, cpu_freq);
}
if (tmo->tmo != (int64_t)-1) {
tmo->elapsed = esp_timer_get_time() - tmo->start;
if (tmo->elapsed >= tmo->tmo) {
return ESP_ERR_TIMEOUT;
}

View file

@ -4,14 +4,21 @@
COMPONENT_SRCDIRS := .
ifdef CONFIG_APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE
COMPONENT_SRCDIRS += port/xtensa
endif
COMPONENT_ADD_INCLUDEDIRS = include
COMPONENT_PRIV_INCLUDEDIRS = private_include \
port/include
COMPONENT_ADD_LDFLAGS = -lapp_trace
# do not produce gcov info for this module, it is used as transport for gcov
CFLAGS := $(subst --coverage,,$(CFLAGS))
ifdef CONFIG_SYSVIEW_ENABLE
ifdef CONFIG_APPTRACE_SV_ENABLE
COMPONENT_ADD_INCLUDEDIRS += \
sys_view/Config \
@ -23,7 +30,7 @@ COMPONENT_SRCDIRS += \
sys_view/SEGGER \
sys_view/Sample/OS \
sys_view/Sample/Config \
sys_view/esp32 \
sys_view/esp \
sys_view/ext
else
COMPONENT_SRCDIRS += gcov

View file

@ -129,7 +129,7 @@ void esp_gcov_dump(void)
// disable IRQs on this CPU, other CPU is halted by OpenOCD
unsigned irq_state = portENTER_CRITICAL_NESTED();
#if !CONFIG_FREERTOS_UNICORE
int other_core = xPortGetCoreID() ? 0 : 1;
int other_core = cpu_hal_get_core_id() ? 0 : 1;
esp_cpu_stall(other_core);
#endif
while (!esp_apptrace_host_is_connected(ESP_APPTRACE_DEST_TRAX)) {

View file

@ -9,7 +9,7 @@
#include "esp_heap_trace.h"
#undef HEAP_TRACE_SRCFILE
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
#include "esp_app_trace.h"
#include "esp_sysview_trace.h"
#endif
@ -18,7 +18,7 @@
#ifdef CONFIG_HEAP_TRACING_TOHOST
#if !CONFIG_SYSVIEW_ENABLE
#if !CONFIG_APPTRACE_SV_ENABLE
#error None of the heap tracing backends is enabled! You must enable SystemView compatible tracing to use this feature.
#endif
@ -34,7 +34,7 @@ esp_err_t heap_trace_init_tohost(void)
esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
{
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
esp_err_t ret = esp_sysview_heap_trace_start((uint32_t)-1);
if (ret != ESP_OK) {
return ret;
@ -47,7 +47,7 @@ esp_err_t heap_trace_start(heap_trace_mode_t mode_param)
esp_err_t heap_trace_stop(void)
{
esp_err_t ret = ESP_ERR_NOT_SUPPORTED;
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
ret = esp_sysview_heap_trace_stop();
#endif
s_tracing = false;
@ -80,7 +80,7 @@ static IRAM_ATTR void record_allocation(const heap_trace_record_t *record)
if (!s_tracing) {
return;
}
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
esp_sysview_heap_trace_alloc(record->address, record->size, record->alloced_by);
#endif
}
@ -95,7 +95,7 @@ static IRAM_ATTR void record_free(void *p, void **callers)
if (!s_tracing) {
return;
}
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
esp_sysview_heap_trace_free(p, callers);
#endif
}

View file

@ -18,8 +18,11 @@ extern "C" {
* Application trace data destinations bits.
*/
typedef enum {
ESP_APPTRACE_DEST_TRAX = 0x1, ///< JTAG destination
ESP_APPTRACE_DEST_UART0 = 0x2, ///< UART destination
ESP_APPTRACE_DEST_JTAG = 1, ///< JTAG destination
ESP_APPTRACE_DEST_TRAX = ESP_APPTRACE_DEST_JTAG, ///< the xxx_TRAX name is obsolete, use the more common xxx_JTAG
ESP_APPTRACE_DEST_UART0, ///< UART0 destination
ESP_APPTRACE_DEST_MAX = ESP_APPTRACE_DEST_UART0,
ESP_APPTRACE_DEST_NUM
} esp_apptrace_dest_t;
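/* Illustrative usage (relies on esp_apptrace_write() declared further down in this header; values are examples):
 *
 *     uint8_t sample[16] = { 0 };
 *     // send 16 bytes to the host over JTAG, waiting up to 100 us for free space in the trace buffer
 *     esp_err_t err = esp_apptrace_write(ESP_APPTRACE_DEST_JTAG, sample, sizeof(sample), 100);
 */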
/**

View file

@ -12,6 +12,7 @@ extern "C" {
#include "freertos/FreeRTOS.h"
#include "esp_err.h"
#include "esp_timer.h"
/** Infinite waiting timeout */
#define ESP_APPTRACE_TMO_INFINITE ((uint32_t)-1)
@ -22,9 +23,9 @@ extern "C" {
* periodically to check timeout for expiration.
*/
typedef struct {
uint32_t start; ///< time interval start (in CPU ticks)
uint32_t tmo; ///< timeout value (in us)
uint32_t elapsed; ///< elapsed time (in us)
int64_t start; ///< time interval start (in us)
int64_t tmo; ///< timeout value (in us)
int64_t elapsed; ///< elapsed time (in us)
} esp_apptrace_tmo_t;
/**
@ -35,23 +36,23 @@ typedef struct {
*/
static inline void esp_apptrace_tmo_init(esp_apptrace_tmo_t *tmo, uint32_t user_tmo)
{
tmo->start = portGET_RUN_TIME_COUNTER_VALUE();
tmo->tmo = user_tmo;
tmo->start = esp_timer_get_time();
tmo->tmo = user_tmo == ESP_APPTRACE_TMO_INFINITE ? (int64_t)-1 : (int64_t)user_tmo;
tmo->elapsed = 0;
}
/**
* @brief Checks timeout for expiration.
*
* @param tmo Pointer to timeout structure to be initialized.
* @param tmo Pointer to timeout structure.
*
* @return ESP_OK on success, otherwise \see esp_err_t
* @return Number of microseconds remaining until the timeout expires.
*/
esp_err_t esp_apptrace_tmo_check(esp_apptrace_tmo_t *tmo);
static inline uint32_t esp_apptrace_tmo_remaining_us(esp_apptrace_tmo_t *tmo)
{
return tmo->tmo != ESP_APPTRACE_TMO_INFINITE ? (tmo->elapsed - tmo->tmo) : ESP_APPTRACE_TMO_INFINITE;
return tmo->tmo != (int64_t)-1 ? (tmo->tmo - tmo->elapsed) : ESP_APPTRACE_TMO_INFINITE;
}
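/* Illustrative usage of the timeout helpers above (the values and the 'condition_is_met' placeholder are examples):
 *
 *     esp_apptrace_tmo_t tmo;
 *     esp_apptrace_tmo_init(&tmo, 1000); // expire after 1000 us
 *     while (!condition_is_met()) {
 *         if (esp_apptrace_tmo_check(&tmo) != ESP_OK) {
 *             return ESP_ERR_TIMEOUT;    // more than 1000 us elapsed
 *         }
 *     }
 */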
/** Tracing module synchronization lock */
@ -160,6 +161,30 @@ uint32_t esp_apptrace_rb_read_size_get(esp_apptrace_rb_t *rb);
*/
uint32_t esp_apptrace_rb_write_size_get(esp_apptrace_rb_t *rb);
int esp_apptrace_log_lock(void);
void esp_apptrace_log_unlock(void);
#define ESP_APPTRACE_LOG( format, ... ) \
do { \
esp_apptrace_log_lock(); \
esp_rom_printf(format, ##__VA_ARGS__); \
esp_apptrace_log_unlock(); \
} while(0)
#define ESP_APPTRACE_LOG_LEV( _L_, level, format, ... ) \
do { \
if (LOG_LOCAL_LEVEL >= level) { \
ESP_APPTRACE_LOG(LOG_FORMAT(_L_, format), esp_log_early_timestamp(), TAG, ##__VA_ARGS__); \
} \
} while(0)
#define ESP_APPTRACE_LOGE( format, ... ) ESP_APPTRACE_LOG_LEV(E, ESP_LOG_ERROR, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGW( format, ... ) ESP_APPTRACE_LOG_LEV(W, ESP_LOG_WARN, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGI( format, ... ) ESP_APPTRACE_LOG_LEV(I, ESP_LOG_INFO, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGD( format, ... ) ESP_APPTRACE_LOG_LEV(D, ESP_LOG_DEBUG, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGV( format, ... ) ESP_APPTRACE_LOG_LEV(V, ESP_LOG_VERBOSE, format, ##__VA_ARGS__)
#define ESP_APPTRACE_LOGO( format, ... ) ESP_APPTRACE_LOG_LEV(E, ESP_LOG_NONE, format, ##__VA_ARGS__)
#ifdef __cplusplus
}
#endif

Wyświetl plik

@ -12,7 +12,7 @@ extern "C" {
#include <stdarg.h>
#include "esp_err.h"
#include "SEGGER_RTT.h" // SEGGER_RTT_ESP32_Flush
#include "SEGGER_RTT.h" // SEGGER_RTT_ESP_Flush
#include "esp_app_trace_util.h" // ESP_APPTRACE_TMO_INFINITE
/**
@ -24,7 +24,7 @@ extern "C" {
*/
static inline esp_err_t esp_sysview_flush(uint32_t tmo)
{
SEGGER_RTT_ESP32_Flush(0, tmo);
SEGGER_RTT_ESP_Flush(0, tmo);
return ESP_OK;
}

View file

@ -3,17 +3,17 @@ archive: libapp_trace.a
entries:
app_trace (noflash)
app_trace_util (noflash)
if SYSVIEW_ENABLE = y:
if APPTRACE_SV_ENABLE = y:
SEGGER_SYSVIEW (noflash)
SEGGER_RTT_esp32 (noflash)
SEGGER_RTT_esp (noflash)
SEGGER_SYSVIEW_Config_FreeRTOS (noflash)
SEGGER_SYSVIEW_FreeRTOS (noflash)
[mapping:app_trace_driver]
archive: libdriver.a
entries:
if SYSVIEW_TS_SOURCE_TIMER_00 = y || SYSVIEW_TS_SOURCE_TIMER_01 = y
|| SYSVIEW_TS_SOURCE_TIMER_10 = y || SYSVIEW_TS_SOURCE_TIMER_11 = y:
if APPTRACE_SV_TS_SOURCE_TIMER_00 = y || APPTRACE_SV_TS_SOURCE_TIMER_01 = y
|| APPTRACE_SV_TS_SOURCE_TIMER_10 = y || APPTRACE_SV_TS_SOURCE_TIMER_11 = y:
timer (noflash)
else:
* (default)

View file

@ -0,0 +1,43 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ESP_APP_TRACE_PORT_H_
#define ESP_APP_TRACE_PORT_H_
#include "esp_app_trace_util.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Apptrace HW interface. */
typedef struct {
esp_err_t (*init)(void *hw_data);
uint8_t *(*get_up_buffer)(void *hw_data, uint32_t, esp_apptrace_tmo_t *);
esp_err_t (*put_up_buffer)(void *hw_data, uint8_t *, esp_apptrace_tmo_t *);
esp_err_t (*flush_up_buffer_nolock)(void *hw_data, uint32_t, esp_apptrace_tmo_t *);
esp_err_t (*flush_up_buffer)(void *hw_data, esp_apptrace_tmo_t *);
void (*down_buffer_config)(void *hw_data, uint8_t *buf, uint32_t size);
uint8_t *(*get_down_buffer)(void *hw_data, uint32_t *, esp_apptrace_tmo_t *);
esp_err_t (*put_down_buffer)(void *hw_data, uint8_t *, esp_apptrace_tmo_t *);
bool (*host_is_connected)(void *hw_data);
} esp_apptrace_hw_t;
esp_apptrace_hw_t *esp_apptrace_jtag_hw_get(void **data);
esp_apptrace_hw_t *esp_apptrace_uart_hw_get(int num, void **data);
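/* Illustrative usage sketch (assumed, not part of this header): the generic apptrace layer is expected to
 * obtain a transport once and then drive it exclusively through the esp_apptrace_hw_t callbacks, e.g.:
 *
 *     void *hw_data = NULL;
 *     esp_apptrace_hw_t *hw = esp_apptrace_jtag_hw_get(&hw_data); // NULL when the JTAG destination is disabled
 *     if (hw != NULL && hw->init(hw_data) == ESP_OK) {
 *         esp_apptrace_tmo_t tmo;
 *         esp_apptrace_tmo_init(&tmo, 1000); // 1000 us timeout
 *         uint8_t *buf = hw->get_up_buffer(hw_data, 16, &tmo);
 *         if (buf != NULL) {
 *             // ... fill the 16-byte buffer with trace data ...
 *             hw->put_up_buffer(hw_data, buf, &tmo);
 *         }
 *     }
 */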
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,543 @@
//
// How It Works
// ************
// 1. Components Overview
// ======================
// Xtensa has a useful feature: the TRAX debug module. It allows recording program execution flow at run-time without disturbing the CPU.
// Execution flow data are written to a configurable Trace RAM block. Besides accessing Trace RAM itself, the TRAX module also allows reading/writing
// trace memory via its registers by means of JTAG, APB or ERI transactions.
// ESP32 has two Xtensa cores with separate TRAX modules on them and provides two special memory regions to be used as trace memory.
// Chip allows muxing access to those trace memory blocks in such a way that while one block is accessed by CPUs another one can be accessed by host
// by means of reading/writing TRAX registers via JTAG. Blocks muxing is configurable at run-time and allows switching trace memory blocks between
// accessors in round-robin fashion so they can read/write separate memory blocks without disturbing each other.
// This module implements the application tracing feature based on the above mechanisms. It allows transferring arbitrary user data to/from the
// host via JTAG with minimal impact on system performance. This module is intended to be used in the following tracing scheme.
// ------>------ ----- (host components) -----
// | | | |
// ------------------- ----------------------- ----------------------- ---------------- ------ --------- -----------------
// |trace data source|-->|target tracing module|<--->|TRAX_MEM0 | TRAX_MEM1|---->|TRAX_DATA_REGS|<-->|JTAG|<--->|OpenOCD|-->|trace data sink|
// ------------------- ----------------------- ----------------------- ---------------- ------ --------- -----------------
// | | | |
// | ------<------ ---------------- |
// |<------------------------------------------->|TRAX_CTRL_REGS|<---->|
// ----------------
// In general tracing goes in the following way. User application requests tracing module to send some data by calling esp_apptrace_buffer_get(),
// module allocates necessary buffer in current input trace block. Then user fills received buffer with data and calls esp_apptrace_buffer_put().
// When current input trace block is filled with app data it is exposed to host and the second block becomes input one and buffer filling restarts.
// While target application fills one TRAX block host reads another one via JTAG.
// This module also allows communication in the opposite direction: from host to target. As it was said ESP32 and host can access different TRAX blocks
// simultaneously, so while target writes trace data to one block host can write its own data (e.g. tracing commands) to another one then when
// blocks are switched host receives trace data and target receives data written by host application. Target user application can read host data
// by calling esp_apptrace_read() API.
// To control buffer switching and for other communication purposes this implementation uses some TRAX registers. It is safe since HW TRAX tracing
// can not be used along with application tracing feature so these registers are freely readable/writeable via JTAG from host and via ERI from ESP32 cores.
// Overhead of this implementation on target CPU is produced only by allocating/managing buffers and copying of data.
// On the host side special OpenOCD command must be used to read trace data.
// 2. TRAX Registers layout
// ========================
// This module uses two TRAX HW registers to communicate with host SW (OpenOCD).
// - Control register uses TRAX_DELAYCNT as storage. Only lower 24 bits of TRAX_DELAYCNT are writable. Control register has the following bitfields:
// | 31..XXXXXX..24 | 23 .(host_connect). 23 | 22 .(host_data). 22 | 21..(block_id)..15 | 14..(block_len)..0 |
// 14..0 bits - actual length of user data in trace memory block. Target updates it every time it fills memory block and exposes it to host.
// Host writes zero to this field when it finishes reading exposed block;
// 21..15 bits - trace memory block transfer ID. Block counter. It can overflow. Updated by target, host should not modify it. Actually can be 2 bits;
// 22 bit - 'host data present' flag. If set to one there is data from host, otherwise - no host data;
// 23 bit - 'host connected' flag. If zero then host is not connected and tracing module works in post-mortem mode, otherwise in streaming mode;
// - Status register uses TRAX_TRIGGERPC as storage. If this register is not zero then current CPU is changing TRAX registers and
// this register holds address of the instruction which application will execute when it finishes with those registers modifications.
// See the 'Target Connection/Disconnection' section for details.
// 3. Modes of operation
// =====================
// This module supports two modes of operation:
// - Post-mortem mode. This is the default mode. In this mode application tracing module does not check whether host has read all the data from block
// exposed to it and switches block in any case. The mode does not need host interaction for operation and so can be useful when only the latest
// trace data are necessary, e.g. for analyzing crashes. On panic the latest data from current input block are exposed to host and host can read them.
// It can happen that a system panic occurs when there is only a very small amount of data not yet exposed to the host (e.g. a crash just after the
// TRAX block switch). In this case the previous 16KB of collected data will be dropped and host will see the latest, but very small piece of trace.
// It can be insufficient to diagnose the problem. To avoid such situations there is menuconfig option
// CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH
// which controls the threshold for flushing data in case of panic.
// - Streaming mode. Tracing module enters this mode when host connects to target and sets respective bits in control registers (per core).
// In this mode before switching the block tracing module waits for the host to read all the data from the previously exposed block.
// On panic tracing module also waits (timeout is configured via menuconfig via CONFIG_APPTRACE_ONPANIC_HOST_FLUSH_TMO) for the host to read all data.
// 4. Communication Protocol
// =========================
// 4.1 Trace Memory Blocks
// -----------------------
// Communication is controlled via special register. Host periodically polls control register on each core to find out if there are any data available.
// When current input memory block is filled it is exposed to host and 'block_len' and 'block_id' fields are updated in the control register.
// Host reads the new register value and, according to it, starts reading data from the exposed block. Meanwhile the target starts filling another trace block.
// When host finishes reading the block it clears 'block_len' field in control register indicating to the target that it is ready to accept the next one.
// If the host has some data to transfer to the target it writes them to trace memory block before clearing 'block_len' field. Then it sets
// 'host_data_present' bit and clears 'block_len' field in control register. Upon every block switch target checks 'host_data_present' bit and if it is set
// reads them to down buffer before writing any trace data to switched TRAX block.
// 4.2 User Data Chunks Level
// --------------------------
// Since trace memory block is shared between user data chunks and data copying is performed on behalf of the API user (in its normal context) in
// multithreading environment it can happen that the task/ISR which copies data is preempted by another high prio task/ISR. So it is possible
// that a task/ISR will fail to complete filling its data chunk before the whole trace block is exposed to the host. To handle such conditions tracing
// module prepends all user data chunks with header which contains allocated buffer size and actual data length within it. OpenOCD command
// which reads application traces reports error when it reads incomplete user data block.
// Data transferred from host to target are also prepended with a header. The down channel data header is simple and consists of a single two-byte field
// containing the length of the host data that follows the header.
// 4.3 Data Buffering
// ------------------
// It takes some time for the host to read TRAX memory block via JTAG. In streaming mode it can happen that target has filled its TRAX block, but host
// has not completed reading of the previous one yet. So in this case time critical tracing calls (which can not be delayed for too long time due to
// the lack of free memory in TRAX block) can be dropped. To avoid such scenarios tracing module implements data buffering. Buffered data will be sent
// to the host later when TRAX block switch occurs. The maximum size of the buffered data is controlled by menuconfig option
// CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX.
// 4.4 Target Connection/Disconnection
// -----------------------------------
// When host is going to start tracing in streaming mode it needs to put both ESP32 cores into initial state when 'host connected' bit is set
// on both cores. To accomplish this host halts both cores and sets this bit in TRAX registers. But target code can be halted in state when it has read control
// register but has not updated its value. To handle such situations target code indicates to the host that it is updating control register by writing
// non-zero value to status register. Actually it writes address of the instruction which it will execute when it finishes with
// the registers update. When target is halted during control register update host sets breakpoint at the address from status register and resumes CPU.
// After target code finishes with register update it is halted on breakpoint, host detects it and safely sets 'host connected' bit. When both cores
// are set up they are resumed. Tracing starts without further intrusion into CPUs work.
// When host is going to stop tracing in streaming mode it needs to disconnect targets. Disconnection process is done using the same algorithm
// as for connecting, but 'host connected' bits are cleared on ESP32 cores.
// 5. Module Access Synchronization
// ================================
// Access to internal module's data is synchronized with custom mutex. Mutex is a wrapper for portMUX_TYPE and uses almost the same sync mechanism as in
// vPortCPUAcquireMutex/vPortCPUReleaseMutex. The mechanism uses S32C1I Xtensa instruction to implement exclusive access to module's data from tasks and
// ISRs running on both cores. Also custom mutex allows specifying timeout for locking operation. Locking routine checks underlaying mutex in cycle until
// it gets its ownership or timeout expires. The differences of application tracing module's mutex implementation from vPortCPUAcquireMutex/vPortCPUReleaseMutex are:
// - Support for timeouts.
// - Local IRQs for CPU which owns the mutex are disabled till the call to unlocking routine. This is made to avoid possible task's prio inversion.
// Otherwise, when a low prio task takes the mutex with local IRQs enabled, it can get preempted by a high prio task which in its turn may try to acquire the mutex using an infinite timeout.
// So no local task switch occurs when mutex is locked. But this does not apply to tasks on another CPU.
// WARNING: Priority inversion can happen when low prio task works on one CPU and medium and high prio tasks work on another.
// WARNING: Care must be taken when selecting timeout values for trace calls from ISRs. Tracing module does not care about watchdogs when waiting
// on internal locks and for host to complete previous block reading, so if timeout value exceeds watchdog's one it can lead to the system reboot.
// 6. Timeouts
// ===========
// The timeout mechanism is based on the esp_timer_get_time() routine and supports timeout values in microseconds.
// There are two situations when task/ISR can be delayed by tracing API call. Timeout mechanism takes into account both conditions:
// - Trace data are locked by another task/ISR. When waiting on the trace data lock.
// - Current TRAX memory input block is full when working in streaming mode (host is connected). When waiting for host to complete previous block reading.
// When waiting for any of the above conditions esp_timer_get_time() is called periodically to calculate the time elapsed since the trace API routine was entered. When the elapsed
// time exceeds the specified timeout value the operation is canceled and ESP_ERR_TIMEOUT is returned.
#include "sdkconfig.h"
#include "soc/soc.h"
#include "soc/dport_access.h"
#if CONFIG_IDF_TARGET_ESP32
#include "soc/dport_reg.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "soc/sensitive_reg.h"
#endif
#include "eri.h"
#include "trax.h"
#include "esp_log.h"
#include "esp_app_trace_membufs_proto.h"
#include "esp_app_trace_port.h"
// TODO: move these (and the same definitions in trax.c) to dport_reg.h
#if CONFIG_IDF_TARGET_ESP32
#define TRACEMEM_MUX_PROBLK0_APPBLK1 0
#define TRACEMEM_MUX_BLK0_ONLY 1
#define TRACEMEM_MUX_BLK1_ONLY 2
#define TRACEMEM_MUX_PROBLK1_APPBLK0 3
#elif CONFIG_IDF_TARGET_ESP32S2
#define TRACEMEM_MUX_BLK0_NUM 19
#define TRACEMEM_MUX_BLK1_NUM 20
#define TRACEMEM_BLK_NUM2ADDR(_n_) (0x3FFB8000UL + 0x4000UL*((_n_)-4))
#endif
// TRAX is disabled, so we use its registers for our own purposes
// | 31..XXXXXX..24 | 23 .(host_connect). 23 | 22 .(host_data). 22| 21..(block_id)..15 | 14..(block_len)..0 |
#define ESP_APPTRACE_TRAX_CTRL_REG ERI_TRAX_DELAYCNT
#define ESP_APPTRACE_TRAX_STAT_REG ERI_TRAX_TRIGGERPC
#define ESP_APPTRACE_TRAX_BLOCK_LEN_MSK 0x7FFFUL
#define ESP_APPTRACE_TRAX_BLOCK_LEN(_l_) ((_l_) & ESP_APPTRACE_TRAX_BLOCK_LEN_MSK)
#define ESP_APPTRACE_TRAX_BLOCK_LEN_GET(_v_) ((_v_) & ESP_APPTRACE_TRAX_BLOCK_LEN_MSK)
#define ESP_APPTRACE_TRAX_BLOCK_ID_MSK 0x7FUL
#define ESP_APPTRACE_TRAX_BLOCK_ID(_id_) (((_id_) & ESP_APPTRACE_TRAX_BLOCK_ID_MSK) << 15)
#define ESP_APPTRACE_TRAX_BLOCK_ID_GET(_v_) (((_v_) >> 15) & ESP_APPTRACE_TRAX_BLOCK_ID_MSK)
#define ESP_APPTRACE_TRAX_HOST_DATA (1 << 22)
#define ESP_APPTRACE_TRAX_HOST_CONNECT (1 << 23)
#define ESP_APPTRACE_TRAX_INITED(_hw_) ((_hw_)->inited & (1 << cpu_hal_get_core_id()))
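// Worked example (illustrative): exposing block id 5 holding 100 bytes of trace data while the host is
// connected writes ESP_APPTRACE_TRAX_BLOCK_ID(5) | ESP_APPTRACE_TRAX_HOST_CONNECT | ESP_APPTRACE_TRAX_BLOCK_LEN(100)
// = (5 << 15) | (1 << 23) | 100 = 0x828064 to ESP_APPTRACE_TRAX_CTRL_REG (see esp_apptrace_trax_buffer_swap_end()).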
#if CONFIG_IDF_TARGET_ESP32
static uint8_t * const s_trax_blocks[] = {
(uint8_t *) 0x3FFFC000,
(uint8_t *) 0x3FFF8000
};
#elif CONFIG_IDF_TARGET_ESP32S2
static uint8_t * const s_trax_blocks[] = {
(uint8_t *)TRACEMEM_BLK_NUM2ADDR(TRACEMEM_MUX_BLK0_NUM),
(uint8_t *)TRACEMEM_BLK_NUM2ADDR(TRACEMEM_MUX_BLK1_NUM)
};
#endif
#define ESP_APPTRACE_TRAX_BLOCK_SIZE (0x4000UL)
/** TRAX HW transport data */
typedef struct {
uint8_t inited;
#if CONFIG_APPTRACE_LOCK_ENABLE
esp_apptrace_lock_t lock; // sync lock
#endif
esp_apptrace_membufs_proto_data_t membufs;
} esp_apptrace_trax_data_t;
static esp_err_t esp_apptrace_trax_init(esp_apptrace_trax_data_t *hw_data);
static esp_err_t esp_apptrace_trax_flush(esp_apptrace_trax_data_t *hw_data, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_trax_flush_nolock(esp_apptrace_trax_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo);
static uint8_t *esp_apptrace_trax_up_buffer_get(esp_apptrace_trax_data_t *hw_data, uint32_t size, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_trax_up_buffer_put(esp_apptrace_trax_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
static void esp_apptrace_trax_down_buffer_config(esp_apptrace_trax_data_t *hw_data, uint8_t *buf, uint32_t size);
static uint8_t *esp_apptrace_trax_down_buffer_get(esp_apptrace_trax_data_t *hw_data, uint32_t *size, esp_apptrace_tmo_t *tmo);
static esp_err_t esp_apptrace_trax_down_buffer_put(esp_apptrace_trax_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
static bool esp_apptrace_trax_host_is_connected(esp_apptrace_trax_data_t *hw_data);
static esp_err_t esp_apptrace_trax_buffer_swap_start(uint32_t curr_block_id);
static esp_err_t esp_apptrace_trax_buffer_swap(uint32_t new_block_id);
static esp_err_t esp_apptrace_trax_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len);
static bool esp_apptrace_trax_host_data_pending(void);
const static char *TAG = "esp_apptrace";
esp_apptrace_hw_t *esp_apptrace_uart_hw_get(int num, void **data)
{
return NULL;
}
esp_apptrace_hw_t *esp_apptrace_jtag_hw_get(void **data)
{
#if CONFIG_APPTRACE_DEST_JTAG
static esp_apptrace_membufs_proto_hw_t s_trax_proto_hw = {
.swap_start = esp_apptrace_trax_buffer_swap_start,
.swap = esp_apptrace_trax_buffer_swap,
.swap_end = esp_apptrace_trax_buffer_swap_end,
.host_data_pending = esp_apptrace_trax_host_data_pending,
};
static esp_apptrace_trax_data_t s_trax_hw_data = {
.membufs = {
.hw = &s_trax_proto_hw,
},
};
static esp_apptrace_hw_t s_trax_hw = {
.init = (esp_err_t (*)(void *))esp_apptrace_trax_init,
.get_up_buffer = (uint8_t *(*)(void *, uint32_t, esp_apptrace_tmo_t *))esp_apptrace_trax_up_buffer_get,
.put_up_buffer = (esp_err_t (*)(void *, uint8_t *, esp_apptrace_tmo_t *))esp_apptrace_trax_up_buffer_put,
.flush_up_buffer_nolock = (esp_err_t (*)(void *, uint32_t, esp_apptrace_tmo_t *))esp_apptrace_trax_flush_nolock,
.flush_up_buffer = (esp_err_t (*)(void *, esp_apptrace_tmo_t *))esp_apptrace_trax_flush,
.down_buffer_config = (void (*)(void *, uint8_t *, uint32_t ))esp_apptrace_trax_down_buffer_config,
.get_down_buffer = (uint8_t *(*)(void *, uint32_t *, esp_apptrace_tmo_t *))esp_apptrace_trax_down_buffer_get,
.put_down_buffer = (esp_err_t (*)(void *, uint8_t *, esp_apptrace_tmo_t *))esp_apptrace_trax_down_buffer_put,
.host_is_connected = (bool (*)(void *))esp_apptrace_trax_host_is_connected,
};
*data = &s_trax_hw_data;
return &s_trax_hw;
#else
return NULL;
#endif
}
static esp_err_t esp_apptrace_trax_lock(esp_apptrace_trax_data_t *hw_data, esp_apptrace_tmo_t *tmo)
{
#if CONFIG_APPTRACE_LOCK_ENABLE
esp_err_t ret = esp_apptrace_lock_take(&hw_data->lock, tmo);
if (ret != ESP_OK) {
return ESP_FAIL;
}
#endif
return ESP_OK;
}
static esp_err_t esp_apptrace_trax_unlock(esp_apptrace_trax_data_t *hw_data)
{
esp_err_t ret = ESP_OK;
#if CONFIG_APPTRACE_LOCK_ENABLE
ret = esp_apptrace_lock_give(&hw_data->lock);
#endif
return ret;
}
static inline void esp_apptrace_trax_hw_init(void)
{
// Stop trace, if any (on the current CPU)
eri_write(ERI_TRAX_TRAXCTRL, TRAXCTRL_TRSTP);
eri_write(ERI_TRAX_TRAXCTRL, TRAXCTRL_TMEN);
eri_write(ESP_APPTRACE_TRAX_CTRL_REG, ESP_APPTRACE_TRAX_BLOCK_ID(0));
// this is for OpenOCD to let it know where the stub entries vector resides
// must be read by host before any transfer using TRAX
eri_write(ESP_APPTRACE_TRAX_STAT_REG, 0);
ESP_APPTRACE_LOGI("Initialized TRAX on CPU%d", cpu_hal_get_core_id());
}
static inline void esp_apptrace_trax_select_memory_block(int block_num)
{
// select memory block to be exposed to the TRAX module (accessed by host)
#if CONFIG_IDF_TARGET_ESP32
DPORT_WRITE_PERI_REG(DPORT_TRACEMEM_MUX_MODE_REG, block_num ? TRACEMEM_MUX_BLK0_ONLY : TRACEMEM_MUX_BLK1_ONLY);
#elif CONFIG_IDF_TARGET_ESP32S2
WRITE_PERI_REG(DPORT_PMS_OCCUPY_3_REG, block_num ? BIT(TRACEMEM_MUX_BLK0_NUM-4) : BIT(TRACEMEM_MUX_BLK1_NUM-4));
#endif
}
static inline void esp_apptrace_trax_memory_enable(void)
{
#if CONFIG_IDF_TARGET_ESP32
/* Enable trace memory on PRO CPU */
DPORT_WRITE_PERI_REG(DPORT_PRO_TRACEMEM_ENA_REG, DPORT_PRO_TRACEMEM_ENA_M);
#if CONFIG_FREERTOS_UNICORE == 0
/* Enable trace memory on APP CPU */
DPORT_WRITE_PERI_REG(DPORT_APP_TRACEMEM_ENA_REG, DPORT_APP_TRACEMEM_ENA_M);
#endif
#endif
}
/*****************************************************************************************/
/***************************** Apptrace HW iface *****************************************/
/*****************************************************************************************/
static esp_err_t esp_apptrace_trax_init(esp_apptrace_trax_data_t *hw_data)
{
int core_id = cpu_hal_get_core_id();
// 'esp_apptrace_trax_init()' is called on every core, so ensure to do main initialization only once
if (core_id == 0) {
esp_apptrace_mem_block_t mem_blocks_cfg[2] = {
{
.start = s_trax_blocks[0],
.sz = ESP_APPTRACE_TRAX_BLOCK_SIZE
},
{
.start = s_trax_blocks[1],
.sz = ESP_APPTRACE_TRAX_BLOCK_SIZE
},
};
esp_err_t res = esp_apptrace_membufs_init(&hw_data->membufs, mem_blocks_cfg);
if (res != ESP_OK) {
ESP_APPTRACE_LOGE("Failed to init membufs proto (%d)!", res);
return res;
}
#if CONFIG_APPTRACE_LOCK_ENABLE
esp_apptrace_lock_init(&hw_data->lock);
#endif
esp_apptrace_trax_memory_enable();
esp_apptrace_trax_select_memory_block(0);
}
// init TRAX on this CPU
esp_apptrace_trax_hw_init();
hw_data->inited |= 1 << core_id;
return ESP_OK;
}
static uint8_t *esp_apptrace_trax_up_buffer_get(esp_apptrace_trax_data_t *hw_data, uint32_t size, esp_apptrace_tmo_t *tmo)
{
uint8_t *ptr;
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return NULL;
}
esp_err_t res = esp_apptrace_trax_lock(hw_data, tmo);
if (res != ESP_OK) {
return NULL;
}
ptr = esp_apptrace_membufs_up_buffer_get(&hw_data->membufs, size, tmo);
// now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
if (esp_apptrace_trax_unlock(hw_data) != ESP_OK) {
assert(false && "Failed to unlock apptrace data!");
}
return ptr;
}
static esp_err_t esp_apptrace_trax_up_buffer_put(esp_apptrace_trax_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return ESP_ERR_INVALID_STATE;
}
// Can avoid locking because esp_apptrace_membufs_up_buffer_put() just modifies buffer's header
esp_err_t res = esp_apptrace_membufs_up_buffer_put(&hw_data->membufs, ptr, tmo);
return res;
}
static void esp_apptrace_trax_down_buffer_config(esp_apptrace_trax_data_t *hw_data, uint8_t *buf, uint32_t size)
{
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return;
}
esp_apptrace_membufs_down_buffer_config(&hw_data->membufs, buf, size);
}
static uint8_t *esp_apptrace_trax_down_buffer_get(esp_apptrace_trax_data_t *hw_data, uint32_t *size, esp_apptrace_tmo_t *tmo)
{
uint8_t *ptr;
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return NULL;
}
esp_err_t res = esp_apptrace_trax_lock(hw_data, tmo);
if (res != ESP_OK) {
return NULL;
}
ptr = esp_apptrace_membufs_down_buffer_get(&hw_data->membufs, size, tmo);
// now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
if (esp_apptrace_trax_unlock(hw_data) != ESP_OK) {
assert(false && "Failed to unlock apptrace data!");
}
return ptr;
}
static esp_err_t esp_apptrace_trax_down_buffer_put(esp_apptrace_trax_data_t *hw_data, uint8_t *ptr, esp_apptrace_tmo_t *tmo)
{
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return ESP_ERR_INVALID_STATE;
}
// Can avoid locking because esp_apptrace_membufs_down_buffer_put() does nothing
/*esp_err_t res = esp_apptrace_trax_lock(hw_data, tmo);
if (res != ESP_OK) {
return res;
}*/
esp_err_t res = esp_apptrace_membufs_down_buffer_put(&hw_data->membufs, ptr, tmo);
// now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
/*if (esp_apptrace_trax_unlock(hw_data) != ESP_OK) {
assert(false && "Failed to unlock apptrace data!");
}*/
return res;
}
static bool esp_apptrace_trax_host_is_connected(esp_apptrace_trax_data_t *hw_data)
{
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return false;
}
return eri_read(ESP_APPTRACE_TRAX_CTRL_REG) & ESP_APPTRACE_TRAX_HOST_CONNECT ? true : false;
}
static esp_err_t esp_apptrace_trax_flush_nolock(esp_apptrace_trax_data_t *hw_data, uint32_t min_sz, esp_apptrace_tmo_t *tmo)
{
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return ESP_ERR_INVALID_STATE;
}
return esp_apptrace_membufs_flush_nolock(&hw_data->membufs, min_sz, tmo);
}
static esp_err_t esp_apptrace_trax_flush(esp_apptrace_trax_data_t *hw_data, esp_apptrace_tmo_t *tmo)
{
if (!ESP_APPTRACE_TRAX_INITED(hw_data)) {
return ESP_ERR_INVALID_STATE;
}
esp_err_t res = esp_apptrace_trax_lock(hw_data, tmo);
if (res != ESP_OK) {
return res;
}
res = esp_apptrace_membufs_flush_nolock(&hw_data->membufs, 0, tmo);
// now we can safely unlock apptrace to allow other tasks/ISRs to get other buffers and write their data
if (esp_apptrace_trax_unlock(hw_data) != ESP_OK) {
assert(false && "Failed to unlock apptrace data!");
}
return res;
}
/*****************************************************************************************/
/************************** Membufs proto HW iface ***************************************/
/*****************************************************************************************/
static inline void esp_apptrace_trax_buffer_swap_lock(void)
{
extern uint32_t __esp_apptrace_trax_eri_updated;
// Indicate to the host that we are about to update.
// This is used only to place the CPU into streaming mode at tracing startup:
// before streaming starts, the host can halt us after we have read ESP_APPTRACE_TRAX_CTRL_REG but before we have updated it.
// HACK: in this case the host sets a breakpoint just after the ESP_APPTRACE_TRAX_CTRL_REG update;
// here we provide the address to set that breakpoint at.
// Enter the ERI update critical section.
eri_write(ESP_APPTRACE_TRAX_STAT_REG, (uint32_t)&__esp_apptrace_trax_eri_updated);
}
static __attribute__((noinline)) void esp_apptrace_trax_buffer_swap_unlock(void)
{
// exit ERI update critical section
eri_write(ESP_APPTRACE_TRAX_STAT_REG, 0x0);
// TODO: currently the host sets a breakpoint; using a break instruction to stop instead
// would free ESP_APPTRACE_TRAX_STAT_REG for other purposes
asm volatile (
" .global __esp_apptrace_trax_eri_updated\n"
"__esp_apptrace_trax_eri_updated:\n"); // host will set bp here to resolve collision at streaming start
}
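// Bank-switch handshake with the host: the host acknowledges the previously exposed block
// through ESP_APPTRACE_TRAX_CTRL_REG, so switching is allowed only when the host has fully
// read that block (its reported "length to read" is zero) and the acked block ID matches
// the block being retired.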
static esp_err_t esp_apptrace_trax_buffer_swap_start(uint32_t curr_block_id)
{
esp_err_t res = ESP_OK;
esp_apptrace_trax_buffer_swap_lock();
uint32_t ctrl_reg = eri_read(ESP_APPTRACE_TRAX_CTRL_REG);
uint32_t host_connected = ESP_APPTRACE_TRAX_HOST_CONNECT & ctrl_reg;
if (host_connected) {
uint32_t acked_block = ESP_APPTRACE_TRAX_BLOCK_ID_GET(ctrl_reg);
uint32_t host_to_read = ESP_APPTRACE_TRAX_BLOCK_LEN_GET(ctrl_reg);
if (host_to_read != 0 || acked_block != (curr_block_id & ESP_APPTRACE_TRAX_BLOCK_ID_MSK)) {
ESP_APPTRACE_LOGD("HC[%d]: Can not switch %x %d %x %x/%lx", cpu_hal_get_core_id(), ctrl_reg, host_to_read, acked_block,
curr_block_id & ESP_APPTRACE_TRAX_BLOCK_ID_MSK, curr_block_id);
res = ESP_ERR_NO_MEM;
goto _on_err;
}
}
return ESP_OK;
_on_err:
esp_apptrace_trax_buffer_swap_unlock();
return res;
}
static esp_err_t esp_apptrace_trax_buffer_swap_end(uint32_t new_block_id, uint32_t prev_block_len)
{
uint32_t ctrl_reg = eri_read(ESP_APPTRACE_TRAX_CTRL_REG);
uint32_t host_connected = ESP_APPTRACE_TRAX_HOST_CONNECT & ctrl_reg;
eri_write(ESP_APPTRACE_TRAX_CTRL_REG, ESP_APPTRACE_TRAX_BLOCK_ID(new_block_id) |
host_connected | ESP_APPTRACE_TRAX_BLOCK_LEN(prev_block_len));
esp_apptrace_trax_buffer_swap_unlock();
return ESP_OK;
}
static esp_err_t esp_apptrace_trax_buffer_swap(uint32_t new_block_id)
{
esp_apptrace_trax_select_memory_block(new_block_id);
return ESP_OK;
}
static bool esp_apptrace_trax_host_data_pending(void)
{
uint32_t ctrl_reg = eri_read(ESP_APPTRACE_TRAX_CTRL_REG);
return (ctrl_reg & ESP_APPTRACE_TRAX_HOST_DATA) ? true : false;
}


@ -0,0 +1,70 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ESP_APP_TRACE_MEMBUFS_PROTO_H_
#define ESP_APP_TRACE_MEMBUFS_PROTO_H_
#include "esp_app_trace_util.h"
#ifdef __cplusplus
extern "C" {
#endif
/** Swapping memory buffers protocol state */
typedef struct {
uint32_t in_block; // input block ID
// TODO: change to uint16_t
uint32_t markers[2]; // block filling level markers
} esp_apptrace_membufs_state_t;
/** memory block parameters,
* should be packed, because it is read from the host */
typedef struct {
uint8_t *start; // start address
uint32_t sz; // size
} esp_apptrace_mem_block_t;
typedef struct {
esp_err_t (*swap_start)(uint32_t curr_block_id);
esp_err_t (*swap)(uint32_t new_block_id);
esp_err_t (*swap_end)(uint32_t new_block_id, uint32_t prev_block_len);
bool (*host_data_pending)(void);
} esp_apptrace_membufs_proto_hw_t;
typedef struct {
esp_apptrace_membufs_proto_hw_t * hw;
volatile esp_apptrace_membufs_state_t state; // state
esp_apptrace_mem_block_t blocks[2]; // memory blocks
#if CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX > 0
// ring buffer control struct for pending user blocks
esp_apptrace_rb_t rb_pend;
// storage for pending user blocks
uint8_t pending_data[CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX + 1];
#endif
// ring buffer control struct for data from host (down buffer)
esp_apptrace_rb_t rb_down;
} esp_apptrace_membufs_proto_data_t;
esp_err_t esp_apptrace_membufs_init(esp_apptrace_membufs_proto_data_t *proto, const esp_apptrace_mem_block_t blocks_cfg[2]);
void esp_apptrace_membufs_down_buffer_config(esp_apptrace_membufs_proto_data_t *data, uint8_t *buf, uint32_t size);
uint8_t *esp_apptrace_membufs_down_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t *size, esp_apptrace_tmo_t *tmo);
esp_err_t esp_apptrace_membufs_down_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
uint8_t *esp_apptrace_membufs_up_buffer_get(esp_apptrace_membufs_proto_data_t *proto, uint32_t size, esp_apptrace_tmo_t *tmo);
esp_err_t esp_apptrace_membufs_up_buffer_put(esp_apptrace_membufs_proto_data_t *proto, uint8_t *ptr, esp_apptrace_tmo_t *tmo);
esp_err_t esp_apptrace_membufs_flush_nolock(esp_apptrace_membufs_proto_data_t *proto, uint32_t min_sz, esp_apptrace_tmo_t *tmo);
#ifdef __cplusplus
}
#endif
#endif
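
A minimal sketch (not part of this change) of how a transport, such as the TRAX port above, could plug its swap callbacks into this interface. Only the callback signatures and the esp_apptrace_membufs_init() prototype come from this header; the s_trax_proto_hw / esp_apptrace_trax_hw_init() names, the bank parameters, and attaching the ->hw pointer before init are illustrative assumptions.

static esp_apptrace_membufs_proto_hw_t s_trax_proto_hw = {
    .swap_start        = esp_apptrace_trax_buffer_swap_start,
    .swap              = esp_apptrace_trax_buffer_swap,
    .swap_end          = esp_apptrace_trax_buffer_swap_end,
    .host_data_pending = esp_apptrace_trax_host_data_pending,
};

static esp_err_t esp_apptrace_trax_hw_init(esp_apptrace_trax_data_t *hw_data,
                                           uint8_t *bank0, uint8_t *bank1, uint32_t bank_sz)
{
    const esp_apptrace_mem_block_t blocks_cfg[2] = {
        { .start = bank0, .sz = bank_sz }, // trace memory bank 0
        { .start = bank1, .sz = bank_sz }, // trace memory bank 1
    };
    hw_data->membufs.hw = &s_trax_proto_hw; // attach the HW callbacks (assumed to be the caller's job)
    return esp_apptrace_membufs_init(&hw_data->membufs, blocks_cfg);
}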


@ -3,10 +3,34 @@
CONFIG_ESP32_APPTRACE_DESTINATION CONFIG_APPTRACE_DESTINATION
CONFIG_ESP32_APPTRACE_DEST_NONE CONFIG_APPTRACE_DEST_NONE
CONFIG_ESP32_APPTRACE_DEST_TRAX CONFIG_APPTRACE_DEST_TRAX
CONFIG_ESP32_APPTRACE_DEST_TRAX CONFIG_APPTRACE_DEST_JTAG
CONFIG_ESP32_APPTRACE_ENABLE CONFIG_APPTRACE_ENABLE
CONFIG_ESP32_APPTRACE_LOCK_ENABLE CONFIG_APPTRACE_LOCK_ENABLE
CONFIG_ESP32_APPTRACE_ONPANIC_HOST_FLUSH_TMO CONFIG_APPTRACE_ONPANIC_HOST_FLUSH_TMO
CONFIG_ESP32_APPTRACE_POSTMORTEM_FLUSH_TRAX_THRESH CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH
CONFIG_ESP32_APPTRACE_PENDING_DATA_SIZE_MAX CONFIG_APPTRACE_PENDING_DATA_SIZE_MAX
CONFIG_ESP32_GCOV_ENABLE CONFIG_APPTRACE_GCOV_ENABLE
CONFIG_SYSVIEW_ENABLE CONFIG_APPTRACE_SV_ENABLE
CONFIG_SYSVIEW_TS_SOURCE CONFIG_APPTRACE_SV_TS_SOURCE
CONFIG_SYSVIEW_TS_SOURCE_CCOUNT CONFIG_APPTRACE_SV_TS_SOURCE_CCOUNT
CONFIG_SYSVIEW_TS_SOURCE_TIMER_00 CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_00
CONFIG_SYSVIEW_TS_SOURCE_TIMER_01 CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_01
CONFIG_SYSVIEW_TS_SOURCE_TIMER_10 CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_10
CONFIG_SYSVIEW_TS_SOURCE_TIMER_11 CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_11
CONFIG_SYSVIEW_TS_SOURCE_ESP_TIMER CONFIG_APPTRACE_SV_TS_SOURCE_ESP_TIMER
CONFIG_SYSVIEW_MAX_TASKS CONFIG_APPTRACE_SV_MAX_TASKS
CONFIG_SYSVIEW_BUF_WAIT_TMO CONFIG_APPTRACE_SV_BUF_WAIT_TMO
CONFIG_SYSVIEW_EVT_OVERFLOW_ENABLE CONFIG_APPTRACE_SV_EVT_OVERFLOW_ENABLE
CONFIG_SYSVIEW_EVT_ISR_ENTER_ENABLE CONFIG_APPTRACE_SV_EVT_ISR_ENTER_ENABLE
CONFIG_SYSVIEW_EVT_ISR_EXIT_ENABLE CONFIG_APPTRACE_SV_EVT_ISR_EXIT_ENABLE
CONFIG_SYSVIEW_EVT_ISR_TO_SCHEDULER_ENABLE CONFIG_APPTRACE_SV_EVT_ISR_TO_SCHED_ENABLE
CONFIG_SYSVIEW_EVT_TASK_START_EXEC_ENABLE CONFIG_APPTRACE_SV_EVT_TASK_START_EXEC_ENABLE
CONFIG_SYSVIEW_EVT_TASK_STOP_EXEC_ENABLE CONFIG_APPTRACE_SV_EVT_TASK_STOP_EXEC_ENABLE
CONFIG_SYSVIEW_EVT_TASK_START_READY_ENABLE CONFIG_APPTRACE_SV_EVT_TASK_START_READY_ENABLE
CONFIG_SYSVIEW_EVT_TASK_STOP_READY_ENABLE CONFIG_APPTRACE_SV_EVT_TASK_STOP_READY_ENABLE
CONFIG_SYSVIEW_EVT_TASK_CREATE_ENABLE CONFIG_APPTRACE_SV_EVT_TASK_CREATE_ENABLE
CONFIG_SYSVIEW_EVT_TASK_TERMINATE_ENABLE CONFIG_APPTRACE_SV_EVT_TASK_TERMINATE_ENABLE
CONFIG_SYSVIEW_EVT_IDLE_ENABLE CONFIG_APPTRACE_SV_EVT_IDLE_ENABLE
CONFIG_SYSVIEW_EVT_TIMER_ENTER_ENABLE CONFIG_APPTRACE_SV_EVT_TIMER_ENTER_ENABLE
CONFIG_SYSVIEW_EVT_TIMER_EXIT_ENABLE CONFIG_APPTRACE_SV_EVT_TIMER_EXIT_ENABLE


@ -159,8 +159,8 @@ unsigned SEGGER_RTT_WriteNoLock (unsigned BufferIndex, const voi
unsigned SEGGER_RTT_WriteSkipNoLock (unsigned BufferIndex, const void* pBuffer, unsigned NumBytes);
unsigned SEGGER_RTT_WriteString (unsigned BufferIndex, const char* s);
void SEGGER_RTT_WriteWithOverwriteNoLock(unsigned BufferIndex, const void* pBuffer, unsigned NumBytes);
void SEGGER_RTT_ESP32_FlushNoLock (unsigned long min_sz, unsigned long tmo);
void SEGGER_RTT_ESP32_Flush (unsigned long min_sz, unsigned long tmo);
void SEGGER_RTT_ESP_FlushNoLock (unsigned long min_sz, unsigned long tmo);
void SEGGER_RTT_ESP_Flush (unsigned long min_sz, unsigned long tmo);
//
// Function macro for performance optimization
//


@ -91,9 +91,9 @@ extern const SEGGER_SYSVIEW_OS_API SYSVIEW_X_OS_TraceAPI;
#define SYSVIEW_DEVICE_NAME CONFIG_IDF_TARGET
// Determine which timer to use as timestamp source
#if CONFIG_SYSVIEW_TS_SOURCE_CCOUNT
#if CONFIG_APPTRACE_SV_TS_SOURCE_CCOUNT
#define TS_USE_CCOUNT 1
#elif CONFIG_SYSVIEW_TS_SOURCE_ESP_TIMER
#elif CONFIG_APPTRACE_SV_TS_SOURCE_ESP_TIMER
#define TS_USE_ESP_TIMER 1
#else
#define TS_USE_TIMERGROUP 1
@ -109,13 +109,13 @@ extern const SEGGER_SYSVIEW_OS_API SYSVIEW_X_OS_TraceAPI;
#define SYSVIEW_TIMESTAMP_FREQ (esp_clk_apb_freq() / SYSVIEW_TIMER_DIV)
// Timer ID and group ID
#if defined(CONFIG_SYSVIEW_TS_SOURCE_TIMER_00) || defined(CONFIG_SYSVIEW_TS_SOURCE_TIMER_01)
#if defined(CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_00) || defined(CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_01)
#define TS_TIMER_ID 0
#else
#define TS_TIMER_ID 1
#endif // TIMER_00 || TIMER_01
#if defined(CONFIG_SYSVIEW_TS_SOURCE_TIMER_00) || defined(CONFIG_SYSVIEW_TS_SOURCE_TIMER_10)
#if defined(CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_00) || defined(CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_10)
#define TS_TIMER_GROUP 0
#else
#define TS_TIMER_GROUP 1
@ -213,43 +213,43 @@ void SEGGER_SYSVIEW_Conf(void) {
&SYSVIEW_X_OS_TraceAPI, _cbSendSystemDesc);
SEGGER_SYSVIEW_SetRAMBase(SYSVIEW_RAM_BASE);
#if !CONFIG_SYSVIEW_EVT_OVERFLOW_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_OVERFLOW_ENABLE
disable_evts |= SYSVIEW_EVTMASK_OVERFLOW;
#endif
#if !CONFIG_SYSVIEW_EVT_ISR_ENTER_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_ISR_ENTER_ENABLE
disable_evts |= SYSVIEW_EVTMASK_ISR_ENTER;
#endif
#if !CONFIG_SYSVIEW_EVT_ISR_EXIT_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_ISR_EXIT_ENABLE
disable_evts |= SYSVIEW_EVTMASK_ISR_EXIT;
#endif
#if !CONFIG_SYSVIEW_EVT_TASK_START_EXEC_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TASK_START_EXEC_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TASK_START_EXEC;
#endif
#if !CONFIG_SYSVIEW_EVT_TASK_STOP_EXEC_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TASK_STOP_EXEC_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TASK_STOP_EXEC;
#endif
#if !CONFIG_SYSVIEW_EVT_TASK_START_READY_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TASK_START_READY_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TASK_START_READY;
#endif
#if !CONFIG_SYSVIEW_EVT_TASK_STOP_READY_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TASK_STOP_READY_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TASK_STOP_READY;
#endif
#if !CONFIG_SYSVIEW_EVT_TASK_CREATE_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TASK_CREATE_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TASK_CREATE;
#endif
#if !CONFIG_SYSVIEW_EVT_TASK_TERMINATE_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TASK_TERMINATE_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TASK_TERMINATE;
#endif
#if !CONFIG_SYSVIEW_EVT_IDLE_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_IDLE_ENABLE
disable_evts |= SYSVIEW_EVTMASK_IDLE;
#endif
#if !CONFIG_SYSVIEW_EVT_ISR_TO_SCHEDULER_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_ISR_TO_SCHED_ENABLE
disable_evts |= SYSVIEW_EVTMASK_ISR_TO_SCHEDULER;
#endif
#if !CONFIG_SYSVIEW_EVT_TIMER_ENTER_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TIMER_ENTER_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TIMER_ENTER;
#endif
#if !CONFIG_SYSVIEW_EVT_TIMER_EXIT_ENABLE
#if !CONFIG_APPTRACE_SV_EVT_TIMER_EXIT_ENABLE
disable_evts |= SYSVIEW_EVTMASK_TIMER_EXIT;
#endif
SEGGER_SYSVIEW_DisableEvents(disable_evts);


@ -80,7 +80,7 @@ Notes:
#define portSTACK_GROWTH ( -1 )
#endif
#define SYSVIEW_FREERTOS_MAX_NOF_TASKS CONFIG_SYSVIEW_MAX_TASKS
#define SYSVIEW_FREERTOS_MAX_NOF_TASKS CONFIG_APPTRACE_SV_MAX_TASKS
/*********************************************************************
*
@ -290,12 +290,12 @@ Notes:
#define traceTASK_SWITCHED_IN() if(prvGetTCBFromHandle(NULL) == xTaskGetIdleTaskHandle()) { \
SEGGER_SYSVIEW_OnIdle(); \
} else { \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[xPortGetCoreID()]); \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[cpu_hal_get_core_id()]); \
}
#else
#define traceTASK_SWITCHED_IN() { \
if (memcmp(pxCurrentTCB[xPortGetCoreID()]->pcTaskName, "IDLE", 5) != 0) { \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[xPortGetCoreID()]); \
if (memcmp(pxCurrentTCB[cpu_hal_get_core_id()]->pcTaskName, "IDLE", 5) != 0) { \
SEGGER_SYSVIEW_OnTaskStartExec((U32)pxCurrentTCB[cpu_hal_get_core_id()]); \
} else { \
SEGGER_SYSVIEW_OnIdle(); \
} \
@ -305,14 +305,15 @@ Notes:
#define traceMOVED_TASK_TO_READY_STATE(pxTCB) SEGGER_SYSVIEW_OnTaskStartReady((U32)pxTCB)
#define traceREADDED_TASK_TO_READY_STATE(pxTCB)
#define traceMOVED_TASK_TO_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[xPortGetCoreID()], (1u << 2))
#define traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[xPortGetCoreID()], (1u << 2))
#define traceMOVED_TASK_TO_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[cpu_hal_get_core_id()], (1u << 2))
#define traceMOVED_TASK_TO_OVERFLOW_DELAYED_LIST() SEGGER_SYSVIEW_OnTaskStopReady((U32)pxCurrentTCB[cpu_hal_get_core_id()], (1u << 2))
#define traceMOVED_TASK_TO_SUSPENDED_LIST(pxTCB) SEGGER_SYSVIEW_OnTaskStopReady((U32)pxTCB, ((3u << 3) | 3))
#define traceISR_EXIT_TO_SCHEDULER() SEGGER_SYSVIEW_RecordExitISRToScheduler()
#define traceISR_EXIT() SEGGER_SYSVIEW_RecordExitISR()
#define traceISR_ENTER(_n_) SEGGER_SYSVIEW_RecordEnterISR(_n_)
/*********************************************************************
*
* API functions


@ -20,10 +20,10 @@ const static char *TAG = "segger_rtt";
// size of down channel data buf
#define SYSVIEW_DOWN_BUF_SIZE 32
#define SEGGER_STOP_WAIT_TMO 1000000 //us
#if CONFIG_SYSVIEW_BUF_WAIT_TMO == -1
#if CONFIG_APPTRACE_SV_BUF_WAIT_TMO == -1
#define SEGGER_HOST_WAIT_TMO ESP_APPTRACE_TMO_INFINITE
#else
#define SEGGER_HOST_WAIT_TMO CONFIG_SYSVIEW_BUF_WAIT_TMO
#define SEGGER_HOST_WAIT_TMO CONFIG_APPTRACE_SV_BUF_WAIT_TMO
#endif
static uint8_t s_events_buf[SYSVIEW_EVENTS_BUF_SZ];
@ -39,7 +39,7 @@ static uint8_t s_down_buf[SYSVIEW_DOWN_BUF_SIZE];
/*********************************************************************
*
* SEGGER_RTT_ESP32_FlushNoLock()
* SEGGER_RTT_ESP_FlushNoLock()
*
* Function description
* Flushes buffered events.
@ -51,7 +51,7 @@ static uint8_t s_down_buf[SYSVIEW_DOWN_BUF_SIZE];
* Return value
* None.
*/
void SEGGER_RTT_ESP32_FlushNoLock(unsigned long min_sz, unsigned long tmo)
void SEGGER_RTT_ESP_FlushNoLock(unsigned long min_sz, unsigned long tmo)
{
esp_err_t res;
if (s_events_buf_filled > 0) {
@ -70,7 +70,7 @@ void SEGGER_RTT_ESP32_FlushNoLock(unsigned long min_sz, unsigned long tmo)
/*********************************************************************
*
* SEGGER_RTT_ESP32_Flush()
* SEGGER_RTT_ESP_Flush()
*
* Function description
* Flushes buffered events.
@ -82,10 +82,10 @@ void SEGGER_RTT_ESP32_FlushNoLock(unsigned long min_sz, unsigned long tmo)
* Return value
* None.
*/
void SEGGER_RTT_ESP32_Flush(unsigned long min_sz, unsigned long tmo)
void SEGGER_RTT_ESP_Flush(unsigned long min_sz, unsigned long tmo)
{
SEGGER_SYSVIEW_LOCK();
SEGGER_RTT_ESP32_FlushNoLock(min_sz, tmo);
SEGGER_RTT_ESP_FlushNoLock(min_sz, tmo);
SEGGER_SYSVIEW_UNLOCK();
}
@ -147,7 +147,7 @@ unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, u
ESP_LOGE(TAG, "Too large event %u bytes!", NumBytes);
return 0;
}
if (xPortGetCoreID()) { // dual core specific code
if (cpu_hal_get_core_id()) { // dual core specific code
// use the second highest bit of the event ID to indicate the core ID;
// the highest bit cannot be used due to the event ID encoding method.
// This reduces the supported ID range to [0..63] (for 1-byte IDs) plus [128..16383] (for 2-byte IDs)
@ -167,7 +167,7 @@ unsigned SEGGER_RTT_WriteSkipNoLock(unsigned BufferIndex, const void* pBuffer, u
memcpy(&s_events_buf[s_events_buf_filled], pBuffer, NumBytes);
s_events_buf_filled += NumBytes;
if (event_id == SYSVIEW_EVTID_TRACE_STOP) {
SEGGER_RTT_ESP32_FlushNoLock(0, SEGGER_STOP_WAIT_TMO);
SEGGER_RTT_ESP_FlushNoLock(0, SEGGER_STOP_WAIT_TMO);
}
return NumBytes;
}
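
A small illustration (not part of this change) of the core-ID tagging described in the comment above: events produced on core 1 get the second highest bit of the event ID set, leaving the highest bit free for SystemView's own 1-byte vs. 2-byte ID encoding. The helper and flag names are illustrative assumptions, and the sketch covers only the 1-byte ID case.

#include <stdint.h>

// Illustrative only: tag a single-byte SystemView event ID (valid range [0..63] here)
// with the originating core by setting the second highest bit.
#define SYSVIEW_EVTID_CORE1_FLAG (1u << 6)

static inline uint8_t sysview_tag_event_id_1byte(uint8_t event_id, unsigned core_id)
{
    // Core 0 events keep the original ID; core 1 events carry the flag bit.
    return core_id ? (uint8_t)(event_id | SYSVIEW_EVTID_CORE1_FLAG) : event_id;
}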


@ -52,7 +52,7 @@ esp_err_t esp_sysview_heap_trace_start(uint32_t tmo)
esp_err_t esp_sysview_heap_trace_stop(void)
{
ESP_EARLY_LOGV(TAG, "%s", __func__);
SEGGER_RTT_ESP32_Flush(0, ESP_APPTRACE_TMO_INFINITE);
SEGGER_RTT_ESP_Flush(0, ESP_APPTRACE_TMO_INFINITE);
return ESP_OK;
}


@ -88,7 +88,7 @@ static void esp_apptrace_test_timer_init(int timer_group, int timer_idx, uint32_
timer_enable_intr(timer_group, timer_idx);
}
#if CONFIG_SYSVIEW_ENABLE == 0
#if CONFIG_APPTRACE_SV_ENABLE == 0
#define ESP_APPTRACE_TEST_WRITE(_b_, _s_) esp_apptrace_write(ESP_APPTRACE_DEST_TRAX, _b_, _s_, ESP_APPTRACE_TMO_INFINITE)
#define ESP_APPTRACE_TEST_WRITE_FROM_ISR(_b_, _s_) esp_apptrace_write(ESP_APPTRACE_DEST_TRAX, _b_, _s_, 0UL)
#define ESP_APPTRACE_TEST_WRITE_NOWAIT(_b_, _s_) esp_apptrace_write(ESP_APPTRACE_DEST_TRAX, _b_, _s_, 0)
@ -214,7 +214,7 @@ static void esp_apptrace_dummy_task(void *p)
i = 0;
while (!arg->stop) {
ESP_APPTRACE_TEST_LOGD("%x: dummy task work %d.%d", xTaskGetCurrentTaskHandle(), xPortGetCoreID(), i++);
ESP_APPTRACE_TEST_LOGD("%x: dummy task work %d.%d", xTaskGetCurrentTaskHandle(), cpu_hal_get_core_id(), i++);
if (tmo_ticks) {
vTaskDelay(tmo_ticks);
}
@ -259,7 +259,7 @@ static void esp_apptrace_test_task(void *p)
ESP_APPTRACE_TEST_LOGE("Failed to timer_isr_register (%d)!", res);
goto on_fail;
}
*(uint32_t *)arg->timers[i].data.buf = ((uint32_t)inth[i]) | (1 << 31) | (xPortGetCoreID() ? 0x1 : 0);
*(uint32_t *)arg->timers[i].data.buf = ((uint32_t)inth[i]) | (1 << 31) | (cpu_hal_get_core_id() ? 0x1 : 0);
ESP_APPTRACE_TEST_LOGI("%x: start timer %x period %u us", xTaskGetCurrentTaskHandle(), inth[i], arg->timers[i].data.period);
res = timer_start(arg->timers[i].group, arg->timers[i].id);
if (res != ESP_OK) {
@ -269,7 +269,7 @@ static void esp_apptrace_test_task(void *p)
}
}
*(uint32_t *)arg->data.buf = (uint32_t)xTaskGetCurrentTaskHandle() | (xPortGetCoreID() ? 0x1 : 0);
*(uint32_t *)arg->data.buf = (uint32_t)xTaskGetCurrentTaskHandle() | (cpu_hal_get_core_id() ? 0x1 : 0);
arg->data.wr_cnt = 0;
arg->data.wr_err = 0;
while (!arg->stop) {
@ -749,7 +749,7 @@ static void esp_logtrace_task(void *p)
ESP_LOGI(TAG, "%p: sample print 4 %c", xTaskGetCurrentTaskHandle(), ((i & 0xFF) % 95) + 32);
ESP_LOGI(TAG, "%p: sample print 5 %f", xTaskGetCurrentTaskHandle(), 1.0);
ESP_LOGI(TAG, "%p: sample print 6 %f", xTaskGetCurrentTaskHandle(), 3.45);
ESP_LOGI(TAG, "%p: logtrace task work %d.%d", xTaskGetCurrentTaskHandle(), xPortGetCoreID(), i);
ESP_LOGI(TAG, "%p: logtrace task work %d.%d", xTaskGetCurrentTaskHandle(), cpu_hal_get_core_id(), i);
if (++i == 10000) {
break;
}


@ -107,10 +107,6 @@ static uint32_t non_iram_int_mask[SOC_CPU_CORES_NUM];
static uint32_t non_iram_int_disabled[SOC_CPU_CORES_NUM];
static bool non_iram_int_disabled_flag[SOC_CPU_CORES_NUM];
#if CONFIG_SYSVIEW_ENABLE
extern uint32_t port_switch_flag[];
#endif
static portMUX_TYPE spinlock = portMUX_INITIALIZER_UNLOCKED;
//Inserts an item into vector_desc list so that the list is sorted
@ -415,16 +411,12 @@ static void IRAM_ATTR shared_intr_isr(void *arg)
while(sh_vec) {
if (!sh_vec->disabled) {
if ((sh_vec->statusreg == NULL) || (*sh_vec->statusreg & sh_vec->statusmask)) {
#if CONFIG_SYSVIEW_ENABLE
traceISR_ENTER(sh_vec->source+ETS_INTERNAL_INTR_SOURCE_OFF);
#endif
sh_vec->isr(sh_vec->arg);
#if CONFIG_SYSVIEW_ENABLE
// check if we will return to scheduler or to interrupted task after ISR
if (!port_switch_flag[cpu_hal_get_core_id()]) {
if (!os_task_switch_is_pended(cpu_hal_get_core_id())) {
traceISR_EXIT();
}
#endif
}
}
sh_vec=sh_vec->next;
@ -432,18 +424,18 @@ static void IRAM_ATTR shared_intr_isr(void *arg)
portEXIT_CRITICAL_ISR(&spinlock);
}
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
//Common non-shared isr handler wrapper.
static void IRAM_ATTR non_shared_intr_isr(void *arg)
{
non_shared_isr_arg_t *ns_isr_arg=(non_shared_isr_arg_t*)arg;
portENTER_CRITICAL_ISR(&spinlock);
traceISR_ENTER(ns_isr_arg->source+ETS_INTERNAL_INTR_SOURCE_OFF);
// FIXME: can we call ISR and check port_switch_flag after releasing spinlock?
// when CONFIG_SYSVIEW_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
// FIXME: can we call ISR and check os_task_switch_is_pended() after releasing spinlock?
// when CONFIG_APPTRACE_SV_ENABLE = 0 ISRs for non-shared IRQs are called without spinlock
ns_isr_arg->isr(ns_isr_arg->isr_arg);
// check if we will return to scheduler or to interrupted task after ISR
if (!port_switch_flag[cpu_hal_get_core_id()]) {
if (!os_task_switch_is_pended(cpu_hal_get_core_id())) {
traceISR_EXIT();
}
portEXIT_CRITICAL_ISR(&spinlock);
@ -539,7 +531,7 @@ esp_err_t esp_intr_alloc_intrstatus(int source, int flags, uint32_t intrstatusre
//Mark as unusable for other interrupt sources. This is ours now!
vd->flags=VECDESC_FL_NONSHARED;
if (handler) {
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
non_shared_isr_arg_t *ns_isr_arg=malloc(sizeof(non_shared_isr_arg_t));
if (!ns_isr_arg) {
portEXIT_CRITICAL(&spinlock);
@ -688,7 +680,7 @@ esp_err_t esp_intr_free(intr_handle_t handle)
if ((handle->vector_desc->flags&VECDESC_FL_NONSHARED) || free_shared_vector) {
ESP_EARLY_LOGV(TAG, "esp_intr_free: Disabling int, killing handler");
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
if (!free_shared_vector) {
void *isr_arg = interrupt_controller_hal_get_int_handler_arg(handle->vector_desc->intno);
if (isr_arg) {


@ -40,7 +40,7 @@
#if CONFIG_APPTRACE_ENABLE
#include "esp_app_trace.h"
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
#include "SEGGER_RTT.h"
#endif
@ -266,8 +266,8 @@ void esp_panic_handler(panic_info_t *info)
panic_print_str(" and returning...\r\n");
disable_all_wdts();
#if CONFIG_APPTRACE_ENABLE
#if CONFIG_SYSVIEW_ENABLE
SEGGER_RTT_ESP32_FlushNoLock(CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH, APPTRACE_ONPANIC_HOST_FLUSH_TMO);
#if CONFIG_APPTRACE_SV_ENABLE
SEGGER_RTT_ESP_FlushNoLock(CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH, APPTRACE_ONPANIC_HOST_FLUSH_TMO);
#else
esp_apptrace_flush_nolock(ESP_APPTRACE_DEST_TRAX, CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH,
APPTRACE_ONPANIC_HOST_FLUSH_TMO);
@ -307,8 +307,8 @@ void esp_panic_handler(panic_info_t *info)
#if CONFIG_APPTRACE_ENABLE
disable_all_wdts();
#if CONFIG_SYSVIEW_ENABLE
SEGGER_RTT_ESP32_FlushNoLock(CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH, APPTRACE_ONPANIC_HOST_FLUSH_TMO);
#if CONFIG_APPTRACE_SV_ENABLE
SEGGER_RTT_ESP_FlushNoLock(CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH, APPTRACE_ONPANIC_HOST_FLUSH_TMO);
#else
esp_apptrace_flush_nolock(ESP_APPTRACE_DEST_TRAX, CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH,
APPTRACE_ONPANIC_HOST_FLUSH_TMO);
@ -379,8 +379,8 @@ void __attribute__((noreturn, no_sanitize_undefined)) panic_abort(const char *de
s_panic_abort_details = (char *) details;
#if CONFIG_APPTRACE_ENABLE
#if CONFIG_SYSVIEW_ENABLE
SEGGER_RTT_ESP32_FlushNoLock(CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH, APPTRACE_ONPANIC_HOST_FLUSH_TMO);
#if CONFIG_APPTRACE_SV_ENABLE
SEGGER_RTT_ESP_FlushNoLock(CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH, APPTRACE_ONPANIC_HOST_FLUSH_TMO);
#else
esp_apptrace_flush_nolock(ESP_APPTRACE_DEST_TRAX, CONFIG_APPTRACE_POSTMORTEM_FLUSH_THRESH,
APPTRACE_ONPANIC_HOST_FLUSH_TMO);


@ -297,19 +297,6 @@ static void do_core_init(void)
esp_err_t err __attribute__((unused));
// [refactor-todo] move this to secondary init
#if CONFIG_APPTRACE_ENABLE
err = esp_apptrace_init();
assert(err == ESP_OK && "Failed to init apptrace module on PRO CPU!");
#endif
#if CONFIG_SYSVIEW_ENABLE
SEGGER_SYSVIEW_Conf();
#endif
#if CONFIG_ESP_DEBUG_STUBS_ENABLE
esp_dbg_stubs_init();
#endif
err = esp_pthread_init();
assert(err == ESP_OK && "Failed to init pthread module!");
@ -450,6 +437,18 @@ IRAM_ATTR ESP_SYSTEM_INIT_FN(init_components0, BIT(0))
esp_sleep_enable_gpio_switch(true);
#endif
#if CONFIG_APPTRACE_ENABLE
esp_err_t err = esp_apptrace_init();
assert(err == ESP_OK && "Failed to init apptrace module on PRO CPU!");
#endif
#if CONFIG_APPTRACE_SV_ENABLE
SEGGER_SYSVIEW_Conf();
#endif
#if CONFIG_ESP_DEBUG_STUBS_ENABLE
esp_dbg_stubs_init();
#endif
#if defined(CONFIG_PM_ENABLE)
esp_pm_impl_init();
#endif


@ -42,7 +42,7 @@ else() # RISC-V
port/riscv
.)
set(required_components esp_timer)
set(required_components app_trace esp_timer)
endif()
list(APPEND srcs
@ -61,7 +61,7 @@ if(CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY)
list(APPEND srcs "port/xtensa/xtensa_loadstore_handler.S")
endif()
# app_trace is required by FreeRTOS headers only when CONFIG_SYSVIEW_ENABLE=y,
# app_trace is required by FreeRTOS headers only when CONFIG_APPTRACE_SV_ENABLE=y,
# but requirements can't depend on config options, so always require it.
idf_component_register(SRCS "${srcs}"
INCLUDE_DIRS ${include_dirs}


@ -154,4 +154,13 @@ int xt_clock_freq(void) __attribute__((deprecated));
#define configISR_STACK_SIZE ((CONFIG_FREERTOS_ISR_STACKSIZE + configSTACK_ALIGNMENT - 1) & (~(configSTACK_ALIGNMENT - 1)))
#endif
#ifndef __ASSEMBLER__
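/* Used by SystemView-aware ISR wrappers (see esp_intr_alloc) to check whether a context switch
was requested while the ISR ran, so that traceISR_EXIT() is emitted only when execution returns
to the interrupted task rather than to the scheduler. */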
#if CONFIG_APPTRACE_SV_ENABLE
extern uint32_t port_switch_flag[];
#define os_task_switch_is_pended(_cpu_) (port_switch_flag[_cpu_])
#else
#define os_task_switch_is_pended(_cpu_) (false)
#endif
#endif
#endif // FREERTOS_CONFIG_XTENSA_H


@ -143,7 +143,7 @@ xt_handler xt_set_interrupt_handler(int n, xt_handler f, void * arg)
return ((old == &xt_unhandled_interrupt) ? 0 : old);
}
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
void * xt_get_interrupt_handler_arg(int n)
{
xt_handler_table_entry * entry;


@ -326,10 +326,10 @@ Another useful IDF feature built on top of application tracing library is the sy
How To Use It
"""""""""""""
Support for this feature is enabled by *Component config > Application Level Tracing > FreeRTOS SystemView Tracing* (:ref:`CONFIG_SYSVIEW_ENABLE`) menuconfig option. There are several other options enabled under the same menu:
Support for this feature is enabled by *Component config > Application Level Tracing > FreeRTOS SystemView Tracing* (:ref:`CONFIG_APPTRACE_SV_ENABLE`) menuconfig option. There are several other options enabled under the same menu:
1. {IDF_TARGET_NAME} timer to use as SystemView timestamp source (:ref:`CONFIG_SYSVIEW_TS_SOURCE`) selects the source of timestamps for SystemView events. In single-core mode timestamps are generated using the {IDF_TARGET_NAME} internal cycle counter running at up to 240 MHz (~4 ns granularity). In dual-core mode an external timer running at 40 MHz is used, so the timestamp granularity is 25 ns.
2. Individually enabled or disabled collection of SystemView events (``CONFIG_SYSVIEW_EVT_XXX``):
1. {IDF_TARGET_NAME} timer to use as SystemView timestamp source (:ref:`CONFIG_APPTRACE_SV_TS_SOURCE`) selects the source of timestamps for SystemView events. In single-core mode timestamps are generated using the {IDF_TARGET_NAME} internal cycle counter running at up to 240 MHz (~4 ns granularity). In dual-core mode an external timer running at 40 MHz is used, so the timestamp granularity is 25 ns.
2. Individually enabled or disabled collection of SystemView events (``CONFIG_APPTRACE_SV_EVT_XXX``):
- Trace Buffer Overflow Event
- ISR Enter Event


@ -273,7 +273,7 @@ Once you've identified the code which you think is leaking:
- In the project configuration menu, navigate to ``Component settings`` -> ``Heap Memory Debugging`` -> :ref:`CONFIG_HEAP_TRACING_DEST` and select ``Host-Based``.
- In the project configuration menu, navigate to ``Component settings`` -> ``Application Level Tracing`` -> :ref:`CONFIG_APPTRACE_DESTINATION` and select ``Trace memory``.
- In the project configuration menu, navigate to ``Component settings`` -> ``Application Level Tracing`` -> ``FreeRTOS SystemView Tracing`` and enable :ref:`CONFIG_SYSVIEW_ENABLE`.
- In the project configuration menu, navigate to ``Component settings`` -> ``Application Level Tracing`` -> ``FreeRTOS SystemView Tracing`` and enable :ref:`CONFIG_APPTRACE_SV_ENABLE`.
- Call the function :cpp:func:`heap_trace_init_tohost` early in the program, to initialize JTAG heap tracing module.
- Call the function :cpp:func:`heap_trace_start` to begin recording all mallocs/frees in the system. Call this immediately before the piece of code which you suspect is leaking memory.
In host-based mode the argument to this function is ignored and the heap tracing module behaves as if ``HEAP_TRACE_ALL`` was passed: all allocations and deallocations are sent to the host (see the sketch below).
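
A minimal sketch of the host-based sequence above; ``suspect_code_path()`` is a hypothetical placeholder for the code under investigation, and the final ``heap_trace_stop()`` call is the standard way to stop streaming records once the suspect code has run::

    #include "esp_err.h"
    #include "esp_heap_trace.h"

    extern void suspect_code_path(void);                    // hypothetical code suspected of leaking

    void trace_suspected_leak(void)
    {
        ESP_ERROR_CHECK(heap_trace_init_tohost());          // init host-based (JTAG) heap tracing
        ESP_ERROR_CHECK(heap_trace_start(HEAP_TRACE_ALL));  // argument is ignored in host-based mode
        suspect_code_path();
        ESP_ERROR_CHECK(heap_trace_stop());                 // stop sending records to the host
    }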


@ -326,10 +326,10 @@ Another useful IDF feature built on top of the application level tracing library is system level tracing, which
How To Use It
"""""""""""""
To use this feature, enable the :ref:`CONFIG_SYSVIEW_ENABLE` option in menuconfig under *Component config > Application Level Tracing > FreeRTOS SystemView Tracing*. Several other options are enabled under the same menu:
To use this feature, enable the :ref:`CONFIG_APPTRACE_SV_ENABLE` option in menuconfig under *Component config > Application Level Tracing > FreeRTOS SystemView Tracing*. Several other options are enabled under the same menu:
1. *{IDF_TARGET_NAME} timer to use as SystemView timestamp source* (:ref:`CONFIG_SYSVIEW_TS_SOURCE`) selects the source of timestamps for SystemView events. In single-core mode timestamps are generated using the {IDF_TARGET_NAME} internal cycle counter running at up to 240 MHz (~4 ns granularity). In dual-core mode an external timer running at 40 MHz is used, so the timestamp granularity is 25 ns.
2. Collection of SystemView events that can be individually enabled or disabled (``CONFIG_SYSVIEW_EVT_XXX``):
1. *{IDF_TARGET_NAME} timer to use as SystemView timestamp source* (:ref:`CONFIG_APPTRACE_SV_TS_SOURCE`) selects the source of timestamps for SystemView events. In single-core mode timestamps are generated using the {IDF_TARGET_NAME} internal cycle counter running at up to 240 MHz (~4 ns granularity). In dual-core mode an external timer running at 40 MHz is used, so the timestamp granularity is 25 ns.
2. Collection of SystemView events that can be individually enabled or disabled (``CONFIG_APPTRACE_SV_EVT_XXX``):
- Trace Buffer Overflow Event
- ISR Enter Event


@ -1,5 +1,5 @@
# Enable application tracing by default
CONFIG_APPTRACE_DEST_TRAX=y
CONFIG_APPTRACE_DEST_JTAG=y
CONFIG_APPTRACE_ENABLE=y
# Disable WiFi stack by default
CONFIG_WIFI_ENABLED=n


@ -1,4 +1,4 @@
CONFIG_APPTRACE_DEST_TRAX=y
CONFIG_APPTRACE_DEST_JTAG=y
# CONFIG_APPTRACE_DEST_NONE is not set
CONFIG_APPTRACE_ENABLE=y
CONFIG_APPTRACE_LOCK_ENABLE=y


@ -26,7 +26,7 @@ typedef struct {
} example_event_data_t;
#if CONFIG_SYSVIEW_ENABLE
#if CONFIG_APPTRACE_SV_ENABLE
#if !CONFIG_USE_CUSTOM_EVENT_ID
#define SYSVIEW_EXAMPLE_SEND_EVENT_ID 0
@ -165,13 +165,13 @@ void app_main(void)
},
#if CONFIG_FREERTOS_UNICORE == 0
{
.group = TIMER_GROUP_1,
.timer = TIMER_1,
.group = TIMER_GROUP_0,
.timer = TIMER_0,
},
#endif
};
#if CONFIG_SYSVIEW_ENABLE && CONFIG_USE_CUSTOM_EVENT_ID
#if CONFIG_APPTRACE_SV_ENABLE && CONFIG_USE_CUSTOM_EVENT_ID
// Currently OpenOCD does not support requesting module info from the target, so do the following:
// wait until the SystemView module receives the START command from the host,
// after that data can be sent to the host using the onboard API,
@ -183,7 +183,7 @@ void app_main(void)
#endif
example_timer_init(TIMER_GROUP_1, TIMER_0, 2000);
example_timer_init(TIMER_GROUP_1, TIMER_1, 4000);
example_timer_init(TIMER_GROUP_0, TIMER_0, 4000);
xTaskCreatePinnedToCore(example_task, "svtrace0", 2048, &event_data[0], 3, &event_data[0].thnd, 0);
ESP_LOGI(TAG, "Created task %p", event_data[0].thnd);


@ -4,21 +4,21 @@ CONFIG_FREERTOS_UNICORE=y
# 1ms tick period
CONFIG_FREERTOS_HZ=1000
# Enable application tracing by default
CONFIG_APPTRACE_DEST_TRAX=y
CONFIG_APPTRACE_DEST_JTAG=y
CONFIG_APPTRACE_ENABLE=y
# Enable FreeRTOS SystemView Tracing by default
CONFIG_SYSVIEW_ENABLE=y
CONFIG_SYSVIEW_TS_SOURCE_TIMER_00=y
CONFIG_SYSVIEW_EVT_OVERFLOW_ENABLE=y
CONFIG_SYSVIEW_EVT_ISR_ENTER_ENABLE=y
CONFIG_SYSVIEW_EVT_ISR_EXIT_ENABLE=y
CONFIG_SYSVIEW_EVT_ISR_TO_SCHEDULER_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_START_EXEC_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_STOP_EXEC_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_START_READY_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_STOP_READY_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_CREATE_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_TERMINATE_ENABLE=y
CONFIG_SYSVIEW_EVT_IDLE_ENABLE=y
CONFIG_SYSVIEW_EVT_TIMER_ENTER_ENABLE=y
CONFIG_SYSVIEW_EVT_TIMER_EXIT_ENABLE=y
CONFIG_APPTRACE_SV_ENABLE=y
CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_00=y
CONFIG_APPTRACE_SV_EVT_OVERFLOW_ENABLE=y
CONFIG_APPTRACE_SV_EVT_ISR_ENTER_ENABLE=y
CONFIG_APPTRACE_SV_EVT_ISR_EXIT_ENABLE=y
CONFIG_APPTRACE_SV_EVT_ISR_TO_SCHED_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_START_EXEC_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_STOP_EXEC_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_START_READY_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_STOP_READY_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_CREATE_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_TERMINATE_ENABLE=y
CONFIG_APPTRACE_SV_EVT_IDLE_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TIMER_ENTER_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TIMER_EXIT_ENABLE=y


@ -4,24 +4,25 @@ CONFIG_FREERTOS_UNICORE=y
# 1ms tick period
CONFIG_FREERTOS_HZ=1000
# Enable application tracing by default
CONFIG_APPTRACE_DEST_TRAX=y
CONFIG_APPTRACE_DEST_JTAG=y
CONFIG_APPTRACE_MEMBUFS_APPTRACE_PROTO_ENABLE=y
CONFIG_APPTRACE_ENABLE=y
# Enable FreeRTOS SystemView Tracing by default
CONFIG_SYSVIEW_ENABLE=y
CONFIG_SYSVIEW_TS_SOURCE_TIMER_00=y
CONFIG_SYSVIEW_EVT_OVERFLOW_ENABLE=y
CONFIG_SYSVIEW_EVT_ISR_ENTER_ENABLE=y
CONFIG_SYSVIEW_EVT_ISR_EXIT_ENABLE=y
CONFIG_SYSVIEW_EVT_ISR_TO_SCHEDULER_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_START_EXEC_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_STOP_EXEC_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_START_READY_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_STOP_READY_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_CREATE_ENABLE=y
CONFIG_SYSVIEW_EVT_TASK_TERMINATE_ENABLE=y
CONFIG_SYSVIEW_EVT_IDLE_ENABLE=y
CONFIG_SYSVIEW_EVT_TIMER_ENTER_ENABLE=y
CONFIG_SYSVIEW_EVT_TIMER_EXIT_ENABLE=y
CONFIG_APPTRACE_SV_ENABLE=y
CONFIG_APPTRACE_SV_TS_SOURCE_TIMER_00=y
CONFIG_APPTRACE_SV_EVT_OVERFLOW_ENABLE=y
CONFIG_APPTRACE_SV_EVT_ISR_ENTER_ENABLE=y
CONFIG_APPTRACE_SV_EVT_ISR_EXIT_ENABLE=y
CONFIG_APPTRACE_SV_EVT_ISR_TO_SCHED_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_START_EXEC_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_STOP_EXEC_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_START_READY_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_STOP_READY_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_CREATE_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TASK_TERMINATE_ENABLE=y
CONFIG_APPTRACE_SV_EVT_IDLE_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TIMER_ENTER_ENABLE=y
CONFIG_APPTRACE_SV_EVT_TIMER_EXIT_ENABLE=y
# Disable color output in logs
CONFIG_LOG_COLORS=n
# Enable heap tracing to host