Mirror of https://github.com/espressif/esp-idf
esp_common: IPC refactor
- esp_ipc_call_and_wait() can now run simultaneously on both CPUs.
- The priority of ipc_task is raised to the caller's priority when the calling task runs at a higher priority.
- Added the ESP_IPC_USES_CALLERS_PRIORITY option to get back the old IPC behaviour.

pull/4345/head
parent b125bb50ea
commit 6071e2f3c7
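For context, the API this commit refactors is the esp_ipc one from esp_ipc.h (esp_ipc_call() / esp_ipc_call_blocking()). A minimal usage sketch, not part of this commit, assuming a dual-core target; set_remote_flag is a hypothetical callback:

    #include <stdint.h>
    #include "esp_err.h"
    #include "esp_ipc.h"

    // Hypothetical callback: runs in the high-priority IPC task on the target CPU.
    static void set_remote_flag(void* arg)
    {
        *(volatile uint32_t*) arg = 1;
    }

    void app_main(void)
    {
        uint32_t flag = 0;
        // Blocks until set_remote_flag has finished running on CPU 1.
        ESP_ERROR_CHECK(esp_ipc_call_blocking(1, set_remote_flag, &flag));
    }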
@@ -53,6 +53,15 @@ menu "Common ESP-related"
             It can be shrunk if you are sure that you do not use any custom
             IPC functionality.
 
+    config ESP_IPC_USES_CALLERS_PRIORITY
+        bool "IPC runs at caller's priority"
+        default y
+        depends on !FREERTOS_UNICORE
+        help
+            If this option is not enabled then the IPC task will keep the same
+            behavior as before ESP-IDF v4.0, i.e. the IPC task will run at
+            (configMAX_PRIORITIES - 1) priority.
+
     config ESP_TIMER_TASK_STACK_SIZE
         int "High-resolution timer task stack size"
         default 3584
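The new symbol lands in sdkconfig like any other Kconfig bool. A sketch of the two possible outcomes, in Kconfig's standard on/off notation: the first line is the new default (the IPC task inherits the caller's priority), the second restores the legacy fixed (configMAX_PRIORITIES - 1) behaviour:

    CONFIG_ESP_IPC_USES_CALLERS_PRIORITY=y
    # CONFIG_ESP_IPC_USES_CALLERS_PRIORITY is not set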
@@ -25,18 +25,18 @@
 #include "freertos/semphr.h"
 
 static TaskHandle_t s_ipc_task_handle[portNUM_PROCESSORS];
-static SemaphoreHandle_t s_ipc_mutex;                        // This mutex is used as a global lock for esp_ipc_* APIs
+static SemaphoreHandle_t s_ipc_mutex[portNUM_PROCESSORS];    // This mutex is used as a global lock for esp_ipc_* APIs
 static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS];      // Two semaphores used to wake each of ipc tasks
-static SemaphoreHandle_t s_ipc_ack;                          // Semaphore used to acknowledge that task was woken up,
+static SemaphoreHandle_t s_ipc_ack[portNUM_PROCESSORS];      // Semaphore used to acknowledge that task was woken up,
                                                              //   or function has finished running
-static volatile esp_ipc_func_t s_func;                       // Function which should be called by high priority task
-static void * volatile s_func_arg;                           // Argument to pass into s_func
+static volatile esp_ipc_func_t s_func[portNUM_PROCESSORS];   // Function which should be called by high priority task
+static void * volatile s_func_arg[portNUM_PROCESSORS];       // Argument to pass into s_func
 
 typedef enum {
     IPC_WAIT_FOR_START,
     IPC_WAIT_FOR_END
 } esp_ipc_wait_t;
 
-static volatile esp_ipc_wait_t s_ipc_wait;                   // This variable tells high priority task when it should give
+static volatile esp_ipc_wait_t s_ipc_wait[portNUM_PROCESSORS]; // This variable tells high priority task when it should give
                                                              //   s_ipc_ack semaphore: before s_func is called, or
                                                              //   after it returns
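Because every piece of state is now a per-CPU array, a caller targeting each core takes its own s_ipc_mutex[cpu], and the two IPC paths no longer serialize against each other. A hypothetical sketch (task and function names are illustrative) of two pinned tasks driving both IPC tasks at once:

    #include <stdint.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"
    #include "esp_ipc.h"

    static void remote_work(void* arg)
    {
        (void) arg;  // runs on the other core's IPC task
    }

    static void caller(void* arg)
    {
        const uint32_t other_cpu = (uint32_t)(uintptr_t) arg;
        // With per-CPU mutexes, both callers can sit inside
        // esp_ipc_call_blocking() at the same time.
        esp_ipc_call_blocking(other_cpu, remote_work, NULL);
        vTaskDelete(NULL);
    }

    void start_both_callers(void)
    {
        // One caller pinned to each core, each targeting the opposite core.
        xTaskCreatePinnedToCore(caller, "call_to_1", 2048, (void*)(uintptr_t) 1, 5, NULL, 0);
        xTaskCreatePinnedToCore(caller, "call_to_0", 2048, (void*)(uintptr_t) 0, 5, NULL, 1);
    }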
@@ -53,15 +53,15 @@ static void IRAM_ATTR ipc_task(void* arg)
         abort();
     }
 
-    esp_ipc_func_t func = s_func;
-    void* arg = s_func_arg;
+    esp_ipc_func_t func = s_func[cpuid];
+    void* arg = s_func_arg[cpuid];
 
-    if (s_ipc_wait == IPC_WAIT_FOR_START) {
-        xSemaphoreGive(s_ipc_ack);
+    if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_START) {
+        xSemaphoreGive(s_ipc_ack[cpuid]);
     }
     (*func)(arg);
-    if (s_ipc_wait == IPC_WAIT_FOR_END) {
-        xSemaphoreGive(s_ipc_ack);
+    if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_END) {
+        xSemaphoreGive(s_ipc_ack[cpuid]);
     }
 }
 // TODO: currently this is unreachable code. Introduce esp_ipc_uninit
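Parts of ipc_task() fall outside this hunk. A simplified reconstruction of the whole loop after the change, assuming the elided context is the usual take-the-wakeup-semaphore step (local names are illustrative):

    static void IRAM_ATTR ipc_task(void* arg)
    {
        const uint32_t cpuid = (uint32_t) arg;   // core this IPC task is pinned to
        while (true) {
            // Sleep until esp_ipc_call_and_wait() posts work for this core.
            if (xSemaphoreTake(s_ipc_sem[cpuid], portMAX_DELAY) != pdTRUE) {
                abort();
            }
            esp_ipc_func_t func = s_func[cpuid];
            void* func_arg = s_func_arg[cpuid];
            if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_START) {
                xSemaphoreGive(s_ipc_ack[cpuid]);   // release the caller before running func
            }
            (*func)(func_arg);
            if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_END) {
                xSemaphoreGive(s_ipc_ack[cpuid]);   // release the caller after func returns
            }
        }
    }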
@@ -86,11 +86,11 @@ static void esp_ipc_init(void) __attribute__((constructor));
 
 static void esp_ipc_init(void)
 {
-    s_ipc_mutex = xSemaphoreCreateMutex();
-    s_ipc_ack = xSemaphoreCreateBinary();
     char task_name[15];
     for (int i = 0; i < portNUM_PROCESSORS; ++i) {
         snprintf(task_name, sizeof(task_name), "ipc%d", i);
+        s_ipc_mutex[i] = xSemaphoreCreateMutex();
+        s_ipc_ack[i] = xSemaphoreCreateBinary();
         s_ipc_sem[i] = xSemaphoreCreateBinary();
         portBASE_TYPE res = xTaskCreatePinnedToCore(ipc_task, task_name, CONFIG_ESP_IPC_TASK_STACK_SIZE, (void*) i,
                                                     configMAX_PRIORITIES - 1, &s_ipc_task_handle[i], i);
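The hunk is truncated right after xTaskCreatePinnedToCore(); presumably the file goes on to check res, along the lines of the sketch below (the exact handling in the file may differ):

        // Sketch: treat failure to create an IPC task as fatal.
        if (res != pdTRUE) {
            abort();
        }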
@@ -107,19 +107,30 @@ static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for)
         return ESP_ERR_INVALID_STATE;
     }
 
-    xSemaphoreTake(s_ipc_mutex, portMAX_DELAY);
-
+#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
     TaskHandle_t task_handler = xTaskGetCurrentTaskHandle();
     UBaseType_t priority_of_current_task = uxTaskPriorityGet(task_handler);
-    // ipc_task will work with the priority of the caller's task.
-    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
+    UBaseType_t priority_of_running_ipc_task = uxTaskPriorityGet(s_ipc_task_handle[cpu_id]);
+    if (priority_of_running_ipc_task < priority_of_current_task) {
+        vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
+    }
 
-    s_func = func;
-    s_func_arg = arg;
-    s_ipc_wait = wait_for;
+    xSemaphoreTake(s_ipc_mutex[cpu_id], portMAX_DELAY);
+    vTaskPrioritySet(s_ipc_task_handle[cpu_id], priority_of_current_task);
+#else
+    xSemaphoreTake(s_ipc_mutex[0], portMAX_DELAY);
+#endif
+
+    s_func[cpu_id] = func;
+    s_func_arg[cpu_id] = arg;
+    s_ipc_wait[cpu_id] = wait_for;
     xSemaphoreGive(s_ipc_sem[cpu_id]);
-    xSemaphoreTake(s_ipc_ack, portMAX_DELAY);
-    xSemaphoreGive(s_ipc_mutex);
+    xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);
+#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
+    xSemaphoreGive(s_ipc_mutex[cpu_id]);
+#else
+    xSemaphoreGive(s_ipc_mutex[0]);
+#endif
     return ESP_OK;
 }
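Note the asymmetry around the mutex: before taking s_ipc_mutex[cpu_id] the caller only ever raises the IPC task's priority (so an in-flight request finishes promptly and cannot priority-invert the new caller), and once the mutex is held it sets the priority to exactly the caller's, which also lowers it back after a higher-priority caller. A hypothetical test-style sketch of the observable effect on a dual-core target (probe and check_ipc_priority are illustrative names):

    #include <assert.h>
    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"
    #include "esp_ipc.h"

    static void probe(void* arg)
    {
        // Runs inside ipc1. With CONFIG_ESP_IPC_USES_CALLERS_PRIORITY=y (and no
        // concurrent higher-priority caller), its priority equals the caller's.
        *(UBaseType_t*) arg = uxTaskPriorityGet(NULL);
    }

    void check_ipc_priority(void)
    {
        UBaseType_t seen = 0;
        esp_ipc_call_blocking(1, probe, &seen);
        assert(seen == uxTaskPriorityGet(NULL));
    }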