Mirror of https://github.com/espressif/esp-idf
freertos: Fix xTaskIncrementTick()
Previously, xTaskIncrementTick() would always trigger a yield (i.e., return pdTRUE) when called on core 1. This meant that core 1 would call vTaskSwitchContext() on every tick, leading to unnecessary CPU overhead. This commit fixes xTaskIncrementTick() in the following ways:

- Clearly mark IDF additions in xTaskIncrementTick()
- Moved esp_vApplicationTickHook() to be called directly in xPortSysTickHandler() in order to simplify xTaskIncrementTick()
- Only core 0 calls xTaskIncrementTick() in order to simplify the function's logic. Core 0 is solely responsible for managing xTickCount and xPendedTicks
- All other cores call xTaskIncrementTickOtherCores() instead, which is a simplified version of xTaskIncrementTick() and handles the following:
  - Check if core 0 has unblocked a higher priority task to run
  - Check if the current core requires time slicing
  - Call vApplicationTickHook()

pull/9446/head
parent b59309852a
commit 7a4e1fee99
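With this change, esp_vApplicationTickHook() is invoked directly from xPortSysTickHandler() on every core's tick interrupt, so application tick callbacks registered through ESP-IDF's esp_freertos_hooks API still run once per tick on each core. Below is a minimal sketch of registering such a callback, assuming the esp_freertos_hooks.h API (esp_register_freertos_tick_hook_for_cpu()); the counter and function names are illustrative, not part of this commit:

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "esp_freertos_hooks.h"
#include "esp_err.h"

/* Illustrative per-core counters, not part of the commit. */
static volatile uint32_t s_tick_counts[portNUM_PROCESSORS];

/* Runs from the tick interrupt of whichever core it was registered on,
 * dispatched via esp_vApplicationTickHook(). Keep it short and ISR-safe. */
static void example_tick_hook(void)
{
    s_tick_counts[xPortGetCoreID()]++;
}

void example_register_tick_hooks(void)
{
    /* Register the same hook on both cores of a dual-core target. */
    ESP_ERROR_CHECK(esp_register_freertos_tick_hook_for_cpu(example_tick_hook, 0));
    ESP_ERROR_CHECK(esp_register_freertos_tick_hook_for_cpu(example_tick_hook, 1));
}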
@@ -3314,9 +3314,25 @@ BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
  * or
  * + Time slicing is in use and there is a task of equal priority to the
  *   currently running task.
+ *
+ * Note: For SMP, this function must only be called by core 0. Other cores should
+ * call xTaskIncrementTickOtherCores() instead.
  */
 BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION;
 
+#ifdef ESP_PLATFORM
+/*
+ * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
+ * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS
+ * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
+ *
+ * Called from all other cores except core 0 when their tick interrupt
+ * occurs. This function will check if the current core requires time slicing,
+ * and also call the application tick hook.
+ */
+BaseType_t xTaskIncrementTickOtherCores( void ) PRIVILEGED_FUNCTION;
+#endif // ESP_PLATFORM
+
 /*
  * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
  * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
@@ -151,6 +151,8 @@ IRAM_ATTR void SysTickIsrHandler(void *arg)
 
 #endif // CONFIG_FREERTOS_SYSTICK_USES_CCOUNT
 
+extern void esp_vApplicationTickHook(void);
+
 /**
  * @brief Handler of SysTick
  *
@@ -165,11 +167,27 @@ BaseType_t xPortSysTickHandler(void)
     portbenchmarkIntLatency();
 #endif //configBENCHMARK
     traceISR_ENTER(SYSTICK_INTR_ID);
-    BaseType_t ret = xTaskIncrementTick();
-    if(ret != pdFALSE) {
+
+    // Call IDF Tick Hook
+    esp_vApplicationTickHook();
+
+    // Call FreeRTOS Increment tick function
+    BaseType_t xSwitchRequired;
+#if CONFIG_FREERTOS_UNICORE
+    xSwitchRequired = xTaskIncrementTick();
+#else
+    if (xPortGetCoreID() == 0) {
+        xSwitchRequired = xTaskIncrementTick();
+    } else {
+        xSwitchRequired = xTaskIncrementTickOtherCores();
+    }
+#endif
+
+    // Check if yield is required
+    if (xSwitchRequired != pdFALSE) {
         portYIELD_FROM_ISR();
     } else {
         traceISR_EXIT();
     }
-    return ret;
+    return xSwitchRequired;
 }
@@ -58,7 +58,6 @@
 #define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
 #undef _REENT_INIT_PTR
 #define _REENT_INIT_PTR esp_reent_init
-extern void esp_vApplicationTickHook(void);
 extern void esp_vApplicationIdleHook(void);
 #endif //ESP_PLATFORM
 
@@ -3075,33 +3074,34 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
 
 BaseType_t xTaskIncrementTick( void )
 {
+#ifdef ESP_PLATFORM
+    #if ( configNUM_CORES > 1 )
+    {
+        /* Only Core 0 should ever call this function. */
+        configASSERT( xPortGetCoreID() == 0 );
+    }
+    #endif /* ( configNUM_CORES > 1 ) */
+#endif // ESP_PLATFORM
     TCB_t * pxTCB;
     TickType_t xItemValue;
     BaseType_t xSwitchRequired = pdFALSE;
 
-    /* Only allow core 0 increase the tick count in the case of xPortSysTickHandler processing. */
-    /* And allow core 0 and core 1 to unwind uxPendedTicks during xTaskResumeAll. */
-
-    if (xPortInIsrContext())
-    {
-        #if ( configUSE_TICK_HOOK == 1 )
-        vApplicationTickHook();
-        #endif /* configUSE_TICK_HOOK */
-        esp_vApplicationTickHook();
-        if (xPortGetCoreID() != 0 )
-        {
-            return pdTRUE;
-        }
-    }
-
     /* Called by the portable layer each time a tick interrupt occurs.
     * Increments the tick then checks to see if the new tick value will cause any
     * tasks to be unblocked. */
     traceTASK_INCREMENT_TICK( xTickCount );
 
-    if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
+#ifdef ESP_PLATFORM
+    /* We need a critical section here as we are about to access kernel data
+     * structures:
+     * - Other cores could be accessing them simultaneously
+     * - Unlike other ports, we call xTaskIncrementTick() without disabling nested
+     *   interrupts, which in turn is disabled by the critical section. */
+    taskENTER_CRITICAL_ISR();
+#endif // ESP_PLATFORM
+
+    if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
     {
-        taskENTER_CRITICAL_ISR();
         /* Minor optimisation. The tick count cannot change in this
         * block. */
         const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
@@ -3154,7 +3154,7 @@ BaseType_t xTaskIncrementTick( void )
                      * state - so record the item value in
                      * xNextTaskUnblockTime. */
                     xNextTaskUnblockTime = xItemValue;
-                    break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
+                    break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */
                 }
                 else
                 {
@@ -3187,7 +3187,14 @@ BaseType_t xTaskIncrementTick( void )
                      * only be performed if the unblocked task has a
                      * priority that is equal to or higher than the
                      * currently executing task. */
-                    if( pxTCB->uxPriority >= pxCurrentTCB[xPortGetCoreID()]->uxPriority )
+                    #if defined(ESP_PLATFORM) && ( configNUM_CORES > 1 )
+                    /* Since this function is only run on core 0, we
+                     * only need to switch contexts if the unblocked task
+                     * can run on core 0. */
+                    if( ( pxTCB->xCoreID == 0 || pxTCB->xCoreID == tskNO_AFFINITY ) && (pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority) )
+                    #else
+                    if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
+                    #endif
                     {
                         xSwitchRequired = pdTRUE;
                     }
@@ -3206,7 +3213,7 @@ BaseType_t xTaskIncrementTick( void )
              * writer has not explicitly turned time slicing off. */
             #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
             {
-                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[xPortGetCoreID()]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
+                if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ 0 ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
                 {
                     xSwitchRequired = pdTRUE;
                 }
@@ -3216,28 +3223,152 @@ BaseType_t xTaskIncrementTick( void )
                 }
             }
             #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
 
+            #ifdef ESP_PLATFORM
+            #if ( configUSE_TICK_HOOK == 1 )
+            TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
+            #endif /* configUSE_TICK_HOOK */
+            /* Exit the critical section as we have finished accessing the kernel data structures. */
+            taskEXIT_CRITICAL_ISR();
+            #endif // ESP_PLATFORM
+
+            #if ( configUSE_TICK_HOOK == 1 )
+            {
+                /* Guard against the tick hook being called when the pended tick
+                 * count is being unwound (when the scheduler is being unlocked). */
+                #ifdef ESP_PLATFORM
+                if( xPendedCounts == ( TickType_t ) 0 )
+                #else
+                if( xPendedTicks == ( TickType_t ) 0 )
+                #endif
+                {
+                    vApplicationTickHook();
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            #endif /* configUSE_TICK_HOOK */
+
+            #if ( configUSE_PREEMPTION == 1 )
+            {
+                if( xYieldPending[ 0 ] != pdFALSE )
+                {
+                    xSwitchRequired = pdTRUE;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+            }
+            #endif /* configUSE_PREEMPTION */
         }
         else
         {
            ++xPendedTicks;
-        }
+#ifdef ESP_PLATFORM
+            /* Exit the critical section as we have finished accessing the kernel data structures. */
+            taskEXIT_CRITICAL_ISR();
+#endif // ESP_PLATFORM
+
-        #if ( configUSE_PREEMPTION == 1 )
-        {
-            if( xYieldPending[xPortGetCoreID()] != pdFALSE )
-            {
-                xSwitchRequired = pdTRUE;
-            }
-            else
-            {
-                mtCOVERAGE_TEST_MARKER();
-            }
+            /* The tick hook gets called at regular intervals, even if the
+             * scheduler is locked. */
+            #if ( configUSE_TICK_HOOK == 1 )
+            {
+                vApplicationTickHook();
+            }
+            #endif
         }
-        #endif /* configUSE_PREEMPTION */
 
     return xSwitchRequired;
 }
 
+#ifdef ESP_PLATFORM
+#if ( configNUM_CORES > 1 )
+BaseType_t xTaskIncrementTickOtherCores( void )
+{
+    /* Minor optimization. This function can never switch cores mid
+     * execution */
+    BaseType_t xCoreID = xPortGetCoreID();
+    BaseType_t xSwitchRequired = pdFALSE;
+    /* This function should never be called by Core 0. */
+    configASSERT( xCoreID != 0 );
+
+    /* Called by the portable layer each time a tick interrupt occurs.
+     * Increments the tick then checks to see if the new tick value will cause any
+     * tasks to be unblocked. */
+    traceTASK_INCREMENT_TICK( xTickCount );
+
+    if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
+    {
+        /* We need a critical section here as we are about to access kernel data
+         * structures:
+         * - Other cores could be accessing them simultaneously
+         * - Unlike other ports, we call xTaskIncrementTick() without disabling
+         *   nested interrupts, which in turn is disabled by the critical
+         *   section. */
+        taskENTER_CRITICAL_ISR();
+
+        /* A task being unblocked cannot cause an immediate context switch
+         * if preemption is turned off. */
+        #if ( configUSE_PREEMPTION == 1 )
+        {
+            /* Check if core 0 calling xTaskIncrementTick() has
+             * unblocked a task that can be run. */
+            if( uxTopReadyPriority > pxCurrentTCB[xCoreID]->uxPriority )
+            {
+                xSwitchRequired = pdTRUE;
+            } else {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        #endif
+
+        /* Tasks of equal priority to the currently running task will share
+         * processing time (time slice) if preemption is on, and the application
+         * writer has not explicitly turned time slicing off. */
+        #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
+        {
+            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[ xCoreID ]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
+            {
+                xSwitchRequired = pdTRUE;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
+
+        /* Exit the critical section as we have finished accessing the kernel data structures. */
+        taskEXIT_CRITICAL_ISR();
+
+        #if ( configUSE_PREEMPTION == 1 )
+        {
+            if( xYieldPending[ xCoreID ] != pdFALSE )
+            {
+                xSwitchRequired = pdTRUE;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        #endif /* configUSE_PREEMPTION */
+    }
+
+    #if ( configUSE_TICK_HOOK == 1 )
+    {
+        vApplicationTickHook();
+    }
+    #endif
+
+    return xSwitchRequired;
+}
+#endif /* ( configNUM_CORES > 1 ) */
+#endif // ESP_PLATFORM
 
 /*-----------------------------------------------------------*/
 
 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
@@ -175,7 +175,7 @@ The resulting schedule will have Task A running on CPU0 and Task C preempting Ta
 Time Slicing
 ^^^^^^^^^^^^
 
 The Vanilla FreeRTOS scheduler implements time slicing meaning that if current highest ready priority contains multiple ready tasks, the scheduler will switch between those tasks periodically in a round robin fashion.
 
 However, in ESP-IDF FreeRTOS, it is not possible to implement perfect Round Robin time slicing due to the fact that a particular task may not be able to run on a particular core due to the following reasons:
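The core-affinity restrictions mentioned above are what break perfect round-robin scheduling: tasks pinned to different cores never compete for the same core's time slice. A rough illustration of this, assuming the standard ESP-IDF xTaskCreatePinnedToCore() API; the task names, stack size, and priority below are arbitrary:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Illustrative worker; real code would do useful work here. */
static void worker_task(void *arg)
{
    (void) arg;
    for (;;) {
        /* Yield for one tick so the idle task (and task watchdog) still run. */
        vTaskDelay(1);
    }
}

void app_main(void)
{
    /* taskA and taskB share priority 5 on core 0, so core 0's tick can
     * round-robin between them when both are ready. taskC has the same
     * priority but is pinned to core 1, so it never shares a time slice
     * with taskA or taskB. */
    xTaskCreatePinnedToCore(worker_task, "taskA", 4096, NULL, 5, NULL, 0);
    xTaskCreatePinnedToCore(worker_task, "taskB", 4096, NULL, 5, NULL, 0);
    xTaskCreatePinnedToCore(worker_task, "taskC", 4096, NULL, 5, NULL, 1);
}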
@@ -267,7 +267,7 @@ Vanilla FreeRTOS requires that a periodic tick interrupt occurs. The tick interr
 - Checking if time slicing is required (i.e., triggering a context switch)
 - Executing the application tick hook
 
-In ESP-IDF FreeRTOS, each core will receive a periodic interrupt and independently run the tick interrupt. The tick interrupts on each core are of the same period but can be out of phase. Furthermore, the tick interrupt responsibilities listed above are not run by all cores:
+In ESP-IDF FreeRTOS, each core will receive a periodic interrupt and independently run the tick interrupt. The tick interrupts on each core are of the same period but can be out of phase. However, the tick responsibilities listed above are not run by all cores:
 
 - CPU0 will execute all of the tick interrupt responsibilities listed above
 - CPU1 will only check for time slicing and execute the application tick hook
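Because CPU0 alone advances xTickCount while both cores take tick interrupts, the kernel still exposes a single shared tick value to tasks on either core. A minimal sketch of observing this, assuming the standard xTaskGetTickCount(), xTaskCreatePinnedToCore(), and ESP_LOGI() APIs; the task names and log tag are illustrative only:

#include <inttypes.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_log.h"

static const char *TAG = "tick_demo";   /* illustrative log tag */

static void print_tick_task(void *arg)
{
    (void) arg;
    for (;;) {
        /* xTickCount is incremented by CPU0 only, but it is a single shared
         * counter, so tasks on both cores read the same value here. */
        ESP_LOGI(TAG, "core %d sees tick %" PRIu32,
                 (int) xPortGetCoreID(), (uint32_t) xTaskGetTickCount());
        vTaskDelay(pdMS_TO_TICKS(1000));
    }
}

void app_main(void)
{
    xTaskCreatePinnedToCore(print_tick_task, "tick0", 4096, NULL, 5, NULL, 0);
    xTaskCreatePinnedToCore(print_tick_task, "tick1", 4096, NULL, 5, NULL, 1);
}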