From 52a929521df6990599a77992e8186e1437e04902 Mon Sep 17 00:00:00 2001 From: Darian Leung Date: Wed, 1 Nov 2023 00:02:58 +0800 Subject: [PATCH] refactor(freertos/idf): Update thread safety convenience macros This commit refactors some of the thread safety convenience macros by removing some repeated definitions and keeping them all in "freertos_idf_additions_priv.h" --- components/freertos/FreeRTOS-Kernel/queue.c | 70 ++------- components/freertos/FreeRTOS-Kernel/tasks.c | 138 +++++------------- .../freertos_tasks_c_additions.h | 8 +- .../esp_private/freertos_idf_additions_priv.h | 122 ++++++++++++---- 4 files changed, 142 insertions(+), 196 deletions(-) diff --git a/components/freertos/FreeRTOS-Kernel/queue.c b/components/freertos/FreeRTOS-Kernel/queue.c index 32e787a26d..185dd47aef 100644 --- a/components/freertos/FreeRTOS-Kernel/queue.c +++ b/components/freertos/FreeRTOS-Kernel/queue.c @@ -54,56 +54,6 @@ * correct privileged Vs unprivileged linkage and placement. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ -/* Some code sections require extra critical sections when building for SMP - * ( configNUMBER_OF_CORES > 1 ). */ -#if ( configNUMBER_OF_CORES > 1 ) -/* Macros that Enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) - - static inline __attribute__( ( always_inline ) ) - void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskENTER_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskENTER_CRITICAL_ISR( pxLock ); - } - } - - static inline __attribute__( ( always_inline ) ) - void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskEXIT_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskEXIT_CRITICAL_ISR( pxLock ); - } - } -#else /* configNUMBER_OF_CORES > 1 */ - /* Macros that Enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) -#endif /* configNUMBER_OF_CORES > 1 */ - /* Single core FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList() * calls are deterministic (as queue locks use scheduler suspension instead of * critical sections). However, the SMP implementation is non-deterministic @@ -3109,7 +3059,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue registry lock in case another * core updates the register simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { if( pcQueueName != NULL ) { @@ -3145,7 +3095,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously taken queue registry lock. 
*/ - taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } #endif /* configQUEUE_REGISTRY_SIZE */ @@ -3162,7 +3112,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue registry lock in case another * core updates the register simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { /* Note there is nothing here to protect against another task adding or * removing entries from the registry while it is being searched. */ @@ -3181,7 +3131,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously taken queue registry lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); return pcReturn; } /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */ @@ -3199,7 +3149,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue registry lock in case another * core updates the register simultaneously. */ - taskENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvENTER_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); { /* See if the handle of the queue being unregistered is actually in the * registry. */ @@ -3223,7 +3173,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously taken queue registry lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xQueueRegistryLock ); } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ #endif /* configQUEUE_REGISTRY_SIZE */ @@ -3247,7 +3197,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* For SMP, we need to take the queue's xQueueLock as we are about to * access the queue. */ - taskENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); + prvENTER_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); { #if ( queueUSE_LOCKS == 1 ) { @@ -3278,7 +3228,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) #endif /* queueUSE_LOCKS == 1 */ } /* Release the previously taken xQueueLock. */ - taskEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); + prvEXIT_CRITICAL_SMP_ONLY( &( pxQueue->xQueueLock ) ); } #endif /* configUSE_TIMERS */ @@ -3413,7 +3363,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* In SMP, queue sets have their own xQueueLock. Thus we need to also * acquire the queue set's xQueueLock before accessing it. */ - taskENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); + prvENTER_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); { if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) { @@ -3463,7 +3413,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } } /* Release the previously acquired queue set's xQueueLock. 
*/ - taskEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); + prvEXIT_CRITICAL_SAFE_SMP_ONLY( &( pxQueueSetContainer->xQueueLock ) ); return xReturn; } diff --git a/components/freertos/FreeRTOS-Kernel/tasks.c b/components/freertos/FreeRTOS-Kernel/tasks.c index 8cda33056b..bdc0e26225 100644 --- a/components/freertos/FreeRTOS-Kernel/tasks.c +++ b/components/freertos/FreeRTOS-Kernel/tasks.c @@ -65,74 +65,6 @@ #include <stdio.h> #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ -/* Some code sections require extra critical sections when building for SMP - * ( configNUMBER_OF_CORES > 1 ). */ -#if ( configNUMBER_OF_CORES > 1 ) - /* Macros that enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) - #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) - /* Macros that enter/exit a critical section only when building for single-core */ - #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - /* Macros that enable/disable interrupts only when building for SMP */ - #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR() - #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus ) - - static inline __attribute__( ( always_inline ) ) - void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskENTER_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskENTER_CRITICAL_ISR( pxLock ); - } - } - - static inline __attribute__( ( always_inline ) ) - void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) - { - if( portCHECK_IF_IN_ISR() == pdFALSE ) - { - taskEXIT_CRITICAL( pxLock ); - } - else - { - #ifdef __clang_analyzer__ - /* Teach clang-tidy that ISR version macro can be different */ - configASSERT( 1 ); - #endif - taskEXIT_CRITICAL_ISR( pxLock ); - } - } - -#else /* configNUMBER_OF_CORES > 1 */ - /* Macros that enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) - #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) - /* Macros that enter/exit a critical section only when building for single-core */ - #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - /* Macros that enable/disable interrupts only when building for SMP */ - #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 ) - #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus ) - -#endif /* configNUMBER_OF_CORES > 1 */ - #if ( configUSE_PREEMPTION == 0 ) /* If the cooperative scheduler is being used then a yield should not be @@ -1475,7 +1407,7 @@ static void 
prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Force a reschedule if it is the currently running task that has just * been deleted. */ @@ -1493,7 +1425,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* INCLUDE_vTaskDelete */ @@ -2448,7 +2380,7 @@ void vTaskStartScheduler( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) { @@ -2463,7 +2395,7 @@ void vTaskStartScheduler( void ) xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); /* If configGENERATE_RUN_TIME_STATS is defined then the following * macro must be defined to configure the timer/counter used to generate @@ -2513,12 +2445,12 @@ void vTaskEndScheduler( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { xSchedulerRunning = pdFALSE; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); vPortEndScheduler(); } /*----------------------------------------------------------*/ @@ -2768,7 +2700,7 @@ TickType_t xTaskGetTickCountFromISR( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); { uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { @@ -2777,7 +2709,7 @@ TickType_t xTaskGetTickCountFromISR( void ) portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_ISR_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -3176,7 +3108,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) * the event list too. Interrupts can touch the event list item, * even though the scheduler is suspended, so a critical section * is used. */ - taskENTER_CRITICAL_SC_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SC_ONLY( &xKernelLock ); { if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) { @@ -3192,7 +3124,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) mtCOVERAGE_TEST_MARKER(); } } - taskEXIT_CRITICAL_SC_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SC_ONLY( &xKernelLock ); /* Place the unblocked task into the appropriate ready list. */ prvAddTaskToReadyList( pxTCB ); @@ -3255,7 +3187,7 @@ BaseType_t xTaskIncrementTick( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures (unlike single core which calls this function with * interrupts disabled). 
*/ - taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE ) { @@ -3425,7 +3357,7 @@ BaseType_t xTaskIncrementTick( void ) /* Release the previously taken kernel lock as we have finished accessing * the kernel data structures. */ - taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); #if ( configUSE_TICK_HOOK == 1 ) { @@ -3648,7 +3580,7 @@ void vTaskSwitchContext( void ) /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures (unlike single core which calls this function with * either interrupts disabled or when the scheduler hasn't started yet). */ - taskENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -3733,7 +3665,7 @@ void vTaskSwitchContext( void ) /* Release the previously taken kernel lock as we have finished accessing * the kernel data structures. */ - taskEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3748,7 +3680,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Place the event list item of the TCB in the appropriate event list. * This is placed in the list in priority order so the highest priority task @@ -3766,7 +3698,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3794,7 +3726,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Store the item value in the event list item. It is safe to access the * event list item here as interrupts won't access the event list item of a @@ -3811,7 +3743,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } /*-----------------------------------------------------------*/ @@ -3831,7 +3763,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* For SMP, we need to take the kernel lock here as we are about to access * kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Place the event list item of the TCB in the appropriate event list. * In this case it is assumed that this is the only task that is going to @@ -4630,7 +4562,7 @@ static void prvCheckTasksWaitingTermination( void ) /* A critical section is required for SMP in case another core modifies * the task simultaneously. 
*/ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); @@ -4732,7 +4664,7 @@ static void prvCheckTasksWaitingTermination( void ) } } /* Exit the previously entered critical section. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_TRACE_FACILITY */ @@ -4954,11 +4886,11 @@ static void prvResetNextTaskUnblockTime( void ) * For single-core a critical section is not required as this is not * called from an interrupt and the current TCB will always be the same * for any individual execution thread. */ - uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY(); + uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY(); { xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; } - taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); + prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); return xReturn; } @@ -4982,7 +4914,7 @@ static void prvResetNextTaskUnblockTime( void ) * * We use the ISR versions of interrupt macros as this function could be * called inside critical sections. */ - uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY(); + uxSavedInterruptStatus = prvDISABLE_INTERRUPTS_ISR_SMP_ONLY(); { if( xSchedulerRunning == pdFALSE ) { @@ -5000,7 +4932,7 @@ static void prvResetNextTaskUnblockTime( void ) } } } - taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); + prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); return xReturn; } @@ -5017,7 +4949,7 @@ static void prvResetNextTaskUnblockTime( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5100,7 +5032,7 @@ static void prvResetNextTaskUnblockTime( void ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -5117,7 +5049,7 @@ static void prvResetNextTaskUnblockTime( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { if( pxMutexHolder != NULL ) { @@ -5187,7 +5119,7 @@ static void prvResetNextTaskUnblockTime( void ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } @@ -5206,7 +5138,7 @@ static void prvResetNextTaskUnblockTime( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { if( pxMutexHolder != NULL ) { @@ -5302,7 +5234,7 @@ static void prvResetNextTaskUnblockTime( void ) } } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } #endif /* configUSE_MUTEXES */ @@ -5637,7 +5569,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* For SMP, we need to take the kernel lock here to ensure nothing else * modifies the task's event item value simultaneously. 
*/ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5648,7 +5580,7 @@ TickType_t uxTaskResetEventItemValue( void ) * queues and semaphores. */ listSET_LIST_ITEM_VALUE( &( pxCurrentTCBs[ xCurCoreID ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCBs[ xCurCoreID ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ } - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); /* Release the previously taken kernel lock. */ return uxReturn; @@ -5663,7 +5595,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { /* Get current core ID as we can no longer be preempted. */ const BaseType_t xCurCoreID = portGET_CORE_ID(); @@ -5678,7 +5610,7 @@ TickType_t uxTaskResetEventItemValue( void ) xReturn = pxCurrentTCBs[ xCurCoreID ]; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return xReturn; } diff --git a/components/freertos/esp_additions/freertos_tasks_c_additions.h b/components/freertos/esp_additions/freertos_tasks_c_additions.h index c7a9fe6ab8..dc8cde3522 100644 --- a/components/freertos/esp_additions/freertos_tasks_c_additions.h +++ b/components/freertos/esp_additions/freertos_tasks_c_additions.h @@ -507,12 +507,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) /* For SMP, we need to take the kernel lock here as we are about to * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { ulRunTimeCounter = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); return ulRunTimeCounter; } @@ -539,12 +539,12 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) { /* For SMP, we need to take the kernel lock here as we are about * to access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + prvENTER_CRITICAL_SMP_ONLY( &xKernelLock ); { ulReturn = xIdleTaskHandle[ xCoreID ]->ulRunTimeCounter / ulTotalTime; } /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + prvEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); } else { diff --git a/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h b/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h index 596b313c44..2d520f8d3f 100644 --- a/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h +++ b/components/freertos/esp_additions/include/esp_private/freertos_idf_additions_priv.h @@ -13,6 +13,7 @@ #include "sdkconfig.h" #include "freertos/FreeRTOS.h" +#include "freertos/task.h" /* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif /* *INDENT-ON* */ @@ -26,52 +27,117 @@ /* * The following macros are convenience macros used to account for different - * thread safety behavior between Vanilla FreeRTOS (i.e., single-core) and ESP-IDF - * FreeRTOS (i.e., multi-core SMP). + * thread safety behavior between single-core and SMP in ESP-IDF FreeRTOS. * * For thread safety... 
* - * - Vanilla FreeRTOS will use the following for thread safety (depending on situation) + * - Single-core will use the following for thread safety (depending on situation) * - `vTaskSuspendAll()`/`xTaskResumeAll()` for non-deterministic operations * - Critical sections or disabling interrupts for deterministic operations - * - ESP-IDF FreeRTOS will always use critical sections (determinism is not supported) - * - * [refactor-todo]: Define these locally in each kernel source file (IDF-8161) + * - SMP will always use critical sections (determinism is not supported) */ #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) ) - #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x ) taskENTER_CRITICAL( ( x ) ) - #define prvEXIT_CRITICAL_OR_RESUME_ALL( x ) ( { taskEXIT_CRITICAL( ( x ) ); pdFALSE; } ) - #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \ - { \ - taskENTER_CRITICAL_ISR( ( pxLock ) ); \ - ( void ) ( uxInterruptStatus ); \ +/* Macros that use a critical section when building for SMP */ + #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock ) taskENTER_CRITICAL( ( pxLock ) ) + #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock ) ( { taskEXIT_CRITICAL( ( pxLock ) ); pdFALSE; } ) + #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \ + { \ + taskENTER_CRITICAL_ISR( ( pxLock ) ); \ + ( void ) ( uxStatus ); \ } - #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \ - { \ - taskEXIT_CRITICAL_ISR( ( pxLock ) ); \ - ( void ) ( uxInterruptStatus ); \ + #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \ + { \ + taskEXIT_CRITICAL_ISR( ( pxLock ) ); \ + ( void ) ( uxStatus ); \ } +/* Macros that enter/exit a critical section only when building for SMP */ + #define prvENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define prvEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) + #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) + #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) + + static inline __attribute__( ( always_inline ) ) + void prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskENTER_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskENTER_CRITICAL_ISR( pxLock ); + } + } + + static inline __attribute__( ( always_inline ) ) + void prvTaskExitCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + taskEXIT_CRITICAL( pxLock ); + } + else + { + #ifdef __clang_analyzer__ + /* Teach clang-tidy that ISR version macro can be different */ + configASSERT( 1 ); + #endif + taskEXIT_CRITICAL_ISR( pxLock ); + } + } + +/* Macros that enter/exit a critical section only when building for single-core */ + #define prvENTER_CRITICAL_SC_ONLY( pxLock ) + #define prvEXIT_CRITICAL_SC_ONLY( pxLock ) + +/* Macros that enable/disable interrupts only when building for SMP */ + #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR() + #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus ) + #elif ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) - #define prvENTER_CRITICAL_OR_SUSPEND_ALL( x ) ( { vTaskSuspendAll(); ( void ) ( x ); 
} ) - #define prvEXIT_CRITICAL_OR_RESUME_ALL( x ) xTaskResumeAll() - #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxInterruptStatus ) \ { \ ( uxInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ ( void ) ( pxLock ); \ +/* Macros that suspend the scheduler or disable interrupts when building for single-core */ + #define prvENTER_CRITICAL_OR_SUSPEND_ALL( pxLock ) ( { vTaskSuspendAll(); ( void ) ( pxLock ); } ) + #define prvEXIT_CRITICAL_OR_RESUME_ALL( pxLock ) xTaskResumeAll() + #define prvENTER_CRITICAL_OR_MASK_ISR( pxLock, uxStatus ) \ { \ ( uxStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \ ( void ) ( pxLock ); \ } - #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxInterruptStatus ) \ { \ portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxInterruptStatus ) ); \ ( void ) ( pxLock ); \ + #define prvEXIT_CRITICAL_OR_UNMASK_ISR( pxLock, uxStatus ) \ { \ portCLEAR_INTERRUPT_MASK_FROM_ISR( ( uxStatus ) ); \ ( void ) ( pxLock ); \ } +/* Macros that enter/exit a critical section only when building for SMP */ + #define prvENTER_CRITICAL_SMP_ONLY( pxLock ) + #define prvEXIT_CRITICAL_SMP_ONLY( pxLock ) + #define prvENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define prvEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) + #define prvENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) + #define prvEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) + +/* Macros that enter/exit a critical section only when building for single-core */ + #define prvENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define prvEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + +/* Macros that enable/disable interrupts only when building for SMP */ + #define prvDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 ) + #define prvENABLE_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus ) + #endif /* ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES == 1 ) ) */ /* - * In ESP-IDF FreeRTOS (i.e., multi-core SMP) uses spinlocks to protect different + * In ESP-IDF FreeRTOS under SMP builds, spinlocks are used to protect different * groups of data. This function is a wrapper to take the "xKernelLock" spinlock * of tasks.c. * @@ -87,8 +153,6 @@ * vEventGroupDelete() as both those functions will directly access event lists * (which are kernel data structures). Thus, a wrapper function must be provided * to take/release the "xKernelLock" from outside tasks.c. - * - * [refactor-todo]: Extern this locally in event groups (IDF-8161) */ #if ( !CONFIG_FREERTOS_SMP && ( configNUM_CORES > 1 ) )
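
Usage sketch (illustrative, not part of the patch): the fragment below shows how a kernel source file is expected to consume the consolidated prv...() convenience macros after this refactor. The lock xDataLock, the counter uxSharedCounter, and the three prv...() functions are hypothetical names invented for this example; only the macro names and their two expansions come from "freertos_idf_additions_priv.h" above. Since the header lives under esp_private/, these macros are internal to the freertos component; application code should keep using the public taskENTER_CRITICAL()/taskEXIT_CRITICAL() API.

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_private/freertos_idf_additions_priv.h"

/* Hypothetical kernel data, guarded by its own spinlock on SMP builds. */
static portMUX_TYPE xDataLock = portMUX_INITIALIZER_UNLOCKED;
static volatile UBaseType_t uxSharedCounter = 0;

static void prvIncrementSharedCounter( void )
{
    /* On SMP this expands to taskENTER_CRITICAL( &xDataLock ). On a
     * single-core build it expands to nothing, matching the behavior of
     * the local taskENTER_CRITICAL_SMP_ONLY() definitions this patch
     * removes from queue.c and tasks.c. */
    prvENTER_CRITICAL_SMP_ONLY( &xDataLock );
    {
        uxSharedCounter++;
    }
    prvEXIT_CRITICAL_SMP_ONLY( &xDataLock );
}

static UBaseType_t prvReadSharedCounter( void )
{
    UBaseType_t uxValue;

    /* On SMP, the "SAFE" variant checks portCHECK_IF_IN_ISR() at run time
     * and selects the task or ISR critical section accordingly, so one
     * code path can be shared by task and interrupt callers. On
     * single-core it compiles away entirely. */
    prvENTER_CRITICAL_SAFE_SMP_ONLY( &xDataLock );
    {
        uxValue = uxSharedCounter;
    }
    prvEXIT_CRITICAL_SAFE_SMP_ONLY( &xDataLock );

    return uxValue;
}

static void prvWalkSharedData( void )
{
    /* Non-deterministic operation: a critical section on SMP, scheduler
     * suspension on single-core, mirroring the vanilla FreeRTOS pattern
     * described in the header comment above. */
    prvENTER_CRITICAL_OR_SUSPEND_ALL( &xDataLock );
    {
        /* ... walk or rebuild a kernel data structure here ... */
    }
    ( void ) prvEXIT_CRITICAL_OR_RESUME_ALL( &xDataLock );
}

The enter/exit calls wrap a bare brace block, following the style used at the call sites in queue.c and tasks.c, so the extent of each critical section stays visually obvious even when a macro expands to nothing on one of the two build configurations.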