freertos: Add portTRY_ENTER_CRITICAL() and deprecate legacy spinlock functions

Add the portTRY_ENTER_CRITICAL() API to allow for timeouts when entering critical sections; a short usage sketch is included below.
The following port APIs were added:
- portTRY_ENTER_CRITICAL()
- portTRY_ENTER_CRITICAL_ISR()
- portTRY_ENTER_CRITICAL_SAFE()

Deprecated the legacy spinlock API in favor of spinlock.h. The following functions were deprecated:
- vPortCPUInitializeMutex()
- vPortCPUAcquireMutex()
- vPortCPUAcquireMutexTimeout()
- vPortCPUReleaseMutex()

Other Changes:
- Added portMUX_INITIALIZE() to replace vPortCPUInitializeMutex()
- The assembly of the critical section functions ends up being about 50 instructions longer,
  thus the spinlock test pass threshold had to be increased to account for the extra runtime.
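
A minimal usage sketch of the new API (illustrative only, not part of this commit; the lock name and the 10000-cycle budget are arbitrary):

// Usage sketch: bound the time spent waiting for a contended critical section.
static portMUX_TYPE s_my_lock = portMUX_INITIALIZER_UNLOCKED;   // or portMUX_INITIALIZE(&s_my_lock) at runtime

void update_shared_state(void)
{
    // Spin for at most 10000 CPU cycles; portMUX_NO_TIMEOUT would wait forever,
    // portMUX_TRY_LOCK would make a single attempt.
    if (portTRY_ENTER_CRITICAL(&s_my_lock, 10000) == pdPASS) {
        // ... modify state shared with the other core / ISRs ...
        portEXIT_CRITICAL(&s_my_lock);
    } else {
        // pdFAIL: the spinlock was not taken; back off and retry later
    }
}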

Closes https://github.com/espressif/esp-idf/issues/5301
pull/7958/head
Darian Leung 2021-10-30 00:48:19 +08:00
parent 7207a3c4d5
commit 9b3796d2f1
19 changed files with 370 additions and 299 deletions

@@ -60,16 +60,10 @@ esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *
int res;
while (1) {
//Todo: Replace the current locking mechanism and int_state with portTRY_ENTER_CRITICAL() instead.
// do not overwrite lock->int_state before we actually acquired the mux
unsigned int_state = portSET_INTERRUPT_MASK_FROM_ISR();
// FIXME: if mux is busy it is not a good idea to loop for the whole tmo with IRQs disabled.
// So we check mux state using a zero tmo, restore IRQs, and let other tasks/IRQs run on this CPU
// while we are doing our own tmo check.
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0, __FUNCTION__, __LINE__);
#else
bool success = vPortCPUAcquireMutexTimeout(&lock->mux, 0);
#endif
if (success) {
lock->int_state = int_state;
return ESP_OK;
@@ -90,11 +84,7 @@ esp_err_t esp_apptrace_lock_give(esp_apptrace_lock_t *lock)
unsigned int_state = lock->int_state;
// after the call to the following func we can not be sure that lock->int_state
// is not overwritten by another CPU that acquired the mux just after we released it. See esp_apptrace_lock_take().
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUReleaseMutex(&lock->mux, __FUNCTION__, __LINE__);
#else
vPortCPUReleaseMutex(&lock->mux);
#endif
portCLEAR_INTERRUPT_MASK_FROM_ISR(int_state);
return ESP_OK;
}
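
The Todo in the hunk above hints at the eventual direction. A hypothetical sketch of that rework (not part of this commit; the tmo expiry handling is elided) could look like:

// Hypothetical sketch only: take the apptrace lock via the new API instead of the
// manual interrupt-mask + vPortCPUAcquireMutexTimeout() pair.
esp_err_t esp_apptrace_lock_take(esp_apptrace_lock_t *lock, esp_apptrace_tmo_t *tmo)
{
    while (1) {
        // portMUX_TRY_LOCK makes a single attempt, so interrupts stay masked only
        // briefly on each pass of the timeout loop.
        if (portTRY_ENTER_CRITICAL(&lock->mux, portMUX_TRY_LOCK) == pdPASS) {
            return ESP_OK;
        }
        // ... the existing tmo expiry check would go here, returning ESP_ERR_TIMEOUT ...
    }
}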

@@ -68,7 +68,7 @@ typedef struct {
*/
static inline void esp_apptrace_lock_init(esp_apptrace_lock_t *lock)
{
vPortCPUInitializeMutex(&lock->mux);
portMUX_INITIALIZE(&lock->mux);
lock->int_state = 0;
}

@@ -52,6 +52,15 @@ static inline void __attribute__((always_inline)) spinlock_initialize(spinlock_t
/**
* @brief Top level spinlock acquire function, spins until the lock is acquired
*
* This function will:
* - Save current interrupt state, then disable interrupts
* - Spin until lock is acquired or until timeout occurs
* - Restore interrupt state
*
* @note Spinlocks alone do not constitute true critical sections (as this
* function reenables interrupts once the spinlock is acquired). For critical
* sections, use the interface provided by the operating system.
* @param lock - target spinlock object
* @param timeout - cycles to wait, passing SPINLOCK_WAIT_FOREVER blocks indefinitely
*/
@@ -125,6 +134,15 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
/**
* @brief Top level spinlock unlock function, unlocks a previously locked spinlock
*
* This function will:
* - Save current interrupt state, then disable interrupts
* - Release the spinlock
* - Restore interrupt state
*
* @note Spinlocks alone do not constitute true critical sections (as this
* function reenables interrupts once the spinlock is released). For critical
* sections, use the interface provided by the operating system.
* @param lock - target spinlock object (must already be locked)
*/
static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *lock)
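
To make the documented acquire/release flow concrete, here is a minimal sketch (not part of this commit; the lock and counter names are illustrative):

// Sketch: a bare spinlock guarding a counter shared between cores.
// Per the note above, this alone is not a full critical section.
#include <stdint.h>
#include "spinlock.h"

static spinlock_t s_counter_lock = SPINLOCK_INITIALIZER;
static volatile uint32_t s_counter = 0;

void counter_increment(void)
{
    // Spin as long as needed; SPINLOCK_NO_WAIT would make a single attempt instead.
    spinlock_acquire(&s_counter_lock, SPINLOCK_WAIT_FOREVER);
    s_counter++;
    spinlock_release(&s_counter_lock);
}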

@@ -273,7 +273,7 @@ static void prvInitializeNewRingbuffer(size_t xBufferSize,
pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
}
xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxNewRingbuffer));
vPortCPUInitializeMutex(&pxNewRingbuffer->mux);
portMUX_INITIALIZE(&pxNewRingbuffer->mux);
}
static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)

@@ -143,7 +143,7 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
traceEVENT_GROUP_CREATE( pxEventBits );
#ifdef ESP_PLATFORM
vPortCPUInitializeMutex( &pxEventBits->eventGroupMux );
portMUX_INITIALIZE( &pxEventBits->eventGroupMux );
#endif // ESP_PLATFORM
}
else
@@ -196,7 +196,7 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
#endif /* configSUPPORT_STATIC_ALLOCATION */
#ifdef ESP_PLATFORM
vPortCPUInitializeMutex( &pxEventBits->eventGroupMux );
portMUX_INITIALIZE( &pxEventBits->eventGroupMux );
#endif // ESP_PLATFORM
traceEVENT_GROUP_CREATE( pxEventBits );

@@ -162,69 +162,19 @@ BaseType_t xPortInterruptedFromISRContext(void);
typedef struct {
uint32_t owner;
uint32_t count;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
const char *lastLockedFn;
int lastLockedLine;
#endif
} portMUX_TYPE;
/**< Spinlock initializer */
#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG
#define portMUX_INITIALIZER_UNLOCKED { \
#define portMUX_INITIALIZER_UNLOCKED { \
.owner = portMUX_FREE_VAL, \
.count = 0, \
}
#else
#define portMUX_INITIALIZER_UNLOCKED { \
.owner = portMUX_FREE_VAL, \
.count = 0, \
.lastLockedFn = "(never locked)", \
.lastLockedLine = -1 \
}
#endif /* CONFIG_FREERTOS_PORTMUX_DEBUG */
#define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
/**
* @brief Initialize a spinlock
*
* - Initializes a spinlock that is used by FreeRTOS SMP critical sections
*
* @note [refactor-todo] We can make this inline or consider making it a macro
* @param[in] mux Spinlock
*/
void vPortCPUInitializeMutex(portMUX_TYPE *mux);
/**
* @brief Acquire a spinlock
*
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this should be inlined
* @param[in] mux Spinlock
*/
void vPortCPUAcquireMutex(portMUX_TYPE *mux);
/**
* @brief Acquire a spinlock but with a specified timeout
*
* @note [refactor-todo] Check if we still need this
* @note [refactor-todo] Check if this should be inlined
* @note [refactor-todo] Check if this function should be renamed (due to bool return type)
* @param[in] mux Spinlock
* @param[in] timeout Timeout in number of CPU cycles
* @return true Spinlock acquired
* @return false Timed out
*/
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles);
/**
* @brief Release a spinlock
*
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this should be inlined
* @param[in] mux Spinlock
*/
void vPortCPUReleaseMutex(portMUX_TYPE *mux);
#define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
#define portMUX_INITIALIZE(mux) ({ \
(mux)->owner = portMUX_FREE_VAL; \
(mux)->count = 0; \
})
/**
* @brief Wrapper for atomic compare-and-set instruction
@@ -398,11 +348,19 @@ static inline BaseType_t IRAM_ATTR xPortGetCoreID(void)
// ------------------ Critical Sections --------------------
#define portENTER_CRITICAL(mux) {(void)mux; vPortEnterCritical();}
#define portEXIT_CRITICAL(mux) {(void)mux; vPortExitCritical();}
#define portENTER_CRITICAL(mux) {(void)mux; vPortEnterCritical();}
#define portEXIT_CRITICAL(mux) {(void)mux; vPortExitCritical();}
#define portTRY_ENTER_CRITICAL(mux, timeout) ({ \
(void)mux; (void)timeout; \
vPortEnterCritical(); \
BaseType_t ret = pdPASS; \
ret; \
})
//In single-core RISC-V, we can use the same critical section API
#define portENTER_CRITICAL_ISR(mux) portENTER_CRITICAL(mux)
#define portEXIT_CRITICAL_ISR(mux) portEXIT_CRITICAL(mux)
#define portENTER_CRITICAL_ISR(mux) portENTER_CRITICAL(mux)
#define portEXIT_CRITICAL_ISR(mux) portEXIT_CRITICAL(mux)
#define portTRY_ENTER_CRITICAL_ISR(mux, timeout) portTRY_ENTER_CRITICAL(mux, timeout)
/* [refactor-todo] on RISC-V, both ISR and non-ISR cases result in the same call. We can redefine this macro */
#define portENTER_CRITICAL_SAFE(mux) ({ \
if (xPortInIsrContext()) { \
@@ -418,6 +376,7 @@ static inline BaseType_t IRAM_ATTR xPortGetCoreID(void)
portEXIT_CRITICAL(mux); \
} \
})
#define portTRY_ENTER_CRITICAL_SAFE(mux, timeout) portENTER_CRITICAL_SAFE(mux, timeout)
// ---------------------- Yielding -------------------------

@@ -32,3 +32,63 @@ static inline void __attribute__((deprecated)) portEXIT_CRITICAL_NESTED(UBaseTyp
{
portCLEAR_INTERRUPT_MASK_FROM_ISR(prev_level);
}
/* ---------------------- Spinlocks --------------------- */
/**
* @brief Deprecated placeholder function to initialize a spinlock
*
* Currently does nothing.
*
* @deprecated This function is deprecated. If on multi-core, use spinlock_initialize() instead
* @param[in] mux Spinlock
*/
static inline void __attribute__((deprecated)) __attribute__((always_inline)) vPortCPUInitializeMutex(portMUX_TYPE *mux)
{
(void)mux;
}
/**
* @brief Deprecated placeholder function to acquire a spinlock
*
* Currently does nothing.
*
* @deprecated This function is deprecated. If on multi-core, use spinlock_acquire() instead
* @param[in] mux Spinlock
*/
static inline void __attribute__((deprecated)) __attribute__((always_inline)) vPortCPUAcquireMutex(portMUX_TYPE *mux)
{
(void)mux;
}
/**
* @brief Deprecated placeholder function to acquire a spinlock with a specified timeout
*
* Currently just returns true
*
* @deprecated This function is deprecated. If on multi-core, use spinlock_acquire() instead
* @note Does not have deprecated attribute due to usage in app_trace_util.c
* @param[in] mux Spinlock
* @param[in] timeout Timeout in number of CPU cycles
* @return true Always returns true
*/
static inline bool __attribute__((always_inline)) vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles)
{
(void)mux;
(void)timeout_cycles;
return true;
}
/**
* @brief Deprecated placeholder function to release a spinlock
*
* Currently does nothing.
*
* @deprecated This function is deprecated. If on multi-core, use spinlock_release() instead
* @note Does not have deprecated attribute due to usage in app_trace_util.c
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortCPUReleaseMutex(portMUX_TYPE *mux)
{
(void)mux;
}

@@ -260,27 +260,7 @@ BaseType_t IRAM_ATTR xPortInterruptedFromISRContext(void)
// ---------------------- Spinlocks ------------------------
void vPortCPUInitializeMutex(portMUX_TYPE *mux)
{
(void)mux; //TODO: IDF-2393
}
void vPortCPUAcquireMutex(portMUX_TYPE *mux)
{
(void)mux; //TODO: IDF-2393
}
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles)
{
(void)mux; //TODO: IDF-2393
(void)timeout_cycles;
return true;
}
void vPortCPUReleaseMutex(portMUX_TYPE *mux)
{
(void)mux; //TODO: IDF-2393
}
// ------------------ Critical Sections --------------------

@@ -179,128 +179,112 @@ static inline void vPortClearInterruptMaskFromISR(UBaseType_t prev_level);
* @note [refactor-todo] Check if these comments are still true
* ------------------------------------------------------ */
typedef spinlock_t portMUX_TYPE; /**< Spinlock type used by FreeRTOS critical sections */
#define portMUX_INITIALIZER_UNLOCKED SPINLOCK_INITIALIZER /**< Spinlock initializer */
#define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
/**
* @brief Initialize a spinlock
*
* - Initializes a spinlock that is used by FreeRTOS SMP critical sections
*
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortCPUInitializeMutex(portMUX_TYPE *mux);
/**
* @brief Acquire a spinlock
*
* @note [refactor-todo] check if we still need this
*
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortCPUAcquireMutex(portMUX_TYPE *mux);
/**
* @brief Acquire a spinlock but with a specified timeout
*
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this function should be renamed (due to bool return type)
*
* @param[in] mux Spinlock
* @param timeout
* @return true Spinlock acquired
* @return false Timed out
*/
static inline bool __attribute__((always_inline)) vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout);
/**
* @brief Release a spinlock
*
* @note [refactor-todo] check if we still need this
*
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortCPUReleaseMutex(portMUX_TYPE *mux);
/**
* @brief Wrapper for atomic compare-and-set instruction
*
* This subroutine will atomically compare *addr to 'compare'. If *addr == compare, *addr is set to *set. *set is
* updated with the previous value of *addr (either 'compare' or some other value.)
*
* @warning From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the "bitwise inverse" of
* the old mem if the mem wasn't written. This doesn't seem to happen on the ESP32 (portMUX assertions would
* fail).
*
* @note [refactor-todo] check if we still need this
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
/**
* @brief Wrapper for atomic compare-and-set instruction in external RAM
*
* Atomic compare-and-set but the target address is placed in external RAM
*
* @note [refactor-todo] check if we still need this
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
typedef spinlock_t portMUX_TYPE; /**< Spinlock type used by FreeRTOS critical sections */
#define portMUX_INITIALIZER_UNLOCKED SPINLOCK_INITIALIZER /**< Spinlock initializer */
#define portMUX_FREE_VAL SPINLOCK_FREE /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
#define portMUX_INITIALIZE(mux) spinlock_initialize(mux) /*< Initialize a spinlock to its unlocked state */
// ------------------ Critical Sections --------------------
/**
* @brief Enter a SMP critical section with a timeout
*
* This function enters an SMP critical section by disabling interrupts then
* taking a spinlock with a specified timeout.
*
* This function can be called in a nested manner.
*
* @note This function is made non-inline on purpose to reduce code size
* @param mux Spinlock
* @param timeout Timeout to wait for spinlock in number of CPU cycles.
* Use portMUX_NO_TIMEOUT to wait indefinitely
* Use portMUX_TRY_LOCK to attempt to get the spinlock a single time only
* @retval pdPASS Critical section entered (spinlock taken)
* @retval pdFAIL If timed out waiting for spinlock (will not occur if using portMUX_NO_TIMEOUT)
*/
BaseType_t xPortEnterCriticalTimeout(portMUX_TYPE *mux, BaseType_t timeout);
/**
* @brief Enter a SMP critical section
*
* - Disable interrupts
* - Takes spinlock
* - Can be nested
* This function enters an SMP critical section by disabling interrupts then
* taking a spinlock with an unlimited timeout.
*
* This function can be called in a nested manner
*
* @param[in] mux Spinlock
*/
void vPortEnterCritical(portMUX_TYPE *mux);
static inline void __attribute__((always_inline)) vPortEnterCritical(portMUX_TYPE *mux);
/**
* @brief Exit a SMP critical section
*
* - Releases spinlock
* - Reenables interrupts
* - Can be nested
* This function can be called in a nested manner. On the outer most level of nesting, this function will:
*
* - Release the spinlock
* - Restore the previous interrupt level before the critical section was entered
*
* If still nesting, this function simply decrements a critical nesting count
*
* @note This function is made non-inline on purpose to reduce code size
* @param[in] mux Spinlock
*/
void vPortExitCritical(portMUX_TYPE *mux);
/**
* @brief FreeRTOS compliant version of enter critical
* @brief FreeRTOS compliant version of xPortEnterCriticalTimeout()
*
* - Ensures that critical section is only entered from task context
* Compliant version of xPortEnterCriticalTimeout() will ensure that this is
* called from a task context only. An abort is called otherwise.
*
* @note This function is made non-inline on purpose to reduce code size
*
* @param mux Spinlock
* @param timeout Timeout
* @return BaseType_t
*/
BaseType_t xPortEnterCriticalTimeoutCompliance(portMUX_TYPE *mux, BaseType_t timeout);
/**
* @brief FreeRTOS compliant version of vPortEnterCritical()
*
* Compliant version of vPortEnterCritical() will ensure that this is
* called from a task context only. An abort is called otherwise.
*
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortEnterCriticalCompliance(portMUX_TYPE *mux);
/**
* @brief FreeRTOS compliant version of exit critical
* @brief FreeRTOS compliant version of vPortExitCritical()
*
* Compliant version of vPortExitCritical() will ensure that this is
* called from a task context only. An abort is called otherwise.
*
* @note This function is made non-inline on purpose to reduce code size
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortExitCriticalCompliance(portMUX_TYPE *mux);
void vPortExitCriticalCompliance(portMUX_TYPE *mux);
/**
* @brief Safe version of enter critical timeout
*
* Safe version of enter critical will automatically select between
* portTRY_ENTER_CRITICAL() and portTRY_ENTER_CRITICAL_ISR()
*
* @param mux Spinlock
* @param timeout Timeout
* @return BaseType_t
*/
static inline BaseType_t __attribute__((always_inline)) xPortEnterCriticalTimeoutSafe(portMUX_TYPE *mux, BaseType_t timeout);
/**
* @brief Safe version of enter critical
*
* - This function can be used to enter a critical section from both task and ISR contexts
* Safe version of enter critical will automatically select between
* portENTER_CRITICAL() and portENTER_CRITICAL_ISR()
*
* @param[in] mux Spinlock
*/
@@ -309,6 +293,9 @@ static inline void __attribute__((always_inline)) vPortEnterCriticalSafe(portMUX
/**
* @brief Safe version of exit critical
*
* Safe version of exit critical will automatically select between
* portEXIT_CRITICAL() and portEXIT_CRITICAL_ISR()
*
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortExitCriticalSafe(portMUX_TYPE *mux);
@@ -397,6 +384,38 @@ void vPortSetStackWatchpoint( void *pxStackStart );
*/
static inline BaseType_t IRAM_ATTR xPortGetCoreID(void);
/**
* @brief Wrapper for atomic compare-and-set instruction
*
* This subroutine will atomically compare *addr to 'compare'. If *addr == compare, *addr is set to *set. *set is
* updated with the previous value of *addr (either 'compare' or some other value.)
*
* @warning From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the "bitwise inverse" of
* the old mem if the mem wasn't written. This doesn't seem to happen on the ESP32 (portMUX assertions would
* fail).
*
* @note [refactor-todo] Check if this can be deprecated
* @note [refactor-todo] Check if this function should be renamed (due to void return type)
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
/**
* @brief Wrapper for atomic compare-and-set instruction in external RAM
*
* Atomic compare-and-set but the target address is placed in external RAM
*
* @note [refactor-todo] Check if this can be deprecated
*
* @param[inout] addr Pointer to target address
* @param[in] compare Compare value
* @param[inout] set Pointer to set value
*/
static inline void __attribute__((always_inline)) uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);
/* ------------------------------------------- FreeRTOS Porting Interface ----------------------------------------------
@@ -449,16 +468,22 @@ static inline BaseType_t IRAM_ATTR xPortGetCoreID(void);
* - Safe versions can be called from either contexts
*/
#ifdef CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE
#define portENTER_CRITICAL(mux) vPortEnterCriticalCompliance(mux)
#define portEXIT_CRITICAL(mux) vPortExitCriticalCompliance(mux)
#define portTRY_ENTER_CRITICAL(mux, timeout) xPortEnterCriticalTimeoutCompliance(mux, timeout)
#define portENTER_CRITICAL(mux) vPortEnterCriticalCompliance(mux)
#define portEXIT_CRITICAL(mux) vPortExitCriticalCompliance(mux)
#else
#define portENTER_CRITICAL(mux) vPortEnterCritical(mux)
#define portEXIT_CRITICAL(mux) vPortExitCritical(mux)
#define portTRY_ENTER_CRITICAL(mux, timeout) xPortEnterCriticalTimeout(mux, timeout)
#define portENTER_CRITICAL(mux) vPortEnterCritical(mux)
#define portEXIT_CRITICAL(mux) vPortExitCritical(mux)
#endif /* CONFIG_FREERTOS_CHECK_PORT_CRITICAL_COMPLIANCE */
#define portENTER_CRITICAL_ISR(mux) vPortEnterCritical(mux)
#define portEXIT_CRITICAL_ISR(mux) vPortExitCritical(mux)
#define portENTER_CRITICAL_SAFE(mux) vPortEnterCriticalSafe(mux)
#define portEXIT_CRITICAL_SAFE(mux) vPortExitCriticalSafe(mux)
#define portTRY_ENTER_CRITICAL_ISR(mux, timeout) xPortEnterCriticalTimeout(mux, timeout)
#define portENTER_CRITICAL_ISR(mux) vPortEnterCritical(mux)
#define portEXIT_CRITICAL_ISR(mux) vPortExitCritical(mux)
#define portTRY_ENTER_CRITICAL_SAFE(mux, timeout) xPortEnterCriticalTimeoutSafe(mux, timeout)
#define portENTER_CRITICAL_SAFE(mux) vPortEnterCriticalSafe(mux)
#define portEXIT_CRITICAL_SAFE(mux) vPortExitCriticalSafe(mux)
// ---------------------- Yielding -------------------------
@@ -546,71 +571,32 @@ static inline void vPortClearInterruptMaskFromISR(UBaseType_t prev_level)
XTOS_RESTORE_JUST_INTLEVEL(prev_level);
}
// ---------------------- Spinlocks ------------------------
static inline void __attribute__((always_inline)) vPortCPUInitializeMutex(portMUX_TYPE *mux)
{
spinlock_initialize(mux);
}
static inline void __attribute__((always_inline)) vPortCPUAcquireMutex(portMUX_TYPE *mux)
{
spinlock_acquire(mux, portMUX_NO_TIMEOUT);
}
static inline bool __attribute__((always_inline)) vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout)
{
return (spinlock_acquire(mux, timeout));
}
static inline void __attribute__((always_inline)) vPortCPUReleaseMutex(portMUX_TYPE *mux)
{
spinlock_release(mux);
}
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#ifdef CONFIG_SPIRAM
compare_and_set_extram(addr, compare, set);
#endif
}
// ------------------ Critical Sections --------------------
static inline void __attribute__((always_inline)) vPortEnterCritical(portMUX_TYPE *mux)
{
xPortEnterCriticalTimeout(mux, portMUX_NO_TIMEOUT);
}
static inline void __attribute__((always_inline)) vPortEnterCriticalCompliance(portMUX_TYPE *mux)
{
if (!xPortInIsrContext()) {
vPortEnterCritical(mux);
} else {
esp_rom_printf("%s:%d (%s)- port*_CRITICAL called from ISR context!\n",
__FILE__, __LINE__, __FUNCTION__);
abort();
}
xPortEnterCriticalTimeoutCompliance(mux, portMUX_NO_TIMEOUT);
}
static inline void __attribute__((always_inline)) vPortExitCriticalCompliance(portMUX_TYPE *mux)
static inline BaseType_t __attribute__((always_inline)) xPortEnterCriticalTimeoutSafe(portMUX_TYPE *mux, BaseType_t timeout)
{
if (!xPortInIsrContext()) {
vPortExitCritical(mux);
BaseType_t ret;
if (xPortInIsrContext()) {
ret = portTRY_ENTER_CRITICAL_ISR(mux, timeout);
} else {
esp_rom_printf("%s:%d (%s)- port*_CRITICAL called from ISR context!\n",
__FILE__, __LINE__, __FUNCTION__);
abort();
ret = portTRY_ENTER_CRITICAL(mux, timeout);
}
return ret;
}
static inline void __attribute__((always_inline)) vPortEnterCriticalSafe(portMUX_TYPE *mux)
{
if (xPortInIsrContext()) {
portENTER_CRITICAL_ISR(mux);
} else {
portENTER_CRITICAL(mux);
}
xPortEnterCriticalTimeoutSafe(mux, portMUX_NO_TIMEOUT);
}
static inline void __attribute__((always_inline)) vPortExitCriticalSafe(portMUX_TYPE *mux)
@@ -648,6 +634,18 @@ static inline BaseType_t IRAM_ATTR xPortGetCoreID(void)
return (uint32_t) cpu_hal_get_core_id();
}
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
compare_and_set_native(addr, compare, set);
}
static inline void __attribute__((always_inline)) uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
#ifdef CONFIG_SPIRAM
compare_and_set_extram(addr, compare, set);
#endif
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------

@@ -32,3 +32,62 @@ static inline void __attribute__((deprecated)) portEXIT_CRITICAL_NESTED(UBaseTyp
{
portCLEAR_INTERRUPT_MASK_FROM_ISR(prev_level);
}
/* ---------------------- Spinlocks --------------------- */
/**
* @brief Initialize a spinlock
*
* Does the exact same thing as spinlock_initialize();
*
* @deprecated This function is deprecated. Call spinlock_initialize() instead
* @param[in] mux Spinlock
*/
static inline void __attribute__((deprecated)) __attribute__((always_inline)) vPortCPUInitializeMutex(portMUX_TYPE *mux)
{
spinlock_initialize(mux);
}
/**
* @brief Acquire a spinlock
*
* Does the exact same thing as spinlock_acquire() with unlimited timeout
*
* @deprecated This function is deprecated. Call spinlock_acquire() instead
* @param[in] mux Spinlock
*/
static inline void __attribute__((deprecated)) __attribute__((always_inline)) vPortCPUAcquireMutex(portMUX_TYPE *mux)
{
spinlock_acquire(mux, portMUX_NO_TIMEOUT);
}
/**
* @brief Acquire a spinlock
*
* Does the exact same thing as spinlock_acquire() with a specified timeout
*
* @deprecated This function is deprecated. Call spinlock_acquire() instead
* @note Does not have deprecated attribute due to usage in app_trace_util.c
* @param[in] mux Spinlock
* @param timeout
* @return true Spinlock acquired
* @return false Timed out
*/
static inline bool __attribute__((always_inline)) vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout)
{
return (spinlock_acquire(mux, timeout));
}
/**
* @brief Release a spinlock
*
* Does the exact same thing as spinlock_release()
*
* @deprecated This function is deprecated. Call spinlock_release() instead
* @note Does not have deprecated attribute due to usage in app_trace_util.c
* @param[in] mux Spinlock
*/
static inline void __attribute__((always_inline)) vPortCPUReleaseMutex(portMUX_TYPE *mux)
{
spinlock_release(mux);
}
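
For code migrating off these deprecated wrappers, the mapping to spinlock.h is one-to-one; a hedged sketch (the lock name below is illustrative):

// Illustrative migration: replace the deprecated vPortCPU*Mutex() wrappers with
// the spinlock.h calls they forward to (portMUX_TYPE is spinlock_t on this port).
static portMUX_TYPE s_lock;

void lock_example(void)
{
    // Before: vPortCPUInitializeMutex(&s_lock);
    spinlock_initialize(&s_lock);

    // Before: vPortCPUAcquireMutex(&s_lock); ... vPortCPUReleaseMutex(&s_lock);
    spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER);
    // ... protected work ...
    spinlock_release(&s_lock);
}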

@@ -273,42 +273,76 @@ BaseType_t IRAM_ATTR xPortInterruptedFromISRContext(void)
// ------------------ Critical Sections --------------------
void __attribute__((optimize("-O3"))) vPortEnterCritical(portMUX_TYPE *mux)
BaseType_t __attribute__((optimize("-O3"))) xPortEnterCriticalTimeout(portMUX_TYPE *mux, BaseType_t timeout)
{
BaseType_t oldInterruptLevel = portSET_INTERRUPT_MASK_FROM_ISR();
/* Interrupts may already be disabled (because we're doing this recursively)
* but we can't get the interrupt level after
* vPortCPUAquireMutex, because it also may mess with interrupts.
* Get it here first, then later figure out if we're nesting
* and save for real there.
*/
vPortCPUAcquireMutex( mux );
/* Interrupts may already be disabled (if this function is called in a nested
* manner). However, there's no atomic operation that will allow us to check,
* thus we have to disable interrupts again anyway.
*
* However, if this call is NOT nested (i.e., the first call to enter a
* critical section), we will save the previous interrupt level so that the
* saved level can be restored on the last call to exit the critical.
*/
BaseType_t xOldInterruptLevel = portSET_INTERRUPT_MASK_FROM_ISR();
if (!spinlock_acquire(mux, timeout)) {
//Timed out attempting to get spinlock. Restore previous interrupt level and return
portCLEAR_INTERRUPT_MASK_FROM_ISR(xOldInterruptLevel);
return pdFAIL;
}
//Spinlock acquired. Increment the critical nesting count.
BaseType_t coreID = xPortGetCoreID();
BaseType_t newNesting = port_uxCriticalNesting[coreID] + 1;
port_uxCriticalNesting[coreID] = newNesting;
//If this is the first entry into a critical section, save the old interrupt level.
if ( newNesting == 1 ) {
//This is the first time we get called. Save original interrupt level.
port_uxOldInterruptState[coreID] = oldInterruptLevel;
port_uxOldInterruptState[coreID] = xOldInterruptLevel;
}
return pdPASS;
}
void __attribute__((optimize("-O3"))) vPortExitCritical(portMUX_TYPE *mux)
{
vPortCPUReleaseMutex( mux );
/* This function may be called in a nested manner. Therefore, we only need
* to reenable interrupts if this is the last call to exit the critical. We
* can use the nesting count to determine whether this is the last exit call.
*/
spinlock_release(mux);
BaseType_t coreID = xPortGetCoreID();
BaseType_t nesting = port_uxCriticalNesting[coreID];
if (nesting > 0) {
nesting--;
port_uxCriticalNesting[coreID] = nesting;
//This is the last exit call, restore the saved interrupt level
if ( nesting == 0 ) {
portCLEAR_INTERRUPT_MASK_FROM_ISR(port_uxOldInterruptState[coreID]);
}
}
}
BaseType_t xPortEnterCriticalTimeoutCompliance(portMUX_TYPE *mux, BaseType_t timeout)
{
BaseType_t ret;
if (!xPortInIsrContext()) {
ret = xPortEnterCriticalTimeout(mux, timeout);
} else {
esp_rom_printf("port*_CRITICAL called from ISR context. Aborting!\n");
abort();
ret = pdFAIL;
}
return ret;
}
void vPortExitCriticalCompliance(portMUX_TYPE *mux)
{
if (!xPortInIsrContext()) {
vPortExitCritical(mux);
} else {
esp_rom_printf("port*_CRITICAL called from ISR context. Aborting!\n");
abort();
}
}
// ---------------------- Yielding -------------------------
void vPortYieldOtherCore( BaseType_t coreid )
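
With the timeout plumbing above, an ISR can now bound how long it spins on a contended lock; a minimal sketch (illustrative only, the 1000-cycle budget is arbitrary):

// Sketch: an ISR that gives up on a contended spinlock instead of spinning
// indefinitely with interrupts masked.
static portMUX_TYPE s_isr_lock = portMUX_INITIALIZER_UNLOCKED;

void IRAM_ATTR example_isr(void *arg)
{
    if (portTRY_ENTER_CRITICAL_ISR(&s_isr_lock, 1000) == pdPASS) {
        // ... update state shared with tasks / the other core ...
        portEXIT_CRITICAL_ISR(&s_isr_lock);
    }
    // On pdFAIL the update is skipped or deferred rather than stalling the ISR.
}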

@@ -289,7 +289,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
#ifdef ESP_PLATFORM
if( xNewQueue == pdTRUE )
{
vPortCPUInitializeMutex(&pxQueue->mux);
portMUX_INITIALIZE(&pxQueue->mux);
}
#endif // ESP_PLATFORM
@@ -538,7 +538,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
/* In case this is a recursive mutex. */
pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
#ifdef ESP_PLATFORM
vPortCPUInitializeMutex(&pxNewQueue->mux);
portMUX_INITIALIZE(&pxNewQueue->mux);
#endif // ESP_PLATFORM
traceCREATE_MUTEX( pxNewQueue );

@@ -1351,7 +1351,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
pxStreamBuffer->ucFlags = ucFlags;
#ifdef ESP_PLATFORM
vPortCPUInitializeMutex( &pxStreamBuffer->xStreamBufferMux );
portMUX_INITIALIZE( &pxStreamBuffer->xStreamBufferMux );
#endif // ESP_PLATFORM
}

@@ -991,10 +991,6 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
/* Check that the list from which active timers are referenced, and the
* queue used to communicate with the timer service, have been
* initialised. */
#ifdef ESP_PLATFORM
if( xTimerQueue == NULL ) vPortCPUInitializeMutex( &xTimerMux );
#endif // ESP_PLATFORM
taskENTER_CRITICAL();
{
if( xTimerQueue == NULL )

@@ -90,7 +90,7 @@ static void task_shared_value_increment(void *ignore)
TEST_CASE("portMUX cross-core locking", "[freertos]")
{
done_sem = xSemaphoreCreateCounting(2, 0);
vPortCPUInitializeMutex(&shared_mux);
portMUX_INITIALIZE(&shared_mux);
shared_value = 0;
BENCHMARK_START();
@@ -114,7 +114,7 @@ TEST_CASE("portMUX high contention", "[freertos]")
{
const int TOTAL_TASKS = 8; /* half on each core */
done_sem = xSemaphoreCreateCounting(TOTAL_TASKS, 0);
vPortCPUInitializeMutex(&shared_mux);
portMUX_INITIALIZE(&shared_mux);
shared_value = 0;
BENCHMARK_START();

@@ -1,16 +1,8 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#ifdef MULTI_HEAP_FREERTOS
@@ -44,7 +36,7 @@ typedef portMUX_TYPE multi_heap_lock_t;
} while(0)
#define MULTI_HEAP_LOCK_INIT(PLOCK) do { \
vPortCPUInitializeMutex((PLOCK)); \
portMUX_INITIALIZE((PLOCK)); \
} while(0)
#define MULTI_HEAP_LOCK_STATIC_INITIALIZER portMUX_INITIALIZER_UNLOCKED

@@ -8,7 +8,7 @@
*/
#ifndef IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP
#define IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP 200
#define IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP 250
#endif
#ifndef IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP_PSRAM
#define IDF_PERFORMANCE_MAX_FREERTOS_SPINLOCK_CYCLES_PER_OP_PSRAM 300

@@ -121,10 +121,6 @@ typedef struct {
* If mux is locked, count is non-zero & represents the number of recursive locks on the mux.
*/
uint32_t count;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
const char *lastLockedFn;
int lastLockedLine;
#endif
} portMUX_TYPE;
#define portMUX_FREE_VAL SPINLOCK_FREE
@@ -134,13 +130,8 @@ typedef struct {
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT /* Try to acquire the spinlock a single time only */
// Keep this in sync with the portMUX_TYPE struct definition please.
#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG
#define portMUX_INITIALIZER_UNLOCKED \
{ .owner = portMUX_FREE_VAL, .count = 0, }
#else
#define portMUX_INITIALIZER_UNLOCKED \
{ .owner = portMUX_FREE_VAL, .count = 0, .lastLockedFn = "(never locked)", .lastLockedLine = -1 }
#endif
/* Scheduler utilities. */
extern void vPortYield(void);
@@ -162,11 +153,6 @@ extern void vPortYieldFromISR(void);
extern int vPortSetInterruptMask(void);
extern void vPortClearInterruptMask(int);
void vPortCPUInitializeMutex(portMUX_TYPE *mux);
void vPortCPUAcquireMutex(portMUX_TYPE *mux);
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles);
void vPortCPUReleaseMutex(portMUX_TYPE *mux);
extern void vPortEnterCritical(void);
extern void vPortExitCritical(void);

@@ -1398,7 +1398,6 @@ components/heap/include/multi_heap.h
components/heap/include/soc/soc_memory_layout.h
components/heap/multi_heap_config.h
components/heap/multi_heap_internal.h
components/heap/multi_heap_platform.h
components/heap/multi_heap_poisoning.c
components/heap/test/test_aligned_alloc_caps.c
components/heap/test/test_allocator_timings.c