/*
 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <sys/lock.h>
#include <stdlib.h>
#include <sys/reent.h>
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "freertos/portable.h"
#include "esp_rom_caps.h"

/* Notes on our newlib lock implementation:
 *
 * - Use FreeRTOS mutex semaphores as locks.
 * - _lock_t is int, but we store a SemaphoreHandle_t there.
 * - Locks are no-ops until the FreeRTOS scheduler is running.
 * - Due to this, locks need to be lazily initialised the first time
 *   they are acquired. Initialisation/deinitialisation of locks is
 *   protected by lock_init_spinlock.
 * - Race conditions around lazy initialisation (via lock_acquire) are
 *   protected against.
 * - Anyone calling lock_close is responsible for ensuring no one else
 *   is holding the lock at this time.
 * - Race conditions between lock_close & lock_init (for the same lock)
 *   are the responsibility of the caller.
 */
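
/* Illustrative usage sketch (not part of the implementation): any zero-initialised
   _lock_t can be passed to the functions defined below; the name s_example_lock
   is hypothetical, and the assumption that newlib reaches these functions through
   its _lock_* hooks follows the same pattern.

       static _lock_t s_example_lock;         // 0 == not yet initialised
       _lock_acquire(&s_example_lock);        // lazily creates the FreeRTOS mutex
       ...                                    // critical section
       _lock_release(&s_example_lock);
       _lock_close(&s_example_lock);          // only once no task can still hold it
*/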

static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;

/* Initialize the given lock by allocating a new mutex semaphore
   as the _lock_t value.

   Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have
   been initialised (to zero only) before the RTOS scheduler started.
*/
static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) {
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        /* Lock already initialised (either we didn't check earlier,
           or it got initialised while we were waiting for the
           spinlock.) */
    }
    else
    {
        /* Create a new semaphore

           this is a bit of an API violation, as we're calling the
           private function xQueueCreateMutex(x) directly instead of
           the xSemaphoreCreateMutex / xSemaphoreCreateRecursiveMutex
           wrapper functions...

           The better alternative would be to pass pointers to one of
           the two xSemaphoreCreate___Mutex functions, but as FreeRTOS
           implements these as macros instead of inline functions
           (*party like it's 1998!*) it's not possible to do this
           without writing wrappers. Doing it this way seems much less
           spaghetti-like.
        */
        SemaphoreHandle_t new_sem = xQueueCreateMutex(mutex_type);
        if (!new_sem) {
            abort(); /* No more semaphores available or OOM */
        }
        *lock = (_lock_t)new_sem;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}

void IRAM_ATTR _lock_init(_lock_t *lock) {
    *lock = 0; // In case lock's memory is uninitialized
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_init_recursive(_lock_t *lock) {
    *lock = 0; // In case lock's memory is uninitialized
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* Free the mutex semaphore pointed to by *lock, and zero it out.

   Note that FreeRTOS doesn't account for deleting mutexes while they
   are held, and neither do we... so take care not to delete newlib
   locks while they may be held by other tasks!

   Also, deleting a lock in this way will cause it to be lazily
   re-initialised if it is used again. Caller has to avoid doing
   this!
*/
void IRAM_ATTR _lock_close(_lock_t *lock) {
    portENTER_CRITICAL(&lock_init_spinlock);
    if (*lock) {
        SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
#if (INCLUDE_xSemaphoreGetMutexHolder == 1)
        configASSERT(xSemaphoreGetMutexHolder(h) == NULL); /* mutex should not be held */
#endif
        vSemaphoreDelete(h);
        *lock = 0;
    }
    portEXIT_CRITICAL(&lock_init_spinlock);
}

void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));

/* Acquire the mutex semaphore for lock. Wait up to delay ticks.
   mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX.
   Returns 0 on success, -1 if the mutex could not be taken.
*/
static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) {
    SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
    if (!h) {
        if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
            return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
        }
        /* lazy initialise lock - might have had a static initializer (that we don't use) */
        lock_init_generic(lock, mutex_type);
        h = (SemaphoreHandle_t)(*lock);
        configASSERT(h != NULL);
    }
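
    /* The lock may have been initialised before the scheduler was started;
       acquisition is still a no-op until the scheduler is running. */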
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return 0; /* locking is a no-op before scheduler is up, so this "succeeds" */
    }
    BaseType_t success;
    if (!xPortCanYield()) {
        /* In ISR Context */
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort(); /* recursive mutexes make no sense in ISR context */
        }
        BaseType_t higher_task_woken = false;
        success = xSemaphoreTakeFromISR(h, &higher_task_woken);
        if (!success && delay > 0) {
            abort(); /* Tried to block on mutex from ISR, couldn't... rewrite your program to avoid libc interactions in ISRs! */
        }
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    }
    else {
        /* In task context */
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            success = xSemaphoreTakeRecursive(h, delay);
        } else {
            success = xSemaphoreTake(h, delay);
        }
    }

    return (success == pdTRUE) ? 0 : -1;
}

void IRAM_ATTR _lock_acquire(_lock_t *lock) {
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) {
    lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
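
/* The try-acquire variants below pass a delay of 0 ticks, so the underlying
   xSemaphoreTake*() call returns immediately and -1 is reported instead of
   blocking when the mutex is already held. */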

int IRAM_ATTR _lock_try_acquire(_lock_t *lock) {
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
}

int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) {
    return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* Release the mutex semaphore for lock.
   mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX.
*/
static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) {
    if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
        return; /* locking is a no-op before scheduler is up */
    }
    SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
    assert(h);
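
    /* Same ISR/task split as in lock_acquire_generic() above */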
    if (!xPortCanYield()) {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            abort(); /* indicates logic bug, it shouldn't be possible to lock recursively in ISR */
        }
        BaseType_t higher_task_woken = false;
        xSemaphoreGiveFromISR(h, &higher_task_woken);
        if (higher_task_woken) {
            portYIELD_FROM_ISR();
        }
    } else {
        if (mutex_type == queueQUEUE_TYPE_RECURSIVE_MUTEX) {
            xSemaphoreGiveRecursive(h);
        } else {
            xSemaphoreGive(h);
        }
    }
}

void IRAM_ATTR _lock_release(_lock_t *lock) {
    lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR _lock_release_recursive(_lock_t *lock) {
    lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

/* To ease the transition to newlib 3.3.0, this part is kept under an ifdef.
 * After the toolchain with newlib 3.3.0 is released and merged, the ifdefs
 * can be removed.
 *
 * Also the retargetable locking functions still rely on the previous
 * implementation. Once support for !_RETARGETABLE_LOCKING is removed,
 * the code can be simplified, removing support for lazy initialization of
 * locks. At the same time, IDF code which relies on _lock_acquire/_lock_release
 * will have to be updated to not depend on lazy initialization.
 *
 * Explanation of the different lock types:
 *
 * Newlib 2.2.0 and 3.0.0:
 *   _lock_t is defined as int, stores SemaphoreHandle_t.
 *
 * Newlib 3.3.0:
 *   struct __lock is (or contains) StaticSemaphore_t.
 *   _LOCK_T is a pointer to struct __lock, equivalent to SemaphoreHandle_t.
 *   It has the same meaning as _lock_t in the previous implementation.
 */
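
/* Illustrative sketch (not part of the implementation) of how the retargetable
   API defined below behaves; the variable name is hypothetical. Note that
   __retarget_lock_init() still allocates a FreeRTOS mutex dynamically and stores
   the handle in the _LOCK_T, rather than using caller-provided storage:

       _LOCK_T example = NULL;
       __retarget_lock_init(&example);     // creates the underlying mutex
       __retarget_lock_acquire(example);
       __retarget_lock_release(example);
       __retarget_lock_close(example);     // deletes the underlying mutex
*/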

/* This ensures the platform-specific definition in lock.h is correct.
 * We use "greater or equal" since the size of StaticSemaphore_t may
 * vary by 2 words, depending on whether configUSE_TRACE_FACILITY is enabled.
 */
_Static_assert(sizeof(struct __lock) >= sizeof(StaticSemaphore_t),
               "Incorrect size of struct __lock");

/* FreeRTOS configuration check */
_Static_assert(configSUPPORT_STATIC_ALLOCATION,
               "FreeRTOS should be configured with static allocation support");

/* These two locks are used instead of nine distinct newlib static locks:
 * most of those locks protect rarely used features, so the chance of
 * performance degradation due to lock contention is low.
 */
static StaticSemaphore_t s_common_mutex;
static StaticSemaphore_t s_common_recursive_mutex;

#if ESP_ROM_HAS_RETARGETABLE_LOCKING
/* C3 and S3 ROMs are built without Newlib static lock symbols exported, and
 * with an extra level of _LOCK_T indirection in mind.
 * The following is a workaround for this:
 * - on startup, we call esp_rom_newlib_init_common_mutexes to set
 *   the two mutex pointers to magic values.
 * - in __retarget_lock_acquire*, we check whether the argument dereferences
 *   to the magic value. If it does, we lock the correct mutex defined in the
 *   app instead.
 * Casts from &StaticSemaphore_t to _LOCK_T are okay because _LOCK_T
 * (which is SemaphoreHandle_t) is a pointer to the corresponding
 * StaticSemaphore_t structure. This is ensured by asserts below.
 */

#define ROM_NEEDS_MUTEX_OVERRIDE
#endif // ESP_ROM_HAS_RETARGETABLE_LOCKING

#ifdef ROM_NEEDS_MUTEX_OVERRIDE
#define ROM_MUTEX_MAGIC 0xbb10c433
/* This is a macro, since we are overwriting the argument */
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) \
    if (*(int*)_lock == ROM_MUTEX_MAGIC) { \
        (_lock) = (_LOCK_T) (_lock_to_use_instead); \
    }
#else // ROM_NEEDS_MUTEX_OVERRIDE
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE
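
/* For example, MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex) expands (when
   ROM_NEEDS_MUTEX_OVERRIDE is defined) to:

       if (*(int*)lock == ROM_MUTEX_MAGIC) {
           (lock) = (_LOCK_T) (&s_common_mutex);
       }

   so a lock that the ROM initialised to the magic value is redirected to the
   common mutex defined in this file before it is used. */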

void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
    *lock = NULL; /* In case lock's memory is uninitialized */
    lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}

void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{
    *lock = NULL; /* In case lock's memory is uninitialized */
    lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}

void IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{
    _lock_close(&lock);
}

void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{
    _lock_close_recursive(&lock);
}

/* Separate function, to prevent generating multiple assert strings */
static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{
    assert(lock != NULL && "Uninitialized lock used");
}

void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    _lock_acquire(&lock);
}

void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    _lock_acquire_recursive(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
    return _lock_try_acquire(&lock);
}

int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
    return _lock_try_acquire_recursive(&lock);
}

void IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release(&lock);
}

void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{
    check_lock_nonzero(lock);
    _lock_release_recursive(&lock);
}

/* When _RETARGETABLE_LOCKING is enabled, newlib expects the following locks to be provided: */

extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sinit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___malloc_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___env_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___sfp_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_recursive_mutex"))) __lock___atexit_recursive_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___at_quick_exit_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___tz_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___dd_hash_mutex;
extern StaticSemaphore_t __attribute__((alias("s_common_mutex"))) __lock___arc4random_mutex;
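
/* Because of the aliases above, each newlib lock symbol shares its storage with
   s_common_mutex or s_common_recursive_mutex, so initialising those two objects
   in esp_newlib_locks_init() below initialises every newlib static lock at once. */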

void esp_newlib_locks_init(void)
{
    /* Initialize the two mutexes used for the locks above.
     * Asserts below check our assumption that SemaphoreHandle_t will always
     * point to the corresponding StaticSemaphore_t structure.
     */
    SemaphoreHandle_t handle;
    handle = xSemaphoreCreateMutexStatic(&s_common_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_mutex);
    handle = xSemaphoreCreateRecursiveMutexStatic(&s_common_recursive_mutex);
    assert(handle == (SemaphoreHandle_t) &s_common_recursive_mutex);
    (void) handle;

    /* Chip ROMs are built with older versions of newlib, and rely on different lock variables.
     * Initialize these locks to the same values.
     */
#ifdef CONFIG_IDF_TARGET_ESP32
    /* Newlib 2.2.0 is used in ROM, the following lock symbols are defined: */
    extern _lock_t __sfp_lock;
    __sfp_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sinit_lock;
    __sinit_lock = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __env_lock_object;
    __env_lock_object = (_lock_t) &s_common_mutex;
    extern _lock_t __tz_lock_object;
    __tz_lock_object = (_lock_t) &s_common_recursive_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32S2)
    /* Newlib 3.0.0 is used in ROM, the following lock symbols are defined: */
    extern _lock_t __sinit_recursive_mutex;
    __sinit_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
    extern _lock_t __sfp_recursive_mutex;
    __sfp_recursive_mutex = (_lock_t) &s_common_recursive_mutex;
#elif defined(CONFIG_IDF_TARGET_ESP32C3) || defined(CONFIG_IDF_TARGET_ESP32S3) || defined(CONFIG_IDF_TARGET_ESP32H2) || defined(CONFIG_IDF_TARGET_ESP32C2)
    /* Newlib 3.3.0 is used in ROM, built with _RETARGETABLE_LOCKING.
     * The ROM's lock variables are not accessible (for ECO forward compatibility),
     * but the ROM provides an API to initialize the lock variables it uses.
     */
    extern void esp_rom_newlib_init_common_mutexes(_LOCK_T, _LOCK_T);
    /* See notes about ROM_NEEDS_MUTEX_OVERRIDE above */
    int magic_val = ROM_MUTEX_MAGIC;
    _LOCK_T magic_mutex = (_LOCK_T) &magic_val;
    esp_rom_newlib_init_common_mutexes(magic_mutex, magic_mutex);
#else // other target
#error Unsupported target
#endif
}