freertos: Disable portUSING_MPU_WRAPPERS in FreeRTOS SMP Xtensa port

This commit disables portUSING_MPU_WRAPPERS for the FreeRTOS SMP xtensa port.
This was previously enabled due to the need to keep a CPSA (coprocessor save
area) pointer in the TCB. The CPSA pointer is now calculated at run time.
pull/8812/head
Darian Leung 2022-04-07 14:20:46 +08:00
parent 8b4e032255
commit 8c92d0b2af
6 changed files with 77 additions and 138 deletions

View file

@ -196,7 +196,7 @@ This file gets pulled into assembly sources. Therefore, some includes need to b
#elif CONFIG_FREERTOS_CHECK_STACKOVERFLOW_CANARY
#define configCHECK_FOR_STACK_OVERFLOW 2
#endif
#define configRECORD_STACK_HIGH_ADDRESS 1
#define configRECORD_STACK_HIGH_ADDRESS 1 // This must be set as the port requires TCB.pxEndOfStack
// ------------------- Run-time Stats ----------------------

View file

@ -247,54 +247,6 @@ static inline BaseType_t __attribute__((always_inline)) xPortGetCoreID( void )
return (BaseType_t) cpu_hal_get_core_id();
}
/* ------------------------------------------------------ Misc ---------------------------------------------------------
* - Miscellaneous porting macros
* - These are not part of the FreeRTOS porting interface, but are used by other FreeRTOS dependent components
* - [refactor-todo] Remove dependency on MPU wrappers by modifying TCB
* ------------------------------------------------------------------------------------------------------------------ */
// -------------------- Co-Processor -----------------------
// When coprocessors are defined, we maintain a pointer to coprocessors area.
// We currently use a hack: redefine field xMPU_SETTINGS in TCB block as a structure that can hold:
// MPU wrappers, coprocessor area pointer, trace code structure, and more if needed.
// The field is normally used for memory protection. FreeRTOS should create another general purpose field.
// Port-private data stored in the TCB's xMPU_SETTINGS field (see the hack
// description above). Holds per-task state the generic TCB has no field for.
typedef struct {
#if XCHAL_CP_NUM > 0
// Pointer to the task's coprocessor save area; MUST BE FIRST so assembly
// code can reach it at a fixed offset from the start of this structure.
volatile StackType_t *coproc_area; // Pointer to coprocessor save area; MUST BE FIRST
#endif
#if portUSING_MPU_WRAPPERS
// Define here mpu_settings, which is port dependent
int mpu_setting; // Just a dummy example here; MPU not ported to Xtensa yet
#endif
} xMPU_SETTINGS;
// Main hack to use MPU_wrappers even when no MPU is defined (warning: mpu_setting should not be accessed; otherwise move this above xMPU_SETTINGS)
#if (XCHAL_CP_NUM > 0) && !portUSING_MPU_WRAPPERS // If MPU wrappers not used, we still need to allocate coproc area
#undef portUSING_MPU_WRAPPERS
#define portUSING_MPU_WRAPPERS 1 // Enable it to allocate coproc area
#define MPU_WRAPPERS_H // Override mpu_wrapper.h to disable unwanted code
#define PRIVILEGED_FUNCTION
#define PRIVILEGED_DATA
#endif
void _xt_coproc_release(volatile void *coproc_sa_base);
/*
* The structures and methods of manipulating the MPU are contained within the
* port layer.
*
* Fills the xMPUSettings structure with the memory region information
* contained in xRegions.
*/
#if( portUSING_MPU_WRAPPERS == 1 )
struct xMEMORY_REGION;
void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION *const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth ) PRIVILEGED_FUNCTION;
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings );
#endif
/* ------------------------------------------------ IDF Compatibility --------------------------------------------------
* - These macros and functions need to be defined for IDF to compile
* ------------------------------------------------------------------------------------------------------------------ */

View file

@ -437,17 +437,19 @@ static void vPortTaskWrapper(TaskFunction_t pxCode, void *pvParameters)
}
#endif
const DRAM_ATTR uint32_t offset_pxEndOfStack = offsetof(StaticTask_t, pxDummy8);
const DRAM_ATTR uint32_t offset_uxCoreAffinityMask = offsetof(StaticTask_t, uxDummy25);
const DRAM_ATTR uint32_t offset_cpsa = XT_CP_SIZE;
#if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged )
StackType_t * pxEndOfStack,
TaskFunction_t pxCode,
void * pvParameters )
#else
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
TaskFunction_t pxCode,
void * pvParameters,
BaseType_t xRunPrivileged )
void * pvParameters )
#endif
{
StackType_t *sp, *tp;
@ -624,34 +626,3 @@ void vApplicationMinimalIdleHook( void )
esp_vApplicationIdleHook(); //Run IDF style hooks
}
#endif // CONFIG_FREERTOS_USE_MINIMAL_IDLE_HOOK
/* ---------------------------------------------- Misc Implementations -------------------------------------------------
*
* ------------------------------------------------------------------------------------------------------------------ */
// -------------------- Co-Processor -----------------------
/*
* Used to set coprocessor area in stack. Current hack is to reuse MPU pointer for coprocessor area.
*/
#if portUSING_MPU_WRAPPERS
/*
 * Hack: reuses the TCB's xMPU_SETTINGS field to record the task's
 * coprocessor save area (CPSA) pointer instead of real MPU settings.
 *
 * The CPSA is carved out of the high end of the task's stack: start at the
 * last stack word, align down to portBYTE_ALIGNMENT, reserve XT_CP_SIZE
 * bytes, then align the result down to a 16-byte boundary.
 *
 * xRegions is unused here — the MPU itself is not ported to Xtensa.
 */
void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMORY_REGION *const xRegions, StackType_t *pxBottomOfStack, uint32_t usStackDepth )
{
#if XCHAL_CP_NUM > 0
xMPUSettings->coproc_area = ( StackType_t * ) ( ( uint32_t ) ( pxBottomOfStack + usStackDepth - 1 )); /* last word of the stack */
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) xMPUSettings->coproc_area ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /* align down to portBYTE_ALIGNMENT */
xMPUSettings->coproc_area = ( StackType_t * ) ( ( ( uint32_t ) xMPUSettings->coproc_area - XT_CP_SIZE ) & ~0xf ); /* reserve XT_CP_SIZE bytes; 16-byte align */
/* NOTE: we cannot initialize the coprocessor save area here because FreeRTOS is going to
 * clear the stack area after we return. This is done in pxPortInitialiseStack().
 */
#endif
}
/*
 * Releases the task's coprocessor ownership (if any) by handing its save
 * area pointer to _xt_coproc_release, so no stale owner pointer remains.
 * NOTE(review): presumably invoked when a task is deleted — confirm caller.
 */
void vPortReleaseTaskMPUSettings( xMPU_SETTINGS *xMPUSettings )
{
/* If task has live floating point registers somewhere, release them */
_xt_coproc_release( xMPUSettings->coproc_area );
}
#endif /* portUSING_MPU_WRAPPERS */

View file

@ -32,9 +32,34 @@
#include "sdkconfig.h"
#define TOPOFSTACK_OFFS 0x00 /* StackType_t *pxTopOfStack */
#define CP_TOPOFSTACK_OFFS 0x04 /* xMPU_SETTINGS.coproc_area */
.extern pxCurrentTCBs
.extern offset_pxEndOfStack
.extern offset_cpsa
/*
Macro to get a task's coprocessor save area (CPSA) from its TCB
Entry:
- reg_A contains a pointer to the TCB
Exit:
- reg_A contains a pointer to the CPSA
- reg_B destroyed
*/
// Computes CPSA = (TCB.pxEndOfStack - offset_cpsa) & ~0xF.
// offset_pxEndOfStack and offset_cpsa are C globals (defined in the port's
// .c file via offsetof()/XT_CP_SIZE), loaded indirectly so the asm does not
// hard-code TCB layout. reg_A and reg_B must be two distinct AR registers.
.macro get_cpsa_from_tcb reg_A reg_B
// Get TCB.pxEndOfStack from reg_A
movi \reg_B, offset_pxEndOfStack /* Move &offset_pxEndOfStack into reg_B */
l32i \reg_B, \reg_B, 0 /* Load offset_pxEndOfStack into reg_B */
add \reg_A, \reg_A, \reg_B /* Calculate &pxEndOfStack to reg_A (&TCB + offset_pxEndOfStack) */
l32i \reg_A, \reg_A, 0 /* Load TCB.pxEndOfStack into reg_A */
//Offset to start of coproc save area
movi \reg_B, offset_cpsa /* Move &offset_cpsa into reg_B */
l32i \reg_B, \reg_B, 0 /* Load offset_cpsa into reg_B */
sub \reg_A, \reg_A, \reg_B /* Subtract offset_cpsa from pxEndOfStack to get to start of CP save area (unaligned) */
//Align down start of CP save area to 16 byte boundary
movi \reg_B, ~(0xF)
and \reg_A, \reg_A, \reg_B /* Align CPSA pointer to 16 bytes */
.endm
/*
*******************************************************************************
@ -135,23 +160,19 @@ _frxt_int_enter:
mull a2, a4, a2
add a1, a1, a2 /* for current proc */
#ifdef CONFIG_FREERTOS_FPU_IN_ISR
#if XCHAL_CP_NUM > 0
#if CONFIG_FREERTOS_FPU_IN_ISR && XCHAL_CP_NUM > 0
rsr a3, CPENABLE /* Restore thread scope CPENABLE */
addi sp, sp,-4 /* ISR will manage FPU coprocessor by forcing */
s32i a3, a1, 0 /* its trigger */
#endif
#endif
.Lnested:
1:
#ifdef CONFIG_FREERTOS_FPU_IN_ISR
#if XCHAL_CP_NUM > 0
#if CONFIG_FREERTOS_FPU_IN_ISR && XCHAL_CP_NUM > 0
movi a3, 0 /* whilst ISRs pending keep CPENABLE exception active */
wsr a3, CPENABLE
rsync
#endif
#endif
mov a0, a12 /* restore return addr and return */
ret
@ -189,14 +210,12 @@ _frxt_int_exit:
s32i a2, a3, 0 /* save nesting count */
bnez a2, .Lnesting /* !=0 after decr so still nested */
#ifdef CONFIG_FREERTOS_FPU_IN_ISR
#if XCHAL_CP_NUM > 0
#if CONFIG_FREERTOS_FPU_IN_ISR && XCHAL_CP_NUM > 0
l32i a3, sp, 0 /* Grab last CPENABLE before leave ISR */
addi sp, sp, 4
wsr a3, CPENABLE
rsync /* ensure CPENABLE was modified */
#endif
#endif
movi a2, pxCurrentTCBs
addx4 a2, a4, a2
@ -474,11 +493,11 @@ _frxt_dispatch:
#if XCHAL_CP_NUM > 0
/* Restore CPENABLE from task's co-processor save area. */
movi a3, pxCurrentTCBs /* cp_state = */
getcoreid a2
addx4 a3, a2, a3
l32i a3, a3, 0
l32i a2, a3, CP_TOPOFSTACK_OFFS /* StackType_t *pxStack; */
movi a2, pxCurrentTCBs /* cp_state = */
getcoreid a3
addx4 a2, a3, a2
l32i a2, a2, 0
get_cpsa_from_tcb a2, a3 /* After this, pointer to CP save area is in a2, a3 is destroyed */
l16ui a3, a2, XT_CPENABLE /* CPENABLE = cp_state->cpenable; */
wsr a3, CPENABLE
#endif
@ -573,7 +592,7 @@ vPortYield:
#if XCHAL_CP_NUM > 0
/* Clear CPENABLE, also in task's co-processor state save area. */
l32i a2, a2, CP_TOPOFSTACK_OFFS /* a2 = pxCurrentTCBs->cp_state */
get_cpsa_from_tcb a2, a3 /* After this, pointer to CP save area is in a2, a3 is destroyed */
movi a3, 0
wsr a3, CPENABLE
beqz a2, 1f
@ -614,12 +633,12 @@ vPortYieldFromInt:
#if XCHAL_CP_NUM > 0
/* Save CPENABLE in task's co-processor save area, and clear CPENABLE. */
movi a3, pxCurrentTCBs /* cp_state = */
getcoreid a2
addx4 a3, a2, a3
l32i a3, a3, 0
movi a2, pxCurrentTCBs /* cp_state = */
getcoreid a3
addx4 a2, a3, a2
l32i a2, a2, 0
l32i a2, a3, CP_TOPOFSTACK_OFFS
get_cpsa_from_tcb a2, a3 /* After this, pointer to CP save area is in a2, a3 is destroyed */
rsr a3, CPENABLE
s16i a3, a2, XT_CPENABLE /* cp_state->cpenable = CPENABLE; */
@ -673,7 +692,7 @@ _frxt_task_coproc_state:
l32i a15, a15, 0 /* && pxCurrentTCBs != 0) { */
beqz a15, 2f
l32i a15, a15, CP_TOPOFSTACK_OFFS
get_cpsa_from_tcb a15, a3 /* After this, pointer to CP save area is in a15, a3 is destroyed */
ret
1: movi a15, 0

View file

@ -107,8 +107,8 @@
Define for workaround: pin no-cpu-affinity tasks to a cpu when fpu is used.
Please change this when the tcb structure is changed
*/
#define TASKTCB_XCOREID_OFFSET (0x38+configMAX_TASK_NAME_LEN+3)&~3
.extern pxCurrentTCBs
.extern offset_uxCoreAffinityMask
/*
--------------------------------------------------------------------------------
@ -908,35 +908,34 @@ _xt_coproc_exc:
/* Get co-processor state save area of new owner thread. */
call0 XT_RTOS_CP_STATE /* a15 = new owner's save area */
#ifndef CONFIG_FREERTOS_FPU_IN_ISR
beqz a15, .L_goto_invalid
#if CONFIG_FREERTOS_FPU_IN_ISR
beqz a15, .L_skip_core_pin /* CP used in ISR, skip task pinning */
#else
beqz a15, .L_goto_invalid /* not in a thread (invalid) */
#endif
/*When FPU in ISR is enabled we could deal with zeroed a15 */
/* CP operations are incompatible with unpinned tasks. Thus we pin the task
to the current running core. */
movi a2, pxCurrentTCBs
getcoreid a3 /* a3 = current core ID */
addx4 a2, a3, a2
l32i a2, a2, 0 /* a2 = start of pxCurrentTCBs[cpuid] */
movi a4, offset_uxCoreAffinityMask
l32i a4, a4, 0 /* a4 = offset_uxCoreAffinityMask */
add a2, a2, a4 /* a2 = &TCB.uxCoreAffinityMask */
ssl a3 /* Use core ID as shift amount */
movi a4, 1
sll a4, a4 /* a4 = uxCoreAffinityMask = (1 << core ID) */
s32i a4, a2, 0 /* Store affinity mask to TCB.uxCoreAffinityMask */
#if CONFIG_FREERTOS_FPU_IN_ISR
.L_skip_core_pin:
#endif
/* Enable the co-processor's bit in CPENABLE. */
movi a0, _xt_coproc_mask
rsr a4, CPENABLE /* a4 = CPENABLE */
addx4 a0, a5, a0 /* a0 = &_xt_coproc_mask[n] */
l32i a0, a0, 0 /* a0 = (n << 16) | (1 << n) */
/* FPU operations are incompatible with non-pinned tasks. If we have a FPU operation
here, to keep the entire thing from crashing, it's better to pin the task to whatever
core we're running on now. */
movi a2, pxCurrentTCBs
getcoreid a3
addx4 a2, a3, a2
l32i a2, a2, 0 /* a2 = start of pxCurrentTCBs[cpuid] */
addi a2, a2, TASKTCB_XCOREID_OFFSET /* offset to xCoreID in tcb struct */
s32i a3, a2, 0 /* store current cpuid */
/* Grab correct xt_coproc_owner_sa for this core */
movi a2, XCHAL_CP_MAX << 2
mull a2, a2, a3 /* multiply by current processor id */
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
add a3, a3, a2 /* a3 = owner area needed for this processor */
extui a2, a0, 0, 16 /* coprocessor bitmask portion */
or a4, a4, a2 /* a4 = CPENABLE | (1 << n) */
wsr a4, CPENABLE
@ -946,7 +945,12 @@ Keep loading _xt_coproc_owner_sa[n] atomic (=load once, then use that value
everywhere): _xt_coproc_release assumes it works like this in order not to need
locking.
*/
/* Grab correct xt_coproc_owner_sa for this core */
getcoreid a3 /* a3 = current core ID */
movi a2, XCHAL_CP_MAX << 2
mull a2, a2, a3 /* multiply by current processor id */
movi a3, _xt_coproc_owner_sa /* a3 = base of owner array */
add a3, a3, a2 /* a3 = owner area needed for this processor */
/* Get old coprocessor owner thread (save area ptr) and assign new one. */
addx4 a3, a5, a3 /* a3 = &_xt_coproc_owner_sa[n] */

View file

@ -2835,13 +2835,6 @@ static BaseType_t prvCreateIdleTasks( void )
#endif
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
#ifdef ESP_PLATFORM
#if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 )
//Don't forget to pin the created IDLE tasks
vTaskCoreAffinitySet(xIdleTaskHandle[ xCoreID ], (1 << xCoreID) );
#endif
#endif
}
return xReturn;