Merge branch 'feature/dualcore_spi_flash_api' into 'master'

Dual core compatible spi flash APIs

Implementation of SPI flash APIs that work in dual core mode.

This also includes other changes:
- fixes to FreeRTOS tasks.c made by Jeroen and me. These were originally made to fix issues with high-priority tasks seen in the WiFi stack, but they were also required to get the code in ipc.c working.
- the initial approach with two high-priority tasks was split out into a separate API, esp_ipc.h. That API lets one run a function on the other CPU from a high-priority task (see the sketch below). Because we don't have preemption across CPUs, this is currently not very fast: on average it takes half a tick until the second CPU switches to the high-priority task. Currently the API can be either blocking, or non-blocking with a queue length of 1. I haven't yet found a use case for an arbitrarily sized queue of IPC requests; anyone who needs that can trivially add it later by replacing the binary semaphore with a generic queue.
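For illustration, here is a minimal sketch of how a caller might use the new API (set_flag, s_flag and example are hypothetical names, not part of this change):

#include "esp_ipc.h"

static volatile int s_flag = 0;

// Runs on the other CPU, in the context of the high-priority ipc task
static void set_flag(void* arg)
{
    *((volatile int*) arg) = 1;
}

void example(void)
{
    // returns as soon as the ipc task on CPU 1 has been woken up
    esp_ipc_call(1, &set_flag, (void*) &s_flag);
    // returns only after set_flag has finished running on CPU 1
    esp_ipc_call_blocking(1, &set_flag, (void*) &s_flag);
}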

With these changes I can run an application with the WiFi stack and NVS on both cores.

See merge request !25
pull/21/head
Ivan Grokhotkov 2016-09-13 13:58:54 +08:00
commit f9013d9ff8
10 changed files with 488 additions and 82 deletions

View file

@@ -7,7 +7,6 @@ config WIFI_ENABLED
This compiles in the low-level WiFi stack.
Temporarily, this option requires that FreeRTOS runs in single core mode.
depends on FREERTOS_UNICORE
config WIFI_AUTO_STARTUP
bool "Start WiFi with system startup"

View file

@@ -36,6 +36,8 @@
#include "esp_spi_flash.h"
#include "nvs_flash.h"
#include "esp_event.h"
#include "esp_spi_flash.h"
#include "esp_ipc.h"
static void IRAM_ATTR user_start_cpu0(void);
static void IRAM_ATTR call_user_start_cpu1();
@@ -180,37 +182,38 @@ void IRAM_ATTR call_user_start_cpu1() {
user_start_cpu1();
}
extern volatile int port_xSchedulerRunning;
extern int xPortStartScheduler();
extern volatile int port_xSchedulerRunning[2];
void user_start_cpu1(void) {
//Wait for the freertos initialization is finished on CPU0
while (port_xSchedulerRunning == 0) ;
ets_printf("Core0 started initializing FreeRTOS. Jumping to scheduler.\n");
//Okay, start the scheduler!
void IRAM_ATTR user_start_cpu1(void) {
// Wait for FreeRTOS initialization to finish on PRO CPU
while (port_xSchedulerRunning[0] == 0) {
;
}
ets_printf("Starting scheduler on APP CPU.\n");
xPortStartScheduler();
}
extern void (*__init_array_start)(void);
extern void (*__init_array_end)(void);
extern esp_err_t app_main();
static void do_global_ctors(void) {
void (**p)(void);
for(p = &__init_array_start; p != &__init_array_end; ++p)
(*p)();
}
extern esp_err_t app_main();
void user_start_cpu0(void) {
ets_setup_syscalls();
do_global_ctors();
esp_ipc_init();
spi_flash_init();
#if CONFIG_WIFI_ENABLED
ets_printf("nvs_flash_init\n");
esp_err_t ret = nvs_flash_init(5, 3);
if (ret != ESP_OK) {
ets_printf("nvs_flash_init fail, ret=%d\n", ret);
printf("nvs_flash_init failed, ret=%d\n", ret);
}
system_init();
@@ -227,6 +230,7 @@ void user_start_cpu0(void) {
app_main();
#endif
ets_printf("Starting scheduler on PRO CPU.\n");
vTaskStartScheduler();
}

View file

@@ -27,7 +27,10 @@ typedef int32_t esp_err_t;
#define ESP_OK 0
#define ESP_FAIL -1
#define ESP_ERR_NO_MEM 0x101
#define ESP_ERR_NO_MEM 0x101
#define ESP_ERR_INVALID_ARG 0x102
#define ESP_ERR_INVALID_STATE 0x103
#ifdef __cplusplus
}

View file

@@ -0,0 +1,88 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef __ESP_IPC_H__
#define __ESP_IPC_H__
#include <esp_err.h>
typedef void (*esp_ipc_func_t)(void* arg);
/**
* @brief Inter-processor call APIs
*
* FreeRTOS provides several APIs which can be used to communicate between
* different tasks, including tasks running on different CPUs.
* This module provides additional APIs to run some code on the other CPU.
*
* These APIs can only be used when the FreeRTOS scheduler is running.
*/
/**
* @brief Initialize inter-processor call module.
*
* This function starts two tasks, one on each CPU. These tasks are started
* with high priority. They are normally inactive, waiting until one of
* the esp_ipc_call_* functions is used. One of these tasks will be
* woken up to execute the callback provided to esp_ipc_call or
* esp_ipc_call_blocking.
*/
void esp_ipc_init();
/**
* @brief Execute function on the given CPU
*
* This will wake a high-priority task on the CPU indicated by the cpu_id argument,
* and run func(arg) in the context of that task.
* This function returns as soon as the high-priority task is woken up.
* If another IPC call is already being executed, this function will also wait
* for it to complete.
*
* In single-core mode, returns ESP_ERR_INVALID_ARG for cpu_id 1.
*
* @param cpu_id CPU where function should be executed (0 or 1)
* @param func pointer to a function which should be executed
* @param arg arbitrary argument to be passed into function
*
* @return ESP_ERR_INVALID_ARG if cpu_id is invalid
* ESP_ERR_INVALID_STATE if FreeRTOS scheduler is not running
* ESP_OK otherwise
*/
esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg);
/**
* @brief Execute function on the given CPU and wait for it to finish
*
* This will wake a high-priority task on the CPU indicated by the cpu_id argument,
* and run func(arg) in the context of that task.
* This function waits for func to return.
*
* In single-core mode, returns ESP_ERR_INVALID_ARG for cpu_id 1.
*
* @param cpu_id CPU where function should be executed (0 or 1)
* @param func pointer to a function which should be executed
* @param arg arbitrary argument to be passed into function
*
* @return ESP_ERR_INVALID_ARG if cpu_id is invalid
* ESP_ERR_INVALID_STATE if FreeRTOS scheduler is not running
* ESP_OK otherwise
*/
esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg);
#endif /* __ESP_IPC_H__ */

View file

@@ -0,0 +1,117 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "esp_err.h"
#include "esp_ipc.h"
#include "esp_attr.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
static TaskHandle_t s_ipc_tasks[portNUM_PROCESSORS]; // Two high priority tasks, one for each CPU
static SemaphoreHandle_t s_ipc_mutex; // This mutex is used as a global lock for esp_ipc_* APIs
static SemaphoreHandle_t s_ipc_sem[portNUM_PROCESSORS]; // Two semaphores used to wake each of s_ipc_tasks
static SemaphoreHandle_t s_ipc_ack; // Semaphore used to acknowledge that the task was woken up,
// or that the function has finished running
static volatile esp_ipc_func_t s_func; // Function which should be called by high priority task
static void * volatile s_func_arg; // Argument to pass into s_func
typedef enum {
IPC_WAIT_FOR_START,
IPC_WAIT_FOR_END
} esp_ipc_wait_t;
static volatile esp_ipc_wait_t s_ipc_wait; // This variable tells the high priority task when it should give
// the s_ipc_ack semaphore: before s_func is called, or
// after it returns
static void IRAM_ATTR ipc_task(void* arg)
{
const uint32_t cpuid = (uint32_t) arg;
assert(cpuid == xPortGetCoreID());
while (true) {
// Wait for IPC to be initiated.
// This will be indicated by giving the semaphore corresponding to
// this CPU.
if (xSemaphoreTake(s_ipc_sem[cpuid], portMAX_DELAY) != pdTRUE) {
// TODO: when can this happen?
abort();
}
esp_ipc_func_t func = s_func;
void* arg = s_func_arg;
if (s_ipc_wait == IPC_WAIT_FOR_START) {
xSemaphoreGive(s_ipc_ack);
}
(*func)(arg);
if (s_ipc_wait == IPC_WAIT_FOR_END) {
xSemaphoreGive(s_ipc_ack);
}
}
// TODO: currently this is unreachable code. Introduce esp_ipc_uninit
// function which will signal to both tasks that they can shut down.
// Not critical at this point, we don't have a use case for stopping
// IPC yet.
// Also need to delete the semaphore here.
vTaskDelete(NULL);
}
void esp_ipc_init()
{
s_ipc_mutex = xSemaphoreCreateMutex();
s_ipc_ack = xSemaphoreCreateBinary();
const char* task_names[2] = {"ipc0", "ipc1"};
for (int i = 0; i < portNUM_PROCESSORS; ++i) {
s_ipc_sem[i] = xSemaphoreCreateBinary();
xTaskCreatePinnedToCore(ipc_task, task_names[i], XT_STACK_MIN_SIZE, (void*) i,
configMAX_PRIORITIES - 1, &s_ipc_tasks[i], i);
}
}
static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, void* arg, esp_ipc_wait_t wait_for)
{
if (cpu_id >= portNUM_PROCESSORS) {
return ESP_ERR_INVALID_ARG;
}
if (xTaskGetSchedulerState() != taskSCHEDULER_RUNNING) {
return ESP_ERR_INVALID_STATE;
}
xSemaphoreTake(s_ipc_mutex, portMAX_DELAY);
s_func = func;
s_func_arg = arg;
s_ipc_wait = IPC_WAIT_FOR_START;
xSemaphoreGive(s_ipc_sem[cpu_id]);
xSemaphoreTake(s_ipc_ack, portMAX_DELAY);
xSemaphoreGive(s_ipc_mutex);
return ESP_OK;
}
esp_err_t esp_ipc_call(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_START);
}
esp_err_t esp_ipc_call_blocking(uint32_t cpu_id, esp_ipc_func_t func, void* arg)
{
return esp_ipc_call_and_wait(cpu_id, func, arg, IPC_WAIT_FOR_END);
}
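As noted in the commit message, the binary semaphore could later be replaced with a generic queue to support multiple outstanding IPC requests. A rough sketch of that extension (names and queue length are hypothetical, not part of this commit):

typedef struct {
    esp_ipc_func_t func;
    void* arg;
} ipc_call_t;

// hypothetical: would replace s_ipc_sem, s_func and s_func_arg
static QueueHandle_t s_ipc_queue[portNUM_PROCESSORS];

// in esp_ipc_init():
//     s_ipc_queue[i] = xQueueCreate(4, sizeof(ipc_call_t));
// in ipc_task():
//     ipc_call_t call;
//     xQueueReceive(s_ipc_queue[cpuid], &call, portMAX_DELAY);
//     (*call.func)(call.arg);
// in esp_ipc_call():
//     ipc_call_t call = { func, arg };
//     xQueueSend(s_ipc_queue[cpu_id], &call, portMAX_DELAY);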

View file

@@ -167,6 +167,9 @@ typedef struct {
#define portDISABLE_INTERRUPTS() do { XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); } while (0)
#define portENABLE_INTERRUPTS() do { portbenchmarkINTERRUPT_RESTORE(0); XTOS_SET_INTLEVEL(0); } while (0)
#define portASSERT_IF_IN_ISR() vPortAssertIfInISR()
void vPortAssertIfInISR();
#define portCRITICAL_NESTING_IN_TCB 1
/*
@@ -213,6 +216,7 @@ portBASE_TYPE vPortCPUReleaseMutex(portMUX_TYPE *mux);
// Cleaner and preferred solution allows nested interrupts disabling and restoring via local registers or stack.
// They can be called from interrupts too.
//NOT SMP-COMPATIBLE! Use only if all you want is to disable the interrupts locally!
static inline unsigned portENTER_CRITICAL_NESTED() { unsigned state = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL); portbenchmarkINTERRUPT_DISABLE(); return state; }
#define portEXIT_CRITICAL_NESTED(state) do { portbenchmarkINTERRUPT_RESTORE(state); XTOS_RESTORE_JUST_INTLEVEL(state); } while (0)
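For illustration, a minimal use of this macro pair on a single CPU (a sketch, not code from this diff):

static void local_irq_example(void)
{
    // disable interrupts on this CPU and remember the previous interrupt level
    unsigned state = portENTER_CRITICAL_NESTED();
    // ... touch data shared only with ISRs on this CPU ...
    portEXIT_CRITICAL_NESTED(state);  // restore the saved interrupt level
}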

View file

@@ -248,14 +248,17 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS *xMPUSettings, const struct xMEMOR
#endif
void vPortAssertIfInISR()
{
configASSERT(port_interruptNesting[xPortGetCoreID()]==0)
}
/*
* Wrapper for the Xtensa compare-and-set instruction. This subroutine will atomically compare
* *mux to compare, and if it's the same, will set *mux to set. It will return the old value
* of *mux.
*
* Note: the NOPs are needed on the ESP31 processor but superfluous on the ESP32.
*
* Warning: From the ISA docs: in some (unspecified) cases, the s32c1i instruction may return the
* *bitwise inverse* of the old mem if the mem wasn't written. This doesn't seem to happen on the
* ESP32, though. (Would show up directly if it did because the magic wouldn't match.)

View file

@@ -163,6 +163,7 @@ typedef struct tskTaskControlBlock
#if ( portCRITICAL_NESTING_IN_TCB == 1 )
UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
uint32_t uxOldInterruptState; /*< Interrupt state before the outer taskEnterCritical was called */
#endif
#if ( configUSE_TRACE_FACILITY == 1 )
@@ -2595,8 +2596,7 @@ BaseType_t xReturn;
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
called from a critical section within an ISR. */
//That makes the taskENTER_CRITICALs here unnecessary, right? -JD
// taskENTER_CRITICAL(&xTaskQueueMutex);
taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
/* The event list is sorted in priority order, so the first in the list can
be removed as it is known to be the highest priority. Remove the TCB from
the delayed list, and add it to the ready list.
@@ -2654,7 +2654,7 @@ BaseType_t xReturn;
prvResetNextTaskUnblockTime();
}
#endif
// taskEXIT_CRITICAL(&xTaskQueueMutex);
taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
return xReturn;
}
@@ -3614,6 +3614,7 @@ In fact, nothing below this line has/is.
{
if( pxTCB->uxPriority < pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
{
taskENTER_CRITICAL(&xTaskQueueMutex);
/* Adjust the mutex holder state to account for its new
priority. Only reset the event list item value if the value is
not being used for anything else. */
@@ -3649,6 +3650,8 @@ In fact, nothing below this line has/is.
pxTCB->uxPriority = pxCurrentTCB[ xPortGetCoreID() ]->uxPriority;
}
taskEXIT_CRITICAL(&xTaskQueueMutex);
traceTASK_PRIORITY_INHERIT( pxTCB, pxCurrentTCB[ xPortGetCoreID() ]->uxPriority );
}
else
@@ -3686,6 +3689,7 @@ In fact, nothing below this line has/is.
/* Only disinherit if no other mutexes are held. */
if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
{
taskENTER_CRITICAL(&xTaskQueueMutex);
/* A task can only have an inherited priority if it holds
the mutex. If the mutex is held by a task then it cannot be
given from an interrupt, and if a mutex is given by the
@@ -3720,6 +3724,7 @@ In fact, nothing below this line has/is.
switch should occur when the last mutex is returned whether
a task is waiting on it or not. */
xReturn = pdTRUE;
taskEXIT_CRITICAL(&xTaskQueueMutex);
}
else
{
@@ -3761,7 +3766,14 @@ scheduler will re-enable the interrupts instead. */
void vTaskEnterCritical( portMUX_TYPE *mux )
#endif
{
portDISABLE_INTERRUPTS();
BaseType_t oldInterruptLevel=0;
if( xSchedulerRunning != pdFALSE )
{
//Interrupts may already be disabled (because we're doing this recursively), but we can't get the interrupt level after
//vPortCPUAcquireMutex, because it may also mess with interrupts. Get it here first, then later figure out if we're nesting
//and save for real there.
oldInterruptLevel=portENTER_CRITICAL_NESTED();
}
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
vPortCPUAcquireMutex( mux, function, line );
#else
@@ -3772,16 +3784,30 @@ scheduler will re-enable the interrupts instead. */
{
( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting )++;
/* This is not the interrupt safe version of the enter critical
if( xSchedulerRunning != pdFALSE && pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 1 )
{
//This is the first time we get called. Save original interrupt level.
pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState=oldInterruptLevel;
}
/* Original FreeRTOS comment, saved for reference:
This is not the interrupt safe version of the enter critical
function so assert() if it is being called from an interrupt
context. Only API functions that end in "FromISR" can be used in an
interrupt. Only assert if the critical nesting count is 1 to
protect against recursive calls if the assert function also uses a
critical section. */
/* DISABLED in the esp32 port - because of SMP, vTaskEnterCritical
has to be used in way more places than before, and some are called
both from ISR as well as non-ISR code, thus we re-organized
vTaskEnterCritical to also work in ISRs. */
#if 0
if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 1 )
{
portASSERT_IF_IN_ISR();
}
#endif
}
else
@@ -3814,7 +3840,7 @@ scheduler will re-enable the interrupts instead. */
if( pxCurrentTCB[ xPortGetCoreID() ]->uxCriticalNesting == 0U )
{
portENABLE_INTERRUPTS();
portEXIT_CRITICAL_NESTED(pxCurrentTCB[ xPortGetCoreID() ]->uxOldInterruptState);
}
else
{
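To illustrate the effect of this change: the interrupt state is saved into the TCB only when the critical nesting count goes from 0 to 1, and restored only when it drops back to 0, so nested critical sections behave as expected. A minimal caller-level sketch (the mux initializer macro is assumed, not shown in this diff):

static portMUX_TYPE s_mux = portMUX_INITIALIZER_UNLOCKED;  // assumed initializer macro

void nested_critical_example(void)
{
    taskENTER_CRITICAL(&s_mux);  // nesting 0 -> 1: current interrupt level saved in the TCB
    taskENTER_CRITICAL(&s_mux);  // nesting 1 -> 2: interrupts already disabled, nothing saved
    taskEXIT_CRITICAL(&s_mux);   // nesting 2 -> 1: interrupts stay disabled
    taskEXIT_CRITICAL(&s_mux);   // nesting 1 -> 0: saved level restored via portEXIT_CRITICAL_NESTED
}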

View file

@@ -12,15 +12,204 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdlib.h>
#include <assert.h>
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
#include <esp_spi_flash.h>
#include <rom/spi_flash.h>
#include <rom/cache.h>
#include <esp_attr.h>
#include <soc/soc.h>
#include <soc/dport_reg.h>
#include "sdkconfig.h"
#include "esp_ipc.h"
#include "esp_attr.h"
#include "esp_spi_flash.h"
/*
Driver for SPI flash read/write/erase operations
In order to perform some flash operations, we need to make sure both CPUs
are not running any code from flash for the duration of the flash operation.
In a single-core setup this is easy: we disable interrupts/scheduler and do
the flash operation. In the dual-core setup this is slightly more complicated.
We need to make sure that the other CPU doesn't run any code from flash.
When the SPI flash API is called on CPU A (can be PRO or APP), we start the
spi_flash_op_block_func function on CPU B using the esp_ipc_call API. This API
wakes up a high priority task on CPU B and tells it to execute a given function,
in this case spi_flash_op_block_func. This function disables cache on CPU B and
signals that the cache is disabled by setting the s_flash_op_can_start flag.
Then the task on CPU A disables cache as well and proceeds to execute the flash
operation.
While the flash operation is running, interrupts can still run on CPU B.
We assume that all interrupt code is placed into RAM.
Once the flash operation is complete, the function on CPU A sets another flag,
s_flash_op_complete, to let the task on CPU B know that it can re-enable the
cache and release the CPU. Then the function on CPU A re-enables the cache on
CPU A as well and returns control to the calling code.
Additionally, all API functions are protected with a mutex (s_flash_op_mutex).
In a single core environment (CONFIG_FREERTOS_UNICORE enabled), we simply
disable both caches; no inter-CPU communication takes place.
*/
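/*
 A condensed timeline of the handshake described above (illustration only,
 derived from the functions below; flag names abbreviated):

   CPU A (caller)                           CPU B (ipc task)
   --------------                           ----------------
   take s_flash_op_mutex
   can_start = false; complete = false
   esp_ipc_call(B, op_block_func)   ---->   vTaskSuspendAll()
   spin while (!can_start)                  disable cache on B
                                            can_start = true
   vTaskSuspendAll()                        spin while (!complete)
   disable cache on A
   ... perform SPI flash operation ...
   restore cache on A
   complete = true                  ---->   restore cache on B
   xTaskResumeAll()                         xTaskResumeAll()
   give s_flash_op_mutex
*/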
static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc);
static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state);
static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state);
static uint32_t s_flash_op_cache_state[2];
#ifndef CONFIG_FREERTOS_UNICORE
static SemaphoreHandle_t s_flash_op_mutex;
static bool s_flash_op_can_start = false;
static bool s_flash_op_complete = false;
#endif //CONFIG_FREERTOS_UNICORE
#ifndef CONFIG_FREERTOS_UNICORE
static void IRAM_ATTR spi_flash_op_block_func(void* arg)
{
// Disable scheduler on this CPU
vTaskSuspendAll();
uint32_t cpuid = (uint32_t) arg;
// Disable cache so that flash operation can start
spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
s_flash_op_can_start = true;
while (!s_flash_op_complete) {
// until we have a way to use interrupts for inter-CPU communication,
// busy loop here and wait for the other CPU to finish flash operation
}
// Flash operation is complete, re-enable cache
spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
// Re-enable scheduler
xTaskResumeAll();
}
void spi_flash_init()
{
s_flash_op_mutex = xSemaphoreCreateMutex();
}
static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu()
{
// Take the API lock
xSemaphoreTake(s_flash_op_mutex, portMAX_DELAY);
const uint32_t cpuid = xPortGetCoreID();
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
// Scheduler hasn't been started yet, which means that the spi_flash API is being
// called from the 2nd stage bootloader or from user_start_cpu0, i.e. from
// PRO CPU. APP CPU is either in reset or spinning inside user_start_cpu1,
// which is in IRAM. So it is safe to disable cache for the other_cpuid here.
assert(other_cpuid == 1);
spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]);
} else {
// Signal to the spi_flash_op_block_task on the other CPU that we need it to
// disable cache there and block other tasks from executing.
s_flash_op_can_start = false;
s_flash_op_complete = false;
esp_ipc_call(other_cpuid, &spi_flash_op_block_func, (void*) other_cpuid);
while (!s_flash_op_can_start) {
// Busy loop and wait for spi_flash_op_block_func to disable cache
// on the other CPU
}
// Disable scheduler on CPU cpuid
vTaskSuspendAll();
// This is guaranteed to run on CPU <cpuid> because the other CPU is now
// occupied by the highest priority task
assert(xPortGetCoreID() == cpuid);
}
// Disable cache on this CPU as well
spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
}
static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu()
{
const uint32_t cpuid = xPortGetCoreID();
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
// Re-enable cache on this CPU
spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
// Scheduler is not running yet — this means we are running on PRO CPU.
// other_cpuid is APP CPU, and it is either in reset or is spinning in
// user_start_cpu1, which is in IRAM. So we can simply reenable cache.
assert(other_cpuid == 1);
spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]);
} else {
// Signal to spi_flash_op_block_task that flash operation is complete
s_flash_op_complete = true;
// Resume tasks on the current CPU
xTaskResumeAll();
}
// Release API lock
xSemaphoreGive(s_flash_op_mutex);
}
#else // CONFIG_FREERTOS_UNICORE
void spi_flash_init()
{
// No-op in single core mode
}
static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu()
{
vTaskSuspendAll();
spi_flash_disable_cache(0, &s_flash_op_cache_state[0]);
}
static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu()
{
spi_flash_restore_cache(0, s_flash_op_cache_state[0]);
xTaskResumeAll();
}
#endif // CONFIG_FREERTOS_UNICORE
esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec)
{
spi_flash_disable_interrupts_caches_and_other_cpu();
SpiFlashOpResult rc;
rc = SPIUnlock();
if (rc == SPI_FLASH_RESULT_OK) {
rc = SPIEraseSector(sec);
}
spi_flash_enable_interrupts_caches_and_other_cpu();
return spi_flash_translate_rc(rc);
}
esp_err_t IRAM_ATTR spi_flash_write(uint32_t dest_addr, const uint32_t *src, uint32_t size)
{
spi_flash_disable_interrupts_caches_and_other_cpu();
SpiFlashOpResult rc;
rc = SPIUnlock();
if (rc == SPI_FLASH_RESULT_OK) {
rc = SPIWrite(dest_addr, src, (int32_t) size);
}
spi_flash_enable_interrupts_caches_and_other_cpu();
return spi_flash_translate_rc(rc);
}
esp_err_t IRAM_ATTR spi_flash_read(uint32_t src_addr, uint32_t *dest, uint32_t size)
{
spi_flash_disable_interrupts_caches_and_other_cpu();
SpiFlashOpResult rc;
rc = SPIRead(src_addr, dest, (int32_t) size);
spi_flash_enable_interrupts_caches_and_other_cpu();
return spi_flash_translate_rc(rc);
}
static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc)
{
@@ -35,72 +224,36 @@ static esp_err_t spi_flash_translate_rc(SpiFlashOpResult rc)
}
}
extern void Cache_Flush(int);
static const uint32_t cache_mask = DPORT_APP_CACHE_MASK_OPSDRAM | DPORT_APP_CACHE_MASK_DROM0 |
DPORT_APP_CACHE_MASK_DRAM1 | DPORT_APP_CACHE_MASK_IROM0 |
DPORT_APP_CACHE_MASK_IRAM1 | DPORT_APP_CACHE_MASK_IRAM0;
static void IRAM_ATTR spi_flash_disable_interrupts_caches_and_app_cpu()
static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t* saved_state)
{
vTaskSuspendAll();
// SET_PERI_REG_MASK(APPCPU_CTRL_REG_C, DPORT_APPCPU_RUNSTALL);
Cache_Read_Disable(0);
Cache_Read_Disable(1);
}
static void IRAM_ATTR spi_flash_enable_interrupts_caches_and_app_cpu()
{
Cache_Read_Enable(0);
Cache_Read_Enable(1);
// CLEAR_PERI_REG_MASK(APPCPU_CTRL_REG_C, DPORT_APPCPU_RUNSTALL);
xTaskResumeAll();
}
esp_err_t IRAM_ATTR spi_flash_erase_sector(uint16_t sec)
{
spi_flash_disable_interrupts_caches_and_app_cpu();
SpiFlashOpResult rc;
if (xPortGetCoreID() == 1) {
rc = SPI_FLASH_RESULT_ERR;
}
else {
rc = SPIUnlock();
if (rc == SPI_FLASH_RESULT_OK) {
rc = SPIEraseSector(sec);
uint32_t ret = 0;
if (cpuid == 0) {
ret |= GET_PERI_REG_BITS2(PRO_CACHE_CTRL1_REG, cache_mask, 0);
while (GET_PERI_REG_BITS2(PRO_DCACHE_DBUG_REG0, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) {
;
}
}
Cache_Flush(0);
Cache_Flush(1);
spi_flash_enable_interrupts_caches_and_app_cpu();
return spi_flash_translate_rc(rc);
}
esp_err_t IRAM_ATTR spi_flash_write(uint32_t dest_addr, const uint32_t *src, uint32_t size)
{
spi_flash_disable_interrupts_caches_and_app_cpu();
SpiFlashOpResult rc;
if (xPortGetCoreID() == 1) {
rc = SPI_FLASH_RESULT_ERR;
}
else {
rc = SPIUnlock();
if (rc == SPI_FLASH_RESULT_OK) {
rc = SPIWrite(dest_addr, src, (int32_t) size);
SET_PERI_REG_BITS(PRO_CACHE_CTRL_REG, 1, 0, DPORT_PRO_CACHE_ENABLE_S);
} else {
ret |= GET_PERI_REG_BITS2(APP_CACHE_CTRL1_REG, cache_mask, 0);
while (GET_PERI_REG_BITS2(APP_DCACHE_DBUG_REG0, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) {
;
}
SET_PERI_REG_BITS(APP_CACHE_CTRL_REG, 1, 0, DPORT_APP_CACHE_ENABLE_S);
}
Cache_Flush(0);
Cache_Flush(1);
spi_flash_enable_interrupts_caches_and_app_cpu();
return spi_flash_translate_rc(rc);
*saved_state = ret;
}
esp_err_t IRAM_ATTR spi_flash_read(uint32_t src_addr, uint32_t *dest, uint32_t size)
static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state)
{
spi_flash_disable_interrupts_caches_and_app_cpu();
SpiFlashOpResult rc;
if (xPortGetCoreID() == 1) {
rc = SPI_FLASH_RESULT_ERR;
if (cpuid == 0) {
SET_PERI_REG_BITS(PRO_CACHE_CTRL_REG, 1, 1, DPORT_PRO_CACHE_ENABLE_S);
SET_PERI_REG_BITS(PRO_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
} else {
SET_PERI_REG_BITS(APP_CACHE_CTRL_REG, 1, 1, DPORT_APP_CACHE_ENABLE_S);
SET_PERI_REG_BITS(APP_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
}
else {
rc = SPIRead(src_addr, dest, (int32_t) size);
}
spi_flash_enable_interrupts_caches_and_app_cpu();
return spi_flash_translate_rc(rc);
}
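To round this off, a minimal usage sketch of the public API (the address 0x100000 is a placeholder for a flash region not used by the running application; it is not taken from this commit):

#include "esp_spi_flash.h"

esp_err_t flash_roundtrip_example(void)
{
    const uint32_t addr = 0x100000;  // hypothetical free flash region
    uint32_t wbuf[8] = { 0xA5A5A5A5, 1, 2, 3, 4, 5, 6, 7 };
    uint32_t rbuf[8] = { 0 };

    esp_err_t err = spi_flash_erase_sector(addr / SPI_FLASH_SEC_SIZE);
    if (err == ESP_OK) {
        err = spi_flash_write(addr, wbuf, sizeof(wbuf));
    }
    if (err == ESP_OK) {
        err = spi_flash_read(addr, rbuf, sizeof(rbuf));
    }
    // on success, rbuf now matches wbuf; this works the same way from
    // either CPU, which is the point of this change
    return err;
}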

View file

@@ -28,6 +28,15 @@ extern "C" {
#define SPI_FLASH_SEC_SIZE 4096 /**< SPI Flash sector size */
/**
* @brief Initialize SPI flash access driver
*
* This function must be called exactly once, before any other
* spi_flash_* functions are called.
*
*/
void spi_flash_init();
/**
* @brief Erase the Flash sector.
*