esp_mm: new virtual memory mapping apis via mmu

pull/10730/head
Armando 2022-11-02 19:11:45 +08:00
parent 1fc55af6af
commit d6844051fc
91 changed files with 3461 additions and 723 deletions

View file

@ -91,6 +91,7 @@
/components/esp_hw_support/ @esp-idf-codeowners/system @esp-idf-codeowners/peripherals
/components/esp_lcd/ @esp-idf-codeowners/peripherals
/components/esp_local_ctrl/ @esp-idf-codeowners/app-utilities
/components/esp_mm/ @esp-idf-codeowners/peripherals
/components/esp_netif/ @esp-idf-codeowners/network
/components/esp_netif_stack/ @esp-idf-codeowners/network
/components/esp_partition/ @esp-idf-codeowners/storage

View file

@ -139,8 +139,8 @@ static const char *TAG = "bootloader_flash";
63rd block for bootloader_flash_read
*/
#define MMU_BLOCK0_VADDR SOC_DROM_LOW
#define MMU_SIZE (DRAM0_CACHE_ADDRESS_HIGH - DRAM0_CACHE_ADDRESS_LOW - SPI_FLASH_MMU_PAGE_SIZE) // This mmu size means that the mmu size to be mapped
#define MMU_BLOCK63_VADDR (MMU_BLOCK0_VADDR + MMU_SIZE)
#define MMAP_MMU_SIZE (DRAM0_CACHE_ADDRESS_HIGH - DRAM0_CACHE_ADDRESS_LOW) // Full mappable vaddr size of the cache window
#define MMU_BLOCK63_VADDR (MMU_BLOCK0_VADDR + MMAP_MMU_SIZE - SPI_FLASH_MMU_PAGE_SIZE)
#define FLASH_READ_VADDR MMU_BLOCK63_VADDR
#endif
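/*
 * Worked example of the macros above, assuming illustrative values (a 4 MB
 * cache window at 0x3F400000..0x3F800000 and a 64 KB MMU page; these numbers
 * are assumptions for this note, not taken from the diff):
 *   MMAP_MMU_SIZE     = 0x3F800000 - 0x3F400000 = 0x400000 (64 pages)
 *   MMU_BLOCK63_VADDR = MMU_BLOCK0_VADDR + 0x400000 - 0x10000
 * i.e. block 63, the final page of the window, reserved for
 * bootloader_flash_read(). MMAP_MMU_SIZE now names the full mappable window,
 * whereas the old MMU_SIZE baked the one-page reservation into the size itself.
 */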

View file

@ -466,11 +466,11 @@ static void test_rmt_multi_channels_trans(size_t channel0_mem_block_symbols, siz
#define TEST_RMT_CHANS 2
#define TEST_LED_NUM 24
#define TEST_STOP_TIME_NO_SYNCHRO_DELTA 150
#if CONFIG_IDF_TARGET_ESP32C6
#if CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C6
#define TEST_STOP_TIME_SYNCHRO_DELTA 400
#else
#define TEST_STOP_TIME_SYNCHRO_DELTA 10
#endif // CONFIG_IDF_TARGET_ESP32C6
#endif // #if CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C6
rmt_tx_channel_config_t tx_channel_cfg = {
.clk_src = RMT_CLK_SRC_DEFAULT,
.resolution_hz = 10000000, // 10MHz, 1 tick = 0.1us (led strip needs a high resolution)

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

View file

@ -0,0 +1,7 @@
# Documentation: .gitlab/ci/README.md#manifest-file-to-control-the-buildtest-apps
components/spi_flash/test_apps/mmap:
disable_test:
- if: IDF_TARGET == "esp32h2"
temporary: true
reason: h2 not supported yet

View file

@ -0,0 +1,24 @@
idf_build_get_property(target IDF_TARGET)
set(includes "include")
# Note: requires spi_flash for cache_utils, will be refactored
set(priv_requires heap spi_flash)
set(srcs)
if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
set(srcs "esp_mmu_map.c"
"port/${target}/ext_mem_layout.c")
endif()
idf_component_register(SRCS ${srcs}
INCLUDE_DIRS ${includes}
PRIV_REQUIRES ${priv_requires})
if(NOT BOOTLOADER_BUILD)
if(CONFIG_SPIRAM)
# Use esp_psram for `esp_psram_extram_writeback_cache()` on ESP32
idf_component_optional_requires(PRIVATE esp_psram)
endif()
endif()

View file

@ -0,0 +1,8 @@
menu "ESP Memory Management"
# Add MMU setting menu here
# Add Cache setting menu here
orsource "./Kconfig.mmap"
endmenu # ESP Memory Management

View file

@ -0,0 +1,3 @@
menu "MMAP Configuration"
endmenu

View file

@ -0,0 +1,720 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_heap_caps.h"
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/cache.h"
#endif
#include "esp_private/cache_utils.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif
#include "esp_private/esp_mmu_map_private.h"
#include "ext_mem_layout.h"
#include "esp_mmu_map.h"
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
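// e.g. ALIGN_UP_BY(0x1234, 0x1000) == 0x2000 (align must be a power of two)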
//This flag indicates that the memory region has been merged; we don't care about it anymore
#define MEM_REGION_MERGED -1
/**
* We have some hardware-related tests for vaddr region capabilities.
* Use this macro to disable the paddr check, as those tests need to reuse certain paddr blocks
*/
#define ENABLE_PADDR_CHECK !ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR
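//ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR is only injected by the mmap test app's CMakeLists (see below); in normal builds it is undefined, so ENABLE_PADDR_CHECK evaluates to 1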
static DRAM_ATTR const char *TAG = "mmap";
/**
* @brief MMU Memory Mapping Driver
*
* Driver Backgrounds:
*
* --------------------------------------------------------------------------------------------------------
* Memory Pool |
* --------------------------------------------------------------------------------------------------------
* | Memory Region 0 | Memory Region 1 | ... |
* --------------------------------------------------------------------------------------------------------
* | Block 0 | Slot 0 | Block 1 | Block 2 | ... | Slot 1 (final slot) | ... |
* --------------------------------------------------------------------------------------------------------
*
* - A block is a piece of vaddr range that is dynamically mapped. Blocks are doubly linked:
* Block 0 <-> Block 1 <-> Block 2
* - A Slot is the vaddr range between 2 blocks.
*/
/**
* Struct for a block
*/
typedef struct mem_block_ {
uint32_t laddr_start; //linear address start of this block
uint32_t laddr_end; //linear address end of this block
intptr_t vaddr_start; //virtual address start of this block
intptr_t vaddr_end; //virtual address end of this block
size_t size; //size of this block, should be aligned to MMU page size
int caps; //caps of this block, `mmu_mem_caps_t`
uint32_t paddr_start; //physical address start of this block
uint32_t paddr_end; //physical address end of this block
mmu_target_t target; //physical target that this block is mapped to
TAILQ_ENTRY(mem_block_) entries; //link entry
} mem_block_t;
/**
* Struct for a memory region
*/
typedef struct mem_region_ {
cache_bus_mask_t bus_id; //cache bus mask of this region
uint32_t start; //linear address start of this region
uint32_t end; //linear address end of this region
size_t region_size; //region size, in bytes
uint32_t free_head; //linear address free head of this region
size_t max_slot_size; //max slot size within this region
int caps; //caps of this region, `mmu_mem_caps_t`
mmu_target_t targets; //physical targets supported by this region
TAILQ_HEAD(mem_block_head_, mem_block_) mem_block_head; //link head of allocated blocks within this region
} mem_region_t;
typedef struct {
/**
* Number of available memory regions. After coalescing, this should be less than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`
*/
uint32_t num_regions;
/**
* This saves the available MMU linear address regions,
* after reserving flash .rodata and .text, and after coalescing.
* Only the first `num_regions` items are valid
*/
mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
{
/**
* We follow the way the 1st-stage bootloader loads flash .text:
*
* - IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are consecutive on all chips;
* we strongly rely on this to calculate the .text length
*/
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].region_size <= irom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].max_slot_size = 0;
irom_len_to_reserve -= hw_mem_regions[i].region_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
hw_mem_regions[i].max_slot_size -= irom_len_to_reserve;
}
}
}
}
static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
{
/**
* Similarly, we follow the way the 1st-stage bootloader loads flash .rodata:
*/
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].region_size <= drom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].max_slot_size = 0;
drom_len_to_reserve -= hw_mem_regions[i].region_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
hw_mem_regions[i].max_slot_size -= drom_len_to_reserve;
}
}
}
}
#endif //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
void esp_mmu_map_init(void)
{
mem_region_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
hw_mem_regions[i].region_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].max_slot_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
hw_mem_regions[i].targets = g_mmu_mem_regions[i].targets;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
assert(hw_mem_regions[i].region_size % CONFIG_MMU_PAGE_SIZE == 0);
}
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
//First reserve the memory regions used for irom and drom, as we must follow the way the 1st-stage bootloader loads them
s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
#endif //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
//Now we can coalesce adjacent regions
for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
mem_region_t *a = &hw_mem_regions[i - 1];
mem_region_t *b = &hw_mem_regions[i];
if ((b->free_head == a->end) && (b->caps == a->caps) && (b->targets == a->targets)) {
a->caps = MEM_REGION_MERGED;
b->bus_id |= a->bus_id;
b->start = a->start;
b->region_size += a->region_size;
b->free_head = a->free_head;
b->max_slot_size += a->max_slot_size;
}
}
}
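/*
 * Worked example of the coalescing pass above, with hypothetical numbers:
 * if region A covers laddr [0x04000000, 0x04400000) and region B starts right
 * at 0x04400000 with identical caps and targets, A is tagged MEM_REGION_MERGED
 * and B absorbs it: B->start becomes 0x04000000, B's region_size and
 * max_slot_size grow by A's, and B's bus mask ORs in A's.
 */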
//Count the mem regions left after coalescing
uint32_t region_num = 0;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps != MEM_REGION_MERGED) {
region_num++;
}
}
ESP_EARLY_LOGV(TAG, "after coalescing, %d regions are left", region_num);
//Initialise `s_mmu_ctx.mem_regions[]` now that all static allocations are done, to prepare the available virtual memory regions
uint32_t available_region_idx = 0;
s_mmu_ctx.num_regions = region_num;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
continue;
}
memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mem_region_t));
available_region_idx++;
}
for (int i = 0; i < available_region_idx; i++) {
TAILQ_INIT(&s_mmu_ctx.mem_regions[i].mem_block_head);
}
assert(available_region_idx == region_num);
}
static esp_err_t s_mem_caps_check(mmu_mem_caps_t caps)
{
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//Executable memory is not expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
caps |= MMU_MEM_CAP_32BIT;
}
return ESP_OK;
}
esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len)
{
ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
*out_len = 0;
size_t max = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if (((s_mmu_ctx.mem_regions[i].caps & caps) == caps) && ((s_mmu_ctx.mem_regions[i].targets & target) == target)) {
if (s_mmu_ctx.mem_regions[i].max_slot_size > max) {
max = s_mmu_ctx.mem_regions[i].max_slot_size;
}
}
}
*out_len = max;
return ESP_OK;
}
static int32_t s_find_available_region(mem_region_t *mem_regions, uint32_t region_nums, size_t size, mmu_mem_caps_t caps, mmu_target_t target)
{
int32_t found_region_id = -1;
for (int i = 0; i < region_nums; i++) {
if (((mem_regions[i].caps & caps) == caps) && ((mem_regions[i].targets & target) == target)) {
if (mem_regions[i].max_slot_size >= size) {
found_region_id = i;
break;
}
}
}
return found_region_id;
}
esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr)
{
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
uint32_t laddr = 0;
int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
if (found_region_id == -1) {
ESP_EARLY_LOGE(TAG, "no such vaddr range");
return ESP_ERR_NOT_FOUND;
}
laddr = (uint32_t)s_mmu_ctx.mem_regions[found_region_id].free_head;
s_mmu_ctx.mem_regions[found_region_id].free_head += aligned_size;
s_mmu_ctx.mem_regions[found_region_id].max_slot_size -= aligned_size;
ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);
uint32_t vaddr = 0;
if (caps & MMU_MEM_CAP_EXEC) {
vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
} else {
vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
}
*out_ptr = (void *)vaddr;
return ESP_OK;
}
#if CONFIG_IDF_TARGET_ESP32
/**
* On ESP32, due to a hardware limitation, there is no easy way to sync
* between the cache and external memory for a specific address range,
* so we do a full sync here
*/
static void IRAM_ATTR NOINLINE_ATTR s_cache_sync(void)
{
#if CONFIG_SPIRAM
esp_psram_extram_writeback_cache();
#endif //#if CONFIG_SPIRAM
Cache_Flush(0);
#if !CONFIG_FREERTOS_UNICORE
Cache_Flush(1);
#endif // !CONFIG_FREERTOS_UNICORE
}
#endif //#if CONFIG_IDF_TARGET_ESP32
static void IRAM_ATTR NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start, uint32_t size)
{
#if CONFIG_IDF_TARGET_ESP32
s_cache_sync();
#else //Other chips
cache_hal_invalidate_addr(vaddr_start, size);
#endif // CONFIG_IDF_TARGET_ESP32
}
static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
/**
* Disable the cache. After this call, and until the cache is re-enabled below, involved code and data must reside in internal RAM.
*
* @note we call this for now, but this will be refactored to move out of `spi_flash`
*/
spi_flash_disable_interrupts_caches_and_other_cpu();
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#endif // #if !CONFIG_FREERTOS_UNICORE
#endif // #if (SOC_MMU_PERIPH_NUM == 2)
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
bus_mask = cache_ll_l1_get_bus(1, vaddr_start, size);
cache_ll_l1_enable_bus(1, bus_mask);
#endif
s_do_cache_invalidate(vaddr_start, size);
//Re-enable the cache; after this, internal RAM access is no longer mandatory
spi_flash_enable_interrupts_caches_and_other_cpu();
ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
}
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr)
{
esp_err_t ret = ESP_FAIL;
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
#if !SOC_SPIRAM_SUPPORTED || CONFIG_IDF_TARGET_ESP32
ESP_RETURN_ON_FALSE(!(target & MMU_TARGET_PSRAM0), ESP_ERR_NOT_SUPPORTED, TAG, "PSRAM is not supported");
#endif
ESP_RETURN_ON_FALSE((paddr_start % CONFIG_MMU_PAGE_SIZE == 0), ESP_ERR_INVALID_ARG, TAG, "paddr must be aligned to a multiple of CONFIG_MMU_PAGE_SIZE");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
if (found_region_id == -1) {
ESP_EARLY_LOGE(TAG, "no such vaddr range");
return ESP_ERR_NOT_FOUND;
}
//Now we're sure we can find an available block inside a certain region
mem_region_t *found_region = &s_mmu_ctx.mem_regions[found_region_id];
mem_block_t *dummy_head = NULL;
mem_block_t *dummy_tail = NULL;
mem_block_t *new_block = NULL;
if (TAILQ_EMPTY(&found_region->mem_block_head)) {
dummy_head = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ESP_GOTO_ON_FALSE(dummy_head, ESP_ERR_NO_MEM, err, TAG, "no mem");
dummy_head->laddr_start = found_region->free_head;
dummy_head->laddr_end = found_region->free_head;
//We don't care about the vaddr or paddr of the dummy head
dummy_head->size = 0;
dummy_head->caps = caps;
TAILQ_INSERT_HEAD(&found_region->mem_block_head, dummy_head, entries);
dummy_tail = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ESP_GOTO_ON_FALSE(dummy_tail, ESP_ERR_NO_MEM, err, TAG, "no mem");
dummy_tail->laddr_start = found_region->end;
dummy_tail->laddr_end = found_region->end;
//We don't care about the vaddr or paddr of the dummy tail
dummy_tail->size = 0;
dummy_tail->caps = caps;
TAILQ_INSERT_TAIL(&found_region->mem_block_head, dummy_tail, entries);
}
//Check if paddr is overlapped
mem_block_t *mem_block = NULL;
#if ENABLE_PADDR_CHECK
bool is_mapped = false;
TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
if (target == mem_block->target) {
if ((paddr_start >= mem_block->paddr_start) && ((paddr_start + aligned_size) <= mem_block->paddr_end)) {
//the to-be-mapped paddr region is mapped already
is_mapped = true;
break;
}
}
}
if (is_mapped) {
ESP_LOGW(TAG, "paddr region is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
*out_ptr = (void *)mem_block->vaddr_start;
return ESP_ERR_INVALID_STATE;
}
#endif //#if ENABLE_PADDR_CHECK
new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ESP_GOTO_ON_FALSE(new_block, ESP_ERR_NO_MEM, err, TAG, "no mem");
//Reserve this block as it'll be mapped
bool found = false;
// Get the end address of the dummy_head block, which is always the first block on the list
uint32_t last_end = TAILQ_FIRST(&found_region->mem_block_head)->laddr_end;
size_t slot_len = 0;
size_t max_slot_len = 0;
mem_block_t *found_block = NULL; //The block we found; the slot between it and its previous block is where the new block will be inserted
TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
slot_len = mem_block->laddr_start - last_end;
if (!found) {
if (slot_len >= aligned_size) {
//Found it
found = true;
found_block = mem_block;
slot_len -= aligned_size;
new_block->laddr_start = last_end;
}
}
max_slot_len = (slot_len > max_slot_len) ? slot_len : max_slot_len;
last_end = mem_block->laddr_end;
}
assert(found);
//insert the to-be-mapped new block to the list
TAILQ_INSERT_BEFORE(found_block, new_block, entries);
//Finally, we update the max_slot_size
found_region->max_slot_size = max_slot_len;
//Now we fill others according to the found `new_block->laddr_start`
new_block->laddr_end = new_block->laddr_start + aligned_size;
new_block->size = aligned_size;
new_block->caps = caps;
if (caps & MMU_MEM_CAP_EXEC) {
new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION);
new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION);
} else {
new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA);
new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA);
}
new_block->paddr_start = paddr_start;
new_block->paddr_end = paddr_start + aligned_size;
new_block->target = target;
//do mapping
s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size);
*out_ptr = (void *)new_block->vaddr_start;
return ESP_OK;
err:
if (new_block) {
free(new_block);
}
if (dummy_tail) {
free(dummy_tail);
}
if (dummy_head) {
free(dummy_head);
}
return ret;
}
static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_t size)
{
/**
* Disable the cache. After this call, and until the cache is re-enabled below, involved code and data must reside in internal RAM.
*
* @note we call this for now, but this will be refactored to move out of `spi_flash`
*/
spi_flash_disable_interrupts_caches_and_other_cpu();
mmu_hal_unmap_region(0, vaddr_start, size);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
mmu_hal_unmap_region(1, vaddr_start, size);
#endif // #if !CONFIG_FREERTOS_UNICORE
#endif // #if (SOC_MMU_PERIPH_NUM == 2)
//Re-enable the cache; after this, internal RAM access is no longer mandatory
spi_flash_enable_interrupts_caches_and_other_cpu();
}
esp_err_t esp_mmu_unmap(void *ptr)
{
ESP_RETURN_ON_FALSE(ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
mem_region_t *region = NULL;
mem_block_t *mem_block = NULL;
uint32_t ptr_laddr = mmu_ll_vaddr_to_laddr((uint32_t)ptr);
size_t slot_len = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if (ptr_laddr >= s_mmu_ctx.mem_regions[i].free_head && ptr_laddr < s_mmu_ctx.mem_regions[i].end) {
region = &s_mmu_ctx.mem_regions[i];
}
}
ESP_RETURN_ON_FALSE(region, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer is outside external memory regions");
bool found = false;
mem_block_t *found_block = NULL;
TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
//skip the dummy_head and the dummy_tail
continue;
}
//now we are only traversing the actual dynamically allocated blocks; dummy_head and dummy_tail are already excluded
if (mem_block->laddr_start == ptr_laddr) {
slot_len = TAILQ_NEXT(mem_block, entries)->laddr_start - TAILQ_PREV(mem_block, mem_block_head_, entries)->laddr_end;
region->max_slot_size = (slot_len > region->max_slot_size) ? slot_len : region->max_slot_size;
found = true;
found_block = mem_block;
break;
}
}
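/*
 * Worked example of the slot arithmetic above (hypothetical laddrs): with the
 * previous block ending at 0x200 and the next block starting at 0x600,
 * removing the block in between frees the whole span between them, so
 * slot_len = 0x600 - 0x200 = 0x400 bytes, which then competes with the
 * current region->max_slot_size.
 */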
ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer isn't mapped yet");
//do unmap
s_do_unmapping(mem_block->vaddr_start, mem_block->size);
//remove the already unmapped block from the list
TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
free(found_block);
return ESP_OK;
}
esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream)
{
char line[100];
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
fprintf(stream, "region %d:\n", i);
fprintf(stream, "%-15s %-14s %-14s %-12s %-12s %-12s\n", "Bus ID", "Start", "Free Head", "End", "Caps", "Max Slot Size");
char *buf = line;
size_t len = sizeof(line);
memset(line, 0x0, len);
snprintf(buf, len, "0x%-13x 0x%-12"PRIx32" 0x%-11"PRIx32" 0x%-10"PRIx32" 0x%-10x 0x%-8x\n",
s_mmu_ctx.mem_regions[i].bus_id,
s_mmu_ctx.mem_regions[i].start,
s_mmu_ctx.mem_regions[i].free_head,
s_mmu_ctx.mem_regions[i].end,
s_mmu_ctx.mem_regions[i].caps,
s_mmu_ctx.mem_regions[i].max_slot_size);
fputs(line, stream);
fprintf(stream, "mapped blocks:\n");
fprintf(stream, "%-4s %-13s %-12s %-12s %-6s %-13s %-11s\n", "ID", "Vaddr Start", "Vaddr End", "Block Size", "Caps", "Paddr Start", "Paddr End");
mem_region_t *region = &s_mmu_ctx.mem_regions[i];
mem_block_t *mem_block = NULL;
int id = 0;
TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
snprintf(buf, len, "%-4d 0x%-11x 0x%-10x 0x%-10x 0x%-4x 0x%-11"PRIx32" 0x%-8"PRIx32"\n",
id,
mem_block->vaddr_start,
mem_block->vaddr_end,
mem_block->size,
mem_block->caps,
mem_block->paddr_start,
mem_block->paddr_end);
fputs(line, stream);
id++;
}
}
fprintf(stream, "\n");
}
return ESP_OK;
}
/*---------------------------------------------------------------
Private dump functions, IRAM Safe
---------------------------------------------------------------*/
esp_err_t IRAM_ATTR esp_mmu_map_dump_mapped_blocks_private(void)
{
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
mem_region_t *region = &s_mmu_ctx.mem_regions[i];
mem_block_t *mem_block = NULL;
TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
ESP_DRAM_LOGI(TAG, "block vaddr_start: 0x%x", mem_block->vaddr_start);
ESP_DRAM_LOGI(TAG, "block vaddr_end: 0x%x", mem_block->vaddr_end);
ESP_DRAM_LOGI(TAG, "block size: 0x%x", mem_block->size);
ESP_DRAM_LOGI(TAG, "block caps: 0x%x\n", mem_block->caps);
ESP_DRAM_LOGI(TAG, "block paddr_start: 0x%x\n", mem_block->paddr_start);
ESP_DRAM_LOGI(TAG, "block paddr_end: 0x%x\n", mem_block->paddr_end);
}
}
ESP_DRAM_LOGI(TAG, "region bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
ESP_DRAM_LOGI(TAG, "region start: 0x%x", s_mmu_ctx.mem_regions[i].start);
ESP_DRAM_LOGI(TAG, "region end: 0x%x", s_mmu_ctx.mem_regions[i].end);
ESP_DRAM_LOGI(TAG, "region caps: 0x%x\n", s_mmu_ctx.mem_regions[i].caps);
}
return ESP_OK;
}
/*---------------------------------------------------------------
Helper APIs for conversion between vaddr and paddr
---------------------------------------------------------------*/
static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
//we call this for now, but this will be refactored to move out of `spi_flash`
spi_flash_disable_interrupts_caches_and_other_cpu();
//On ESP32, core 1 settings should be the same as core 0
bool is_mapped = mmu_hal_vaddr_to_paddr(0, vaddr, out_paddr, out_target);
spi_flash_enable_interrupts_caches_and_other_cpu();
return is_mapped;
}
esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
ESP_RETURN_ON_FALSE(vaddr && out_paddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_FALSE(mmu_ll_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");
esp_paddr_t paddr = 0;
mmu_target_t target = 0;
bool is_mapped = s_vaddr_to_paddr((uint32_t)vaddr, &paddr, &target);
ESP_RETURN_ON_FALSE(is_mapped, ESP_ERR_NOT_FOUND, TAG, "vaddr isn't mapped");
*out_paddr = paddr;
*out_target = target;
return ESP_OK;
}
static bool NOINLINE_ATTR IRAM_ATTR s_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
//we call this for now, but this will be refactored to move out of `spi_flash`
spi_flash_disable_interrupts_caches_and_other_cpu();
//On ESP32, core 1 settings should be the same as core 0
bool found = mmu_hal_paddr_to_vaddr(0, paddr, target, type, out_vaddr);
spi_flash_enable_interrupts_caches_and_other_cpu();
return found;
}
esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr)
{
ESP_RETURN_ON_FALSE(out_vaddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
uint32_t vaddr = 0;
bool found = false;
found = s_paddr_to_vaddr(paddr, target, type, &vaddr);
ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "paddr isn't mapped");
*out_vaddr = (void *)vaddr;
return ESP_OK;
}
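A distilled, self-contained model of the first-fit slot search that esp_mmu_map() performs above: blocks sit on a list sorted by laddr between zero-sized dummy head/tail entries, the new block goes into the first inter-block gap (slot) that fits, and the largest remaining slot is recomputed in the same pass. The sketch below uses plain arrays instead of TAILQ and invented numbers; it is an illustration, not the driver code.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef struct { uint32_t start, end; } blk_t;

/* Return the laddr for a new block of `size` (0 if nothing fits) and report
 * the largest slot left over. blocks[] is sorted by laddr and includes the
 * zero-sized dummy head and tail pinning the region bounds. */
static uint32_t first_fit(const blk_t *blocks, int n, uint32_t size, uint32_t *max_slot)
{
    uint32_t found = 0, max = 0;
    for (int i = 1; i < n; i++) {
        uint32_t slot = blocks[i].start - blocks[i - 1].end;
        if (!found && slot >= size) {
            found = blocks[i - 1].end;  // place the new block right after its predecessor
            slot -= size;               // the chosen slot shrinks by what we take
        }
        if (slot > max) {
            max = slot;
        }
    }
    *max_slot = max;
    return found;
}

int main(void)
{
    // dummy head pinned at free_head (0x100), one mapped block, dummy tail at region end
    blk_t blocks[] = { {0x100, 0x100}, {0x200, 0x300}, {0x800, 0x800} };
    uint32_t max_slot = 0;
    uint32_t laddr = first_fit(blocks, 3, 0x100, &max_slot);
    printf("new block laddr: 0x%" PRIx32 ", max slot left: 0x%" PRIx32 "\n", laddr, max_slot);
    return 0;
}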

View file

@ -10,6 +10,7 @@
#include "sdkconfig.h"
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
extern "C" {
@ -17,11 +18,12 @@ extern "C" {
typedef struct {
intptr_t start;
intptr_t end;
size_t size;
cache_bus_mask_t bus_id;
uint32_t caps;
uint32_t start; //laddr start
uint32_t end; //laddr end
size_t size; //region size
cache_bus_mask_t bus_id; //bus_id mask, for accessible cache buses
mmu_target_t targets; //region supported physical targets
uint32_t caps; //vaddr capabilities
} mmu_mem_region_t;
//These regions refer to linear addresses

View file

@ -0,0 +1,142 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* MMU Memory Mapping Driver APIs for MMU supported memory
*
*
* Driver Backgrounds:
*
* --------------------------------------------------------------------------------------------------------
* Memory Pool |
* --------------------------------------------------------------------------------------------------------
* | Memory Region 0 | Memory Region 1 | ... |
* --------------------------------------------------------------------------------------------------------
* | Block 0 | Slot 0 | Block 1 | Block 2 | ... | Slot 1 (final slot) | ... |
* --------------------------------------------------------------------------------------------------------
*
* - A memory pool stands for the whole virtual address range that can be mapped to physical memory
* - A memory region is a range of virtual addresses with the same attributes
* - A block is a piece of vaddr range that is dynamically mapped.
* - A Slot is the vaddr range between 2 blocks.
*/
/**
* @brief Physical memory type
*/
typedef uint32_t esp_paddr_t;
/**
* @brief Map a physical memory block to external virtual address block, with given capabilities.
*
* @note This API does not guarantee thread safety
*
* @param[in] paddr_start Start address of the physical memory block
* @param[in] size Size to be mapped; it will be rounded up to the nearest multiple of the MMU page size
* @param[in] caps Memory capabilities, see `mmu_mem_caps_t`
* @param[in] target Physical memory target you're going to map to, see `mmu_target_t`
* @param[out] out_ptr Start address of the mapped virtual memory
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Invalid argument, see printed logs
* - ESP_ERR_NOT_SUPPORTED: Only on ESP32, PSRAM is not a supported physical memory target
* - ESP_ERR_NOT_FOUND: No free block large enough to use
* - ESP_ERR_NO_MEM: Out of memory, this API will allocate some heap memory for internal usage
* - ESP_ERR_INVALID_STATE: Paddr is already mapped; this API returns the vaddr_start of the previously mapped block.
* This error is returned only when the to-be-mapped paddr block is entirely enclosed by a previously mapped block:
* new_block_start new_block_end
* |-------- New Block --------|
* |--------------- Block ---------------|
* block_start block_end
*
*/
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr);
/**
* @brief Unmap a previously mapped virtual memory block
*
* @note This API does not guarantee thread safety
*
* @param[in] ptr Start address of the virtual memory
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Null pointer
* - ESP_ERR_NOT_FOUND: Vaddr is not in external memory, or it's not mapped yet
*/
esp_err_t esp_mmu_unmap(void *ptr);
/**
* @brief Get largest consecutive free external virtual memory block size, with given capabilities and given physical target
*
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[in] target Physical memory target you're going to map to, see `mmu_target_t`.
* @param[out] out_len Largest free block length, in bytes.
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be null pointer
*/
esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len);
/**
* Dump all the previously mapped blocks
*
* @note This API shall not be called from an ISR.
* @note This API does not guarantee thread safety
*
* @param stream stream to print information to; use stdout or stderr to print
* to the console; use fmemopen/open_memstream to print to a
* string buffer.
* @return
* - ESP_OK
*/
esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream);
/**
* @brief Convert virtual address to physical address
*
* @param[in] vaddr Virtual address
* @param[out] out_paddr Physical address
* @param[out] out_target Physical memory target, see `mmu_target_t`
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Null pointer, or vaddr is not within external memory
* - ESP_ERR_NOT_FOUND: Vaddr is not mapped yet
*/
esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target);
/**
* @brief Convert physical address to virtual address
*
* @param[in] paddr Physical address
* @param[in] target Physical memory target, see `mmu_target_t`
* @param[in] type Virtual address type, could be either instruction or data
* @param[out] out_vaddr Virtual address
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Null pointer
* - ESP_ERR_NOT_FOUND: Paddr is not mapped yet
*/
esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr);
#ifdef __cplusplus
}
#endif
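A minimal usage sketch of the APIs above. The 0x10000 flash paddr, the "app" log tag, and the helper name are illustrative assumptions, not part of this commit:

#include "sdkconfig.h"
#include "esp_check.h"
#include "esp_mmu_map.h"

static esp_err_t read_one_mapped_page(void)
{
    void *ptr = NULL;
    //map one MMU page of flash as readable, 8-bit accessible data
    ESP_RETURN_ON_ERROR(esp_mmu_map(0x10000, CONFIG_MMU_PAGE_SIZE,
                                    MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT,
                                    MMU_TARGET_FLASH0, &ptr),
                        "app", "map failed");
    uint8_t first_byte = ((const uint8_t *)ptr)[0];   //read through the new vaddr
    (void)first_byte;
    //round-trip the virtual address back to its physical counterpart
    esp_paddr_t paddr = 0;
    mmu_target_t target = MMU_TARGET_FLASH0;
    ESP_RETURN_ON_ERROR(esp_mmu_vaddr_to_paddr(ptr, &paddr, &target), "app", "lookup failed");
    //release the vaddr block when done
    return esp_mmu_unmap(ptr);
}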

View file

@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Memory Mapping Private APIs for MMU supported memory
*/
/**
* @brief Initialise the MMU MMAP driver
*
* This is called once in the IDF startup code. Don't call it in applications
*/
void esp_mmu_map_init(void);
/**
* @brief Reserve a consecutive external virtual memory block, with given capabilities and size
*
* @note This private API shall be only called internally during startup stage. DO NOT call
* this API in your applications
*
* @param[in] size Size, in bytes, of the memory block to reserve
* @param[in] caps Bitwise OR of `mmu_mem_caps_t` flags indicating the memory block capability
* @param[in] target Target memory type. See `mmu_target_t`
* @param[out] out_ptr Pointer to start address of the memory block that is reserved
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments, e.g. an invalid caps combination or a null pointer
* - ESP_ERR_NOT_FOUND: Didn't find enough memory with the given caps
*/
esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr);
/**
* @brief Dump all mapped blocks
*
* @return
* - ESP_OK
*/
esp_err_t esp_mmu_map_dump_mapped_blocks_private(void);
#ifdef __cplusplus
}
#endif

View file

@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* These regions refer to linear addresses
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT,
},
[1] = {
.start = SOC_MMU_DROM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DROM0_LINEAR),
.bus_id = CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
[2] = {
.start = SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DRAM1_LINEAR),
.bus_id = CACHE_BUS_DBUS1,
.targets = MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,26 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
#include "hal/mmu_types.h"
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,19 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
#include "hal/mmu_types.h"
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {},
};

View file

@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* These regions refer to linear addresses
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT,
},
[1] = {
.start = SOC_MMU_DROM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DROM0_LINEAR),
.bus_id = CACHE_BUS_IBUS2,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
[2] = {
.start = SOC_MMU_DPORT_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DPORT_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DPORT_LINEAR),
.bus_id = CACHE_BUS_DBUS2,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT,
},
[3] = {
.start = SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DRAM1_LINEAR),
.bus_id = CACHE_BUS_DBUS1,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
[4] = {
.start = SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DRAM0_LINEAR),
.bus_id = CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View file

@ -0,0 +1,20 @@
# This is the project CMakeLists.txt file for the test subproject
cmake_minimum_required(VERSION 3.16)
set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/tools/unit-test-app/components")
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(mmap_test)
if(CONFIG_COMPILER_DUMP_RTL_FILES)
add_custom_target(check_test_app_sections ALL
COMMAND ${PYTHON} $ENV{IDF_PATH}/tools/ci/check_callgraph.py
--rtl-dir ${CMAKE_BINARY_DIR}/esp-idf/esp_mm/
--elf-file ${CMAKE_BINARY_DIR}/mmap_test.elf
find-refs
--from-sections=.iram0.text
--to-sections=.flash.text,.flash.rodata
--exit-code
DEPENDS ${elf}
)
endif()

View file

@ -0,0 +1,2 @@
| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C6 | ESP32-H2 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- | -------- | -------- |

View file

@ -0,0 +1,15 @@
set(srcs "test_app_main.c")
list(APPEND srcs "test_mmap.c"
"test_mmap_hw.c")
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,
# the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRCS ${srcs}
PRIV_REQUIRES test_utils spi_flash esp_mm
WHOLE_ARCHIVE)
idf_component_get_property(lib_name esp_mm COMPONENT_LIB)
# Add this to skip checking mapping to a paddr range that is enclosed by a previously mapped paddr range
target_compile_definitions(${lib_name} PRIVATE ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR)

View file

@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "unity.h"
#include "unity_test_utils.h"
#include "esp_heap_caps.h"
/**
* Hardware-related tests, e.g.
* - traversing all vaddr ranges to check their attributes
*
* These tests need a certain amount of internal resources (heap memory), as they use up the vaddr ranges
* On ESP32, it should be around 450
* On ESP32S2, it should be around 600
* On other chips, it should be around 400
*/
#define TEST_MEMORY_LEAK_THRESHOLD (-650)
static size_t before_free_8bit;
static size_t before_free_32bit;
static void check_leak(size_t before_free, size_t after_free, const char *type)
{
ssize_t delta = after_free - before_free;
printf("MALLOC_CAP_%s: Before %u bytes free, After %u bytes free (delta %d)\n", type, before_free, after_free, delta);
TEST_ASSERT_MESSAGE(delta >= TEST_MEMORY_LEAK_THRESHOLD, "memory leak");
}
void setUp(void)
{
before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
}
void tearDown(void)
{
size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
check_leak(before_free_8bit, after_free_8bit, "8BIT");
check_leak(before_free_32bit, after_free_32bit, "32BIT");
}
void app_main(void)
{
/*
_____ ___________ ___ ______ ___ ___ ______ _____ _____ _____ _____
| ___/ ___| ___ \ | \/ || \/ | / _ \ | ___ \ |_ _| ___/ ___|_ _|
| |__ \ `--.| |_/ / | . . || . . |/ /_\ \| |_/ / | | | |__ \ `--. | |
| __| `--. \ __/ | |\/| || |\/| || _ || __/ | | | __| `--. \ | |
| |___/\__/ / | | | | || | | || | | || | | | | |___/\__/ / | |
\____/\____/\_| \_| |_/\_| |_/\_| |_/\_| \_/ \____/\____/ \_/
*/
printf(" _____ ___________ ___ ______ ___ ___ ______ _____ _____ _____ _____\r\n");
printf("| ___/ ___| ___ \\ | \\/ || \\/ | / _ \\ | ___ \\ |_ _| ___/ ___|_ _|\r\n");
printf("| |__ \\ `--.| |_/ / | . . || . . |/ /_\\ \\| |_/ / | | | |__ \\ `--. | |\r\n");
printf("| __| `--. \\ __/ | |\\/| || |\\/| || _ || __/ | | | __| `--. \\ | |\r\n");
printf("| |___/\\__/ / | | | | || | | || | | || | | | | |___/\\__/ / | |\r\n");
printf("\\____/\\____/\\_| \\_| |_/\\_| |_/\\_| |_/\\_| \\_/ \\____/\\____/ \\_/\r\n");
unity_run_menu();
}

View file

@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <sys/param.h>
#include <string.h>
#include "inttypes.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "unity.h"
#include "esp_heap_caps.h"
#include "esp_partition.h"
#include "esp_mmu_map.h"
#include "esp_rom_sys.h"
#define TEST_BLOCK_SIZE CONFIG_MMU_PAGE_SIZE
const static char *TAG = "MMU_TEST";
static const esp_partition_t *s_get_partition(void)
{
//Find the "storage1" partition defined in `partitions.csv`
const esp_partition_t *result = esp_partition_find_first(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, "storage1");
if (!result) {
ESP_LOGE(TAG, "Can't find the partition, please define it correctly in `partitions.csv`");
abort();
}
return result;
}
TEST_CASE("Can dump mapped block stats", "[mmu]")
{
const esp_partition_t *part = s_get_partition();
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
void *ptr0 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr0));
void *ptr1 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr1));
void *ptr2 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr2));
esp_mmu_map_dump_mapped_blocks(stdout);
TEST_ESP_OK(esp_mmu_unmap(ptr0));
TEST_ESP_OK(esp_mmu_unmap(ptr1));
TEST_ESP_OK(esp_mmu_unmap(ptr2));
}

View file

@ -0,0 +1,193 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <sys/param.h>
#include <string.h>
#include <sys/queue.h>
#include "inttypes.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "unity.h"
#include "esp_heap_caps.h"
#include "esp_partition.h"
#include "esp_flash.h"
#include "esp_mmu_map.h"
#include "esp_rom_sys.h"
/**
* This file contains simple hw tests for vaddr memory regions
*
* Traversing all vaddr memory regions to see if they have correct capabilities
*/
const static char *TAG = "MMU_TEST";
static const esp_partition_t *s_get_partition(void)
{
//Find the "storage1" partition defined in `partitions.csv`
const esp_partition_t *result = esp_partition_find_first(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, "storage1");
if (!result) {
ESP_LOGE(TAG, "Can't find the partition, please define it correctly in `partitions.csv`");
abort();
}
return result;
}
/**
* Run the following two tests manually:
* - test all readable vaddr can map to flash
* - test all executable vaddr can map to flash
*
* Do a reset before each test starts, as each of the tests
* maps as much as possible and does not unmap.
*/
#define TEST_BLOCK_SIZE CONFIG_MMU_PAGE_SIZE
typedef struct test_block_info_ {
uint32_t vaddr;
LIST_ENTRY(test_block_info_) entries;
} test_block_info_t;
static LIST_HEAD(test_block_list_head_, test_block_info_) test_block_head;
static DRAM_ATTR uint8_t sector_buf[TEST_BLOCK_SIZE];
static void s_fill_random_data(uint8_t *buffer, size_t size, int random_seed)
{
srand(random_seed);
for (int i = 0 ; i < size; i++) {
buffer[i] = rand() % 0xff;
}
}
static bool s_test_mmap_data_by_random(uint8_t *mblock_ptr, size_t size, int random_seed)
{
srand(random_seed);
uint8_t *test_ptr = mblock_ptr;
for (int i = 0; i < size; i++) {
uint8_t test_data = rand() % 0xff;
if (test_data != test_ptr[i]) {
printf("i: %d\n", i);
printf("test_data: %d\n", test_data);
printf("test_ptr[%d]: %d\n", i, test_ptr[i]);
printf("sector_buf[%d]: %d\n", i, sector_buf[i]);
ESP_EARLY_LOGE(TAG, "FAIL!!!!!!");
return false;
}
}
return true;
}
TEST_CASE("test all readable vaddr can map to flash", "[mmu]")
{
//Get the partition used for SPI1 erase operation
const esp_partition_t *part = s_get_partition();
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
//Erase whole region
TEST_ESP_OK(esp_flash_erase_region(part->flash_chip, part->address, part->size));
ESP_LOGI(TAG, "TEST_BLOCK_SIZE: 0x%x", TEST_BLOCK_SIZE);
int test_seed = 299;
s_fill_random_data(sector_buf, sizeof(sector_buf), test_seed);
ESP_LOGV(TAG, "rand seed: %d, write flash addr: %p...", test_seed, (void *)part->address);
TEST_ESP_OK(esp_flash_write(part->flash_chip, sector_buf, part->address, sizeof(sector_buf)));
esp_err_t ret = ESP_FAIL;
int count = 0;
LIST_INIT(&test_block_head);
while (1) {
test_block_info_t *block_info = heap_caps_calloc(1, sizeof(test_block_info_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
TEST_ASSERT(block_info && "no mem");
void *ptr = NULL;
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr);
if (ret == ESP_OK) {
ESP_LOGI(TAG, "ptr is %p", ptr);
bool success = s_test_mmap_data_by_random((uint8_t *)ptr, sizeof(sector_buf), test_seed);
TEST_ASSERT(success);
} else if (ret == ESP_ERR_NOT_FOUND) {
free(block_info);
break;
} else {
ESP_LOGI(TAG, "ret: 0x%x", ret);
TEST_ASSERT(false);
}
block_info->vaddr = (uint32_t)ptr;
LIST_INSERT_HEAD(&test_block_head, block_info, entries);
count++;
}
ESP_LOGI(TAG, "no more free block, finish test, test block size: 0x%x, count: 0d%d", TEST_BLOCK_SIZE, count);
test_block_info_t *block_to_free = LIST_FIRST(&test_block_head);
test_block_info_t *temp = NULL;
while (block_to_free) {
temp = block_to_free;
TEST_ESP_OK(esp_mmu_unmap((void *)block_to_free->vaddr));
block_to_free = LIST_NEXT(block_to_free, entries);
free(temp);
}
}
TEST_CASE("test all executable vaddr can map to flash", "[mmu]")
{
//Get the partition used for SPI1 erase operation
const esp_partition_t *part = s_get_partition();
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
//Erase whole region
TEST_ESP_OK(esp_flash_erase_region(part->flash_chip, part->address, part->size));
esp_err_t ret = ESP_FAIL;
int count = 0;
LIST_INIT(&test_block_head);
while (1) {
test_block_info_t *block_info = heap_caps_calloc(1, sizeof(test_block_info_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
TEST_ASSERT(block_info && "no mem");
void *ptr = NULL;
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr);
if (ret == ESP_OK) {
ESP_LOGI(TAG, "ptr is %p", ptr);
for (int i = 0; i < TEST_BLOCK_SIZE; i += 0x100) {
uint32_t vaddr = (uint32_t)ptr + i;
uint32_t paddr = 0;
mmu_target_t mem_target = 0;
TEST_ESP_OK(esp_mmu_vaddr_to_paddr((void *)vaddr, &paddr, &mem_target));
TEST_ASSERT(paddr == part->address + i);
ESP_LOGV(TAG, "paddr: %p, on %s", (void *)paddr, (mem_target) == MMU_TARGET_FLASH0 ? "Flash" : "PSRAM");
}
}
else if (ret == ESP_ERR_NOT_FOUND) {
free(block_info);
break;
} else {
TEST_ASSERT(false);
}
block_info->vaddr = (uint32_t)ptr;
LIST_INSERT_HEAD(&test_block_head, block_info, entries);
count++;
}
ESP_LOGI(TAG, "no more free block, finish test, test block size: 0x%x, count: 0d%d", TEST_BLOCK_SIZE, count);
test_block_info_t *block_to_free = LIST_FIRST(&test_block_head);
test_block_info_t *temp = NULL;
while (block_to_free) {
temp = block_to_free;
TEST_ESP_OK(esp_mmu_unmap((void *)block_to_free->vaddr));
block_to_free = LIST_NEXT(block_to_free, entries);
free(temp);
}
}

View file

@ -0,0 +1,6 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: if you have increased the bootloader size, make sure to update the offsets to avoid overlap
nvs, data, nvs, 0x9000, 0x6000,
phy_init, data, phy, 0xf000, 0x1000,
factory, app, factory, 0x10000, 1M,
storage1, data, fat, , 512K,

View file

@ -0,0 +1,18 @@
# SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut
@pytest.mark.supported_targets
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'release',
],
indirect=True,
)
def test_mmap(dut: Dut) -> None:
dut.run_all_single_board_cases(group='mmu', timeout=600)

View file

@ -0,0 +1,6 @@
# set compiler optimization level
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
# we can silence the assertions to save binary footprint
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y

View file

@ -0,0 +1,7 @@
CONFIG_ESP_TASK_WDT=n
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_FILENAME="partitions.csv"
CONFIG_COMPILER_DUMP_RTL_FILES=y

View file

@ -2,7 +2,7 @@ idf_build_get_property(target IDF_TARGET)
set(includes "include")
set(priv_requires heap spi_flash)
set(priv_requires heap spi_flash esp_mm)
if(${target} STREQUAL "esp32")
list(APPEND priv_requires bootloader_support)
# [refactor-todo]: requires "driver" for `spicommon_periph_claim`
@ -13,9 +13,7 @@ set(srcs)
if(CONFIG_SPIRAM)
list(APPEND srcs "esp_psram.c"
"mmu.c"
"mmu_psram_flash.c"
"ext_mem_layout.c")
"mmu_psram_flash.c")
if(${target} STREQUAL "esp32")
list(APPEND srcs "esp32/esp_psram_extram_cache.c"

View file

@ -26,7 +26,8 @@
#include "esp_private/mmu_psram_flash.h"
#include "esp_psram_impl.h"
#include "esp_psram.h"
#include "mmu.h"
#include "esp_private/esp_mmu_map_private.h"
#include "esp_mmu_map.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/himem.h"
@ -184,15 +185,6 @@ esp_err_t esp_psram_init(void)
ESP_EARLY_LOGV(TAG, "after copy .rodata, used page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif //#if CONFIG_SPIRAM_RODATA
/**
* For now,
* - we only need to use MMU driver when PSRAM is enabled
* - MMU driver isn't public
*
* So we call `esp_mmu_init()` here, instead of calling it in startup code.
*/
esp_mmu_init();
//----------------------------------Map the PSRAM physical range to MMU-----------------------------//
/**
* @note 2
@ -203,12 +195,12 @@ esp_err_t esp_psram_init(void)
size_t total_mapped_size = 0;
size_t size_to_map = 0;
size_t byte_aligned_size = 0;
ret = esp_mmu_get_max_consecutive_free_block(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, &byte_aligned_size);
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
assert(ret == ESP_OK);
size_to_map = MIN(byte_aligned_size, psram_available_size);
const void *v_start_8bit_aligned = NULL;
ret = esp_mmu_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, &v_start_8bit_aligned);
ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_8bit_aligned);
assert(ret == ESP_OK);
#if CONFIG_IDF_TARGET_ESP32
@ -248,12 +240,12 @@ esp_err_t esp_psram_init(void)
size_to_map = psram_available_size - total_mapped_size;
size_t word_aligned_size = 0;
ret = esp_mmu_get_max_consecutive_free_block(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, &word_aligned_size);
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
assert(ret == ESP_OK);
size_to_map = MIN(word_aligned_size, size_to_map);
const void *v_start_32bit_aligned = NULL;
ret = esp_mmu_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, &v_start_32bit_aligned);
ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_32bit_aligned);
assert(ret == ESP_OK);
mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_32bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
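In short, the new flow above is: query the largest free vaddr block with the required capabilities, reserve it, then commit the hardware mapping. A condensed sketch (`size_needed` and `start_page` are placeholders):

size_t free_size = 0;
//1. Largest available vaddr block with the required caps, on the PSRAM target
esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &free_size);
size_t size_to_map = MIN(free_size, size_needed);
//2. Reserve that vaddr range so no one else can claim it
const void *vaddr = NULL;
esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &vaddr);
//3. Program the MMU so the range is backed by PSRAM pages
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)vaddr, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);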

Wyświetl plik

@ -1,77 +0,0 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "ext_mem_layout.h"
#include "hal/mmu_types.h"
#if CONFIG_IDF_TARGET_ESP32
/**
* These regions is referring to linear address
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
//Can be used for text
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM1_LINEAR), CACHE_BUS_IBUS1, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IROM0_LINEAR_ADDRESS_LOW, SOC_MMU_IROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IROM0_LINEAR), CACHE_BUS_IBUS2, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for rodata
{SOC_MMU_DROM0_LINEAR_ADDRESS_LOW, SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DROM0_LINEAR), CACHE_BUS_DBUS0, MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM1_LINEAR), CACHE_BUS_DBUS1, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#elif CONFIG_IDF_TARGET_ESP32S2
/**
* These regions is referring to linear address
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
//Can be used for text
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM1_LINEAR), CACHE_BUS_IBUS1, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for Flash rodata, connected by IBUS
{SOC_MMU_DROM0_LINEAR_ADDRESS_LOW, SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DROM0_LINEAR), CACHE_BUS_IBUS2, MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DPORT_LINEAR_ADDRESS_LOW, SOC_MMU_DPORT_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DPORT_LINEAR), CACHE_BUS_DBUS2, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM1_LINEAR), CACHE_BUS_DBUS1, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM0_LINEAR), CACHE_BUS_DBUS0, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#elif CONFIG_IDF_TARGET_ESP32S3
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
/**
* Can be used for Flash text, rodata, and PSRAM
* IRAM0 linear address should be always the same as DRAM0 linear address
*/
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#endif

Wyświetl plik

@ -3,8 +3,6 @@ archive: libesp_psram.a
entries:
if SPIRAM = y:
mmu (noflash)
if SPIRAM_MODE_QUAD = y:
if IDF_TARGET_ESP32S3 = y:
esp_psram_impl_quad (noflash)

Wyświetl plik

@ -1,267 +0,0 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* This file will be redesigned into MMU driver, to maintain all the external
* memory contexts including:
* - Flash
* - PSRAM
* - DDR
*
* Now only MMU-PSRAM related private APIs
*/
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "soc/soc_caps.h"
#include "ext_mem_layout.h"
#include "freertos/FreeRTOS.h"
#include "hal/cache_types.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_ll.h"
#include "mmu.h"
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
#define MMU_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
//This flag indicates the memory region is merged, we don't care about it anymore
#define MEM_REGION_MERGED -1
static const char *TAG = "mmu";
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
typedef struct mmu_linear_mem_ {
cache_bus_mask_t bus_id;
intptr_t start;
intptr_t end;
size_t pool_size;
intptr_t free_head;
size_t free_size;
int caps;
} mmu_linear_mem_t;
typedef struct {
/**
* number of memory regions that are available, after coalescing, this number should be smaller than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`
*/
uint32_t num_regions;
/**
* This saves the available MMU linear address regions,
* after reserving flash .rodata and .text, and after coalescing.
* Only the first `num_regions` items are valid
*/
mmu_linear_mem_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
static void s_reserve_irom_region(mmu_linear_mem_t *hw_mem_regions, int region_nums)
{
/**
* We follow the way how 1st bootloader load flash .text:
*
* - Now IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are consecutive on all chips,
* we strongly rely on this to calculate the .text length
*/
size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].pool_size <= irom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].free_size = 0;
irom_len_to_reserve -= hw_mem_regions[i].pool_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
hw_mem_regions[i].free_size -= irom_len_to_reserve;
}
}
}
}
static void s_reserve_drom_region(mmu_linear_mem_t *hw_mem_regions, int region_nums)
{
/**
* Similarly, we follow the way how 1st bootloader load flash .rodata:
*/
size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].pool_size <= drom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].free_size = 0;
drom_len_to_reserve -= hw_mem_regions[i].pool_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
hw_mem_regions[i].free_size -= drom_len_to_reserve;
}
}
}
}
void esp_mmu_init(void)
{
mmu_linear_mem_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
hw_mem_regions[i].pool_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
assert(hw_mem_regions[i].pool_size % MMU_PAGE_SIZE == 0);
}
//First reserve memory regions used for irom and drom, as we must follow the way how 1st bootloader load them
s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
//Now we can coalesce adjacent regions
for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
mmu_linear_mem_t *a = &hw_mem_regions[i - 1];
mmu_linear_mem_t *b = &hw_mem_regions[i];
if ((b->free_head == a->end) && (b->caps == a->caps)) {
a->caps = MEM_REGION_MERGED;
b->bus_id |= a->bus_id;
b->start = a->start;
b->pool_size += a->pool_size;
b->free_head = a->free_head;
b->free_size += a->free_size;
}
}
}
//Count the mem regions left after coalescing
uint32_t region_num = 0;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if(hw_mem_regions[i].caps != MEM_REGION_MERGED) {
region_num++;
}
}
ESP_EARLY_LOGV(TAG, "after coalescing, %d regions are left", region_num);
//Initialise `s_mmu_ctx.mem_regions[]`, as we've done all static allocation, to prepare available virtual memory regions
uint32_t available_region_idx = 0;
s_mmu_ctx.num_regions = region_num;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
continue;
}
memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mmu_linear_mem_t));
available_region_idx++;
}
assert(available_region_idx == region_num);
}
esp_err_t esp_mmu_get_max_consecutive_free_block(mmu_mem_caps_t caps, size_t *out_len)
{
ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory are expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
}
*out_len = 0;
size_t max = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if ((s_mmu_ctx.mem_regions[i].caps & caps) == caps) {
if (s_mmu_ctx.mem_regions[i].free_size > max) {
max = s_mmu_ctx.mem_regions[i].free_size;
}
}
}
*out_len = max;
return ESP_OK;
}
esp_err_t esp_mmu_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, const void **out_ptr)
{
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory are expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
caps |= MMU_MEM_CAP_32BIT;
}
size_t aligned_size = ALIGN_UP_BY(size, MMU_PAGE_SIZE);
bool is_match = false;
uint32_t laddr = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if ((s_mmu_ctx.mem_regions[i].caps & caps) == caps) {
if (s_mmu_ctx.mem_regions[i].free_size < aligned_size) {
continue;
} else {
laddr = (uint32_t)s_mmu_ctx.mem_regions[i].free_head;
s_mmu_ctx.mem_regions[i].free_head += aligned_size;
s_mmu_ctx.mem_regions[i].free_size -= aligned_size;
is_match = true;
break;
}
}
}
ESP_RETURN_ON_FALSE(is_match, ESP_ERR_NOT_FOUND, TAG, "no such vaddr range");
ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);
if (caps & MMU_MEM_CAP_EXEC) {
laddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
} else {
laddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
}
*out_ptr = (void *)laddr;
return ESP_OK;
}
esp_err_t esp_mmu_dump_region_usage(void)
{
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
ESP_EARLY_LOGI(TAG, "bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
ESP_EARLY_LOGI(TAG, "start: 0x%x", s_mmu_ctx.mem_regions[i].start);
ESP_EARLY_LOGI(TAG, "end: 0x%x", s_mmu_ctx.mem_regions[i].end);
ESP_EARLY_LOGI(TAG, "pool_size: 0x%x", s_mmu_ctx.mem_regions[i].pool_size);
ESP_EARLY_LOGI(TAG, "free_head: 0x%x", s_mmu_ctx.mem_regions[i].free_head);
ESP_EARLY_LOGI(TAG, "free_size: 0x%x", s_mmu_ctx.mem_regions[i].free_size);
ESP_EARLY_LOGI(TAG, "caps: 0x%x\n", s_mmu_ctx.mem_regions[i].caps);
}
return ESP_OK;
}

Wyświetl plik

@ -1,70 +0,0 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* This file will be moved out of `esp_psram` component. And will be
* future MMU driver, to maintain all the external memory contexts including:
* - Flash
* - PSRAM
* - DDR
*
* Now only support ESP32, ESP32S2, ESP32S3 virtual address maintenance, and is internal
*/
/**
* @brief Initialise the MMU driver
*
* This is called once in the IDF startup code. Don't call it in applications
*/
void esp_mmu_init(void);
/**
* @brief Get largest consecutive free external virtual memory block, with given capabilities
*
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[out] out_len Largest free block length, in bytes.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be null pointer
*/
esp_err_t esp_mmu_get_max_consecutive_free_block(mmu_mem_caps_t caps, size_t *out_len);
/**
* @brief Reserve a consecutive external virtual memory block, with given capabilities and size
*
* @param[in] size Size, in bytes, the amount of memory to find
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[out] out_ptr Pointer to start address of the memory block that is reserved
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be wrong caps makeup, or null pointer
* - ESP_ERR_NOT_FOUND: Didn't find enough memory with give caps
*/
esp_err_t esp_mmu_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, const void **out_ptr);
/**
* @brief Dump internal memory region usage
*
* @return
* - ESP_OK: On success
*/
esp_err_t esp_mmu_dump_region_usage(void);
#ifdef __cplusplus
}
#endif

Wyświetl plik

@ -55,7 +55,7 @@ else()
idf_component_register(SRCS "${srcs}"
INCLUDE_DIRS include
PRIV_REQUIRES spi_flash esp_timer
PRIV_REQUIRES spi_flash esp_timer esp_mm
# [refactor-todo] requirements due to init code,
# should be removable once using component init functions
# link-time registration is used.

Wyświetl plik

@ -319,7 +319,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
. = ALIGN(4);
} >default_rodata_seg
@ -329,6 +328,7 @@ SECTIONS
{
. = ALIGN (4);
mapping[rodata_noload]
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
.flash.text :

Wyświetl plik

@ -97,7 +97,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.);
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -116,7 +116,7 @@ SECTIONS
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/**
@ -141,11 +141,11 @@ SECTIONS
/* Prepare the alignment of the section above. Few bytes (0x20) must be
* added for the mapping header. */
. = ALIGN(_esp_mmu_block_size) + 0x20;
_rodata_reserved_start = .;
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -222,7 +222,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.eh_frame));
} > default_rodata_seg
@ -250,6 +249,7 @@ SECTIONS
{
. = ALIGN (4);
mapping[rodata_noload]
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
/* Marks the end of IRAM code segment */

Wyświetl plik

@ -207,7 +207,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.);
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -226,7 +226,7 @@ SECTIONS
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/**
@ -251,11 +251,11 @@ SECTIONS
/* Prepare the alignment of the section above. Few bytes (0x20) must be
* added for the mapping header. */
. = ALIGN(_esp_mmu_block_size) + 0x20;
_rodata_reserved_start = .;
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -332,7 +332,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.eh_frame));
} > default_rodata_seg
@ -360,6 +359,7 @@ SECTIONS
{
. = ALIGN (4);
mapping[rodata_noload]
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
/* Marks the end of IRAM code segment */

Wyświetl plik

@ -240,7 +240,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.);
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -259,7 +259,7 @@ SECTIONS
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/**
@ -284,11 +284,11 @@ SECTIONS
/* Prepare the alignment of the section above. Few bytes (0x20) must be
* added for the mapping header. */
. = ALIGN(_esp_mmu_block_size) + 0x20;
_rodata_reserved_start = .;
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -365,7 +365,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.eh_frame));
} > default_rodata_seg
@ -393,6 +392,7 @@ SECTIONS
{
. = ALIGN (4);
mapping[rodata_noload]
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
/* Marks the end of data, bss and possibly rodata */

Wyświetl plik

@ -210,7 +210,7 @@ SECTIONS
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.);
_instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
@ -229,7 +229,7 @@ SECTIONS
. += 16;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
_etext = .;
/**
@ -254,11 +254,11 @@ SECTIONS
/* Prepare the alignment of the section above. Few bytes (0x20) must be
* added for the mapping header. */
. = ALIGN(_esp_mmu_block_size) + 0x20;
_rodata_reserved_start = .;
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -335,7 +335,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.eh_frame));
} > default_rodata_seg
@ -359,6 +358,13 @@ SECTIONS
__eh_frame_hdr_end = ABSOLUTE(.);
} > default_rodata_seg
.flash.rodata_noload (NOLOAD) :
{
. = ALIGN (4);
/* Empty for now, only a container for the `_rodata_reserved_end` symbol; add rodata noload mappings here if needed */
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
/* Marks the end of IRAM code segment */
.iram0.text_end (NOLOAD) :
{

Wyświetl plik

@ -335,7 +335,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
. = ALIGN(4);
} >default_rodata_seg
@ -345,6 +344,7 @@ SECTIONS
{
. = ALIGN (4);
mapping[rodata_noload]
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
.flash.text :

Wyświetl plik

@ -288,11 +288,11 @@ SECTIONS
/* Prepare the alignment of the section above. Few bytes (0x20) must be
* added for the mapping header. */
. = ALIGN(_esp_mmu_block_size) + 0x20;
_rodata_reserved_start = .; /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@ -361,7 +361,6 @@ SECTIONS
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
. = ALIGN(4);
} > default_rodata_seg
@ -371,6 +370,7 @@ SECTIONS
{
. = ALIGN (4);
mapping[rodata_noload]
_rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
} > default_rodata_seg
/**

Wyświetl plik

@ -68,6 +68,7 @@
#include "esp32c2/memprot.h"
#endif
#include "esp_private/esp_mmu_map_private.h"
#if CONFIG_SPIRAM
#include "esp_psram.h"
#include "esp_private/esp_psram_extram.h"
@ -401,6 +402,11 @@ void IRAM_ATTR call_start_cpu0(void)
mspi_timing_flash_tuning();
#endif
#if !CONFIG_IDF_TARGET_ESP32H2
//ESP32H2 MMU-TODO: IDF-6251
esp_mmu_map_init();
#endif //!CONFIG_IDF_TARGET_ESP32H2
#if CONFIG_SPIRAM_BOOT_INIT
if (esp_psram_init() != ESP_OK) {
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -18,6 +18,8 @@
extern "C" {
#endif
#define MMU_LL_PSRAM_ENTRY_START_ID 1152
/**
* Convert MMU virtual address to linear address
*
@ -75,7 +77,7 @@ static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
__attribute__((always_inline))
static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
{
HAL_ASSERT(size == MMU_PAGE_64KB);
//Only supports `MMU_PAGE_64KB`
}
/**
@ -101,6 +103,96 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
(ADDRESS_IN_DROM0_CACHE(vaddr_start) && ADDRESS_IN_DROM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
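To make the bound concrete, a worked example under assumed values (64 KB pages, MMU_MAX_PADDR_PAGE_NUM of 256, i.e. a 16 MB mappable physical window):

//mmu_ll_check_valid_paddr_region(0, 0x00FF0000, 0x10000) -> true: last byte is 0x00FFFFFF, still inside the window
//mmu_ll_check_valid_paddr_region(0, 0x00FF0000, 0x20000) -> false: last byte is 0x0100FFFF, past the window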
/**
* To get the MMU table entry id to be mapped
*
* @param mmu_id MMU ID
* @param vaddr virtual address to be mapped
*
* @return
* MMU table entry id
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
{
(void)mmu_id;
uint32_t offset = 0;
uint32_t shift_code = 0;
uint32_t vaddr_mask = 0;
//On ESP32, we only use PID0 and PID1
if (ADDRESS_IN_DROM0_CACHE(vaddr)) {
offset = 0;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_IRAM0_CACHE(vaddr)) {
offset = 64;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_IRAM1_CACHE(vaddr)) {
offset = 128;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_IROM0_CACHE(vaddr)) {
offset = 192;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_DRAM1_CACHE(vaddr)) {
//PSRAM page size 32KB
offset = MMU_LL_PSRAM_ENTRY_START_ID;
shift_code = 15;
vaddr_mask = MMU_VADDR_MASK >> 1;
} else {
HAL_ASSERT(false);
}
return offset + ((vaddr & vaddr_mask) >> shift_code);
}
/**
* Format the paddr to be mappable
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
uint32_t shift_code = 0;
if (target == MMU_TARGET_FLASH0) {
shift_code = 16;
} else {
//PSRAM page size 32KB
shift_code = 15;
}
return paddr >> shift_code;
}
/**
* Write to the MMU table to map the virtual memory and the physical memory
*
@ -113,7 +205,6 @@ __attribute__((always_inline))
static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
{
(void)target;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
DPORT_INTERRUPT_DISABLE();
switch (mmu_id) {
@ -140,7 +231,6 @@ __attribute__((always_inline))
static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
{
uint32_t mmu_value;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
DPORT_INTERRUPT_DISABLE();
switch (mmu_id) {
@ -167,7 +257,6 @@ __attribute__((always_inline))
static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
{
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
DPORT_INTERRUPT_DISABLE();
switch (mmu_id) {
case MMU_TABLE_CORE0:
@ -196,22 +285,164 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Get MMU table entry is invalid
* Check whether the MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return ture for MMU entry is invalid, false for valid
*
* @return True if the MMU entry is valid; false if invalid
*/
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
DPORT_INTERRUPT_DISABLE();
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id]);
DPORT_INTERRUPT_RESTORE();
return (mmu_value & MMU_INVALID) ? true : false;
return (mmu_value & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
HAL_ASSERT(mmu_ll_check_entry_valid(mmu_id, entry_id));
return (entry_id >= MMU_LL_PSRAM_ENTRY_START_ID) ? MMU_TARGET_PSRAM0 : MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
DPORT_INTERRUPT_DISABLE();
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id]);
DPORT_INTERRUPT_RESTORE();
return (entry_id >= MMU_LL_PSRAM_ENTRY_START_ID) ? (mmu_value << 15) : (mmu_value << 16);
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
(void)target;
DPORT_INTERRUPT_DISABLE();
if (target == MMU_TARGET_FLASH0) {
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
if (!(mmu_value & MMU_INVALID)) {
if (mmu_value == mmu_val) {
DPORT_INTERRUPT_RESTORE();
return i;
}
}
}
} else {
//For PSRAM, we only use PID 0/1. Its start entry ID is MMU_LL_PSRAM_ENTRY_START_ID (1152), and 128 entries are used for PSRAM
for (int i = MMU_LL_PSRAM_ENTRY_START_ID; i < 1280; i++) {
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
if (!(mmu_value & MMU_INVALID)) {
if (mmu_value == mmu_val) {
DPORT_INTERRUPT_RESTORE();
return i;
}
}
}
}
DPORT_INTERRUPT_RESTORE();
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_base = 0;
uint32_t shift_code = 0;
if (entry_id < 64) {
//first 64 entries are for DROM0
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0;
shift_code = 16;
vaddr_base = 0x3f400000;
} else if (entry_id >= 64 && entry_id < 128) {
//second 64 entries are for IRAM0
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 64;
shift_code = 16;
vaddr_base = 0x40000000;
} else if (entry_id >= 128 && entry_id < 192) {
//third 64 entries are for IRAM1
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 128;
shift_code = 16;
vaddr_base = 0x40000000;
} else if (entry_id >= 192 && entry_id < 256) {
//fourth 64 entries are for IROM0
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 192;
shift_code = 16;
vaddr_base = 0x40000000;
} else if (entry_id >= MMU_LL_PSRAM_ENTRY_START_ID) {
//starting from 1152, 128 entries are for DRAM1
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= MMU_LL_PSRAM_ENTRY_START_ID;
shift_code = 15;
vaddr_base = 0x3f800000;
} else {
HAL_ASSERT(false);
}
return vaddr_base + (entry_id << shift_code);
}
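A worked round trip through the two lookups above, using illustrative values and the 64 KB flash page size:

uint32_t vaddr = 0x3f410000;                                          //second page of the DROM0 region
uint32_t id = mmu_ll_get_entry_id(0, vaddr);                          //0 + ((vaddr & MMU_VADDR_MASK) >> 16) = 1
uint32_t base = mmu_ll_entry_id_to_vaddr_base(0, id, MMU_VADDR_DATA); //0x3f400000 + (1 << 16) = 0x3f410000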
#ifdef __cplusplus

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -20,11 +20,36 @@ extern "C" {
#endif
/**
* @brief The real MMU page size get from Kconfig.
* Convert MMU virtual address to linear address
*
* @note Only used in this file
* @param vaddr virtual address
*
* @return linear address
*/
#define MMU_LL_PAGE_SIZE (CONFIG_MMU_PAGE_SIZE)
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
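A quick round trip through these two helpers (the concrete offset is illustrative):

uint32_t v = SOC_MMU_DBUS_VADDR_BASE | 0x10000;           //some data-bus virtual address
uint32_t l = mmu_ll_vaddr_to_laddr(v);                    //strip the bus bits -> 0x10000
uint32_t back = mmu_ll_laddr_to_vaddr(l, MMU_VADDR_DATA); //re-attach the DBUS base -> v again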
/**
* Get MMU page size
@ -71,6 +96,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@ -109,14 +152,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
@ -200,19 +245,122 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Get MMU table entry is invalid
* Check whether the MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return ture for MMU entry is invalid, false for valid
*
* @return True if the MMU entry is valid; false if invalid
*/
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
default:
HAL_ASSERT(shift_code);
}
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << shift_code;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -8,6 +8,7 @@
#pragma once
#include "esp_types.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@ -18,6 +19,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
/**
* Get MMU page size
*
@ -64,6 +97,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@ -85,14 +136,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@ -159,19 +212,93 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Get MMU table entry is invalid
* Check whether the MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return ture for MMU entry is invalid, false for valid
*
* @return True if the MMU entry is valid; false if invalid
*/
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t laddr = entry_id << 16;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
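Chained together, these helpers give a paddr-to-vaddr reverse lookup, roughly as below (a sketch; `paddr` is a placeholder and the 64 KB page size of this target is assumed):

uint32_t mmu_val = mmu_ll_format_paddr(0, paddr, MMU_TARGET_FLASH0);             //paddr -> table value
int id = mmu_ll_find_entry_id_based_on_map_value(0, mmu_val, MMU_TARGET_FLASH0);
if (id >= 0) {
    //vaddr of the page base, plus the offset of paddr within its 64 KB page
    uint32_t vaddr = mmu_ll_entry_id_to_vaddr_base(0, id, MMU_VADDR_DATA) + (paddr & 0xFFFF);
}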
#ifdef __cplusplus

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -54,9 +54,9 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v
cache_bus_mask_t mask = 0;
uint32_t vaddr_end = vaddr_start + len - 1;
if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_IBUS0;
} else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
} else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_DBUS0;
} else {
HAL_ASSERT(0); //Out of region

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -20,11 +20,30 @@ extern "C" {
#endif
/**
* @brief The real MMU page size get from Kconfig.
* Convert MMU virtual address to linear address
*
* @note Only used in this file
* @param vaddr virtual address
*
* @return linear address
*/
#define MMU_LL_PAGE_SIZE (CONFIG_MMU_PAGE_SIZE)
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
//On ESP32C6, I/D share the same vaddr range
return SOC_MMU_IBUS_VADDR_BASE | laddr;
}
__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
{
@ -82,7 +101,25 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_IRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_DRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE));
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
@ -110,10 +147,13 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
return ((vaddr & MMU_VADDR_MASK(page_size)) >> shift_code);
return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
/**
@ -121,14 +161,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
@ -141,6 +183,9 @@ static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
@ -163,8 +208,8 @@ __attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mm
if (mmu_ll_cache_encryption_enabled()) {
mmu_val |= MMU_SENSITIVE;
}
/* Note: for ESP32-C6, invert invalid bit for compatible with upper-layer software */
mmu_raw_value = mmu_val ^ MMU_INVALID_MASK;
mmu_raw_value = mmu_val | MMU_VALID;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), mmu_raw_value);
}
@ -186,8 +231,10 @@ __attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t
if (mmu_ll_cache_encryption_enabled()) {
mmu_raw_value &= ~MMU_SENSITIVE;
}
/* Note: for ESP32-C6, invert invalid bit for compatible with upper-layer software */
ret = mmu_raw_value ^ MMU_INVALID_MASK;
if (!(mmu_raw_value & MMU_VALID)) {
return 0;
}
ret = mmu_raw_value & MMU_VALID_VAL_MASK;
return ret;
}
@ -204,23 +251,6 @@ __attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint3
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), MMU_INVALID);
}
/**
* Get MMU table entry is invalid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return ture for MMU entry is invalid, false for valid
*/
__attribute__((always_inline)) static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
uint32_t mmu_raw_value;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
/* Note: for ESP32-C6, the invalid-bit of MMU: 0 for invalid, 1 for valid */
return (mmu_raw_value & MMU_INVALID_MASK) ? false : true;
}
/**
* Unmap all the items in the MMU table
*
@ -234,6 +264,134 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
}
/**
* Check whether the MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false if invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID) ? true : false;
}
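With the real valid bit introduced here (instead of the old inverted-invalid convention), a raw table read decomposes as in this sketch (`entry_id` is a placeholder):

REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);        //select the entry
uint32_t raw = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));  //raw table word
bool valid = (raw & MMU_VALID) != 0;                       //what mmu_ll_check_entry_valid() tests
uint32_t mmu_val = valid ? (raw & MMU_VALID_VAL_MASK) : 0; //what mmu_ll_read_entry() returns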
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) << shift_code;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), i);
if ((REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus
}
#endif

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -54,9 +54,9 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v
cache_bus_mask_t mask = 0;
uint32_t vaddr_end = vaddr_start + len - 1;
if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_IBUS0;
} else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
} else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_DBUS0;
} else {
HAL_ASSERT(0); //Out of region

Wyświetl plik

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -19,12 +19,33 @@
extern "C" {
#endif
/**
* @brief The real MMU page size get from Kconfig.
* Convert MMU virtual address to linear address
*
* @note Only used in this file
* @param vaddr virtual address
*
* @return linear address
*/
#define MMU_LL_PAGE_SIZE (CONFIG_MMU_PAGE_SIZE)
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
//On ESP32H2, I/D share the same vaddr range
return SOC_MMU_IBUS_VADDR_BASE | laddr;
}
__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
{
@ -83,7 +104,25 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_IRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_DRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE));
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
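Concretely, the three comparisons bound the region against page_size * MMU_MAX_PADDR_PAGE_NUM; the third one catches regions that start inside the window but spill past its end. A standalone worked example with illustrative numbers (64 KB pages and 256 pages, i.e. a 16 MB physical window):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint32_t page_size = 0x10000;            // 64 KB, illustrative
    const uint32_t max_pages = 256;                // illustrative MMU_MAX_PADDR_PAGE_NUM
    const uint32_t limit = page_size * max_pages;  // 16 MB physical window

    uint32_t start = 0xF00000;                     // 15 MB
    uint32_t len   = 0x200000;                     // 2 MB
    // The same three checks as mmu_ll_check_valid_paddr_region():
    int valid = (start < limit) && (len < limit) && ((start + len - 1) < limit);
    assert(!valid);                                // 15 MB + 2 MB spills past the 16 MB window
    return 0;
}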
/**
@ -119,7 +158,7 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
HAL_ASSERT(shift_code);
}
return ((vaddr & MMU_VADDR_MASK(page_size)) >> shift_code);
return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
/**
@ -132,9 +171,10 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
@ -217,28 +257,23 @@ __attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint3
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), MMU_INVALID);
}
/**
* Get MMU table entry is invalid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return true if the MMU entry is invalid, false if valid
*/
__attribute__((always_inline)) static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
// /**
// * Get MMU table entry is invalid
// *
// * @param mmu_id MMU ID
// * @param entry_id MMU entry ID
// * return true if the MMU entry is invalid, false if valid
// */
// __attribute__((always_inline)) static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
// {
// (void)mmu_id;
uint32_t mmu_raw_value;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
/* Note: for ESP32-H2, the invalid-bit of MMU: 0 for invalid, 1 for valid */
return (mmu_raw_value & MMU_INVALID_MASK) ? false : true;
}
#ifdef __cplusplus
}
#endif
// uint32_t mmu_raw_value;
// REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
// mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
// /* Note: for ESP32-H2, the invalid-bit of MMU: 0 for invalid, 1 for valid */
// return (mmu_raw_value & MMU_INVALID_MASK) ? false : true;
// }
/**
@ -253,3 +288,137 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
mmu_ll_set_entry_invalid(mmu_id, i);
}
}
/**
* Check MMU table entry value is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false if invalid
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID) ? true : false;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) << shift_code;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value read from the MMU table, representing the paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), i);
if ((REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus
}
#endif

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -8,6 +8,7 @@
#pragma once
#include "esp_types.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@ -18,6 +19,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
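Unlike the shared-range targets, this conversion picks a different base per bus, so one linear address has two virtual views. A small sketch (the 0x3C00_0000 and 0x4200_0000 values are the DBUS/IBUS bases assumed for this target):

#include <stdint.h>
#include "hal/mmu_ll.h"

static void demo_split_bus_views(void)
{
    uint32_t laddr = 0x12345;
    uint32_t dvaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);         // 0x3C012345 via the DBUS base
    uint32_t ivaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);  // 0x42012345 via the IBUS base
    // Both views map the same linear (and hence physical) page through different buses.
    (void)dvaddr;
    (void)ivaddr;
}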
/**
* Get MMU page size
*
@ -64,6 +97,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@ -85,14 +136,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@ -159,19 +212,93 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Get MMU table entry is invalid
* Check MMU table entry value is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return true if the MMU entry is invalid, false if valid
*
* @return True if the MMU entry is valid; false if invalid
*/
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value read from the MMU table, representing the paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t laddr = entry_id << 16;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -38,7 +38,7 @@ __attribute__((always_inline))
#endif
static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len)
{
HAL_ASSERT(cache_id == 0);
(void)cache_id;
cache_bus_mask_t mask = 0;
uint32_t vaddr_end = vaddr_start + len - 1;
@ -87,7 +87,7 @@ __attribute__((always_inline))
#endif
static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
HAL_ASSERT(cache_id == 0);
(void)cache_id;
uint32_t ibus_mask = 0;
ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_PRO_ICACHE_MASK_IRAM0 : 0;
@ -111,7 +111,7 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma
__attribute__((always_inline))
static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
HAL_ASSERT(cache_id == 0);
(void)cache_id;
uint32_t ibus_mask = 0;
ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_PRO_ICACHE_MASK_IRAM0 : 0;

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -104,6 +104,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (on_ibus || on_dbus);
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@ -143,14 +161,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@ -217,19 +237,137 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Get MMU table entry is invalid
* Check MMU table entry value is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return true if the MMU entry is invalid, false if valid
*
* @return True if the MMU entry is valid; false if invalid
*/
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
HAL_ASSERT(mmu_ll_check_entry_valid(mmu_id, entry_id));
if ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_ACCESS_FLASH) {
return MMU_TARGET_FLASH0;
} else {
return MMU_TARGET_PSRAM0;
}
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value read from the MMU table, representing the paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_base = 0;
if (entry_id < 0x40) {
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 0;
vaddr_base = 0x40000000;
} else if (entry_id >= 0x40 && entry_id < 0x80) {
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 0x40;
vaddr_base = 0x40000000;
} else if (entry_id >= 0x80 && entry_id < 0xC0) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0x80;
vaddr_base = 0x3f000000;
} else if (entry_id >= 0xC0 && entry_id < 0x100) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0xC0;
vaddr_base = 0x3f000000;
} else if (entry_id >= 0x100 && entry_id < 0x140) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0x100;
vaddr_base = 0x3f000000;
} else if (entry_id >= 0x140 && entry_id < 0x180) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0x140;
vaddr_base = 0x3f000000;
} else {
HAL_ASSERT(false);
}
return vaddr_base + (entry_id << 16);
}
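The branch ladder above encodes the ESP32 layout: six 64-entry banks, the first two feeding the instruction window at 0x4000_0000 and the remaining four the data window at 0x3F00_0000, with the entry ID taken modulo one bank. A condensed restatement of the same arithmetic (illustrative only, not a drop-in replacement):

#include <stdbool.h>
#include <stdint.h>

// Condensed form of the bank logic above: 64-entry banks, two windows, 64 KB pages.
static bool demo_esp32_entry_to_vaddr(uint32_t entry_id, bool is_data, uint32_t *out_vaddr)
{
    if (entry_id < 0x80) {                                   // banks 0-1: instruction bus
        if (is_data) {
            return false;
        }
        *out_vaddr = 0x40000000 + ((entry_id % 0x40) << 16);
    } else if (entry_id < 0x180) {                           // banks 2-5: data bus
        if (!is_data) {
            return false;
        }
        *out_vaddr = 0x3f000000 + (((entry_id - 0x80) % 0x40) << 16);
    } else {
        return false;                                        // out of range
    }
    return true;
}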
#ifdef __cplusplus

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -8,6 +8,7 @@
#pragma once
#include "esp_types.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@ -96,6 +97,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@ -117,14 +136,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@ -191,19 +212,94 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Get MMU table entry is invalid
* Check MMU table entry value is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return true if the MMU entry is invalid, false if valid
*
* @return True if the MMU entry is valid; false if invalid
*/
__attribute__((always_inline))
static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
bool target_code = (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_TYPE;
return (target_code == MMU_ACCESS_FLASH) ? MMU_TARGET_FLASH0 : MMU_TARGET_PSRAM0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value read from the MMU table, representing the paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t laddr = entry_id << 16;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus

View file

@ -6,6 +6,7 @@
#pragma once
#include <esp_types.h>
#include "hal/mmu_types.h"
#ifdef __cplusplus
@ -17,7 +18,6 @@ extern "C" {
*/
void mmu_hal_init(void);
#if !CONFIG_IDF_TARGET_ESP32
/**
* Helper functions to convert the MMU page numbers into bytes. e.g.:
* - When MMU page size is 16KB, page_num = 2 will be converted into 32KB
@ -45,7 +45,7 @@ uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num);
uint32_t mmu_hal_bytes_to_pages(uint32_t mmu_id, uint32_t bytes);
/**
* To map a virtual address region to a physical memory region
* To map a virtual address block to a physical memory block
*
* @param mmu_id MMU ID
* @param mem_type physical memory type, see `mmu_target_t`
@ -57,7 +57,47 @@ uint32_t mmu_hal_bytes_to_pages(uint32_t mmu_id, uint32_t bytes);
* @note vaddr and paddr should be aligned with the mmu page size, see CONFIG_MMU_PAGE_SIZE
*/
void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr, uint32_t paddr, uint32_t len, uint32_t *out_len);
#endif
/**
* To unmap a virtual address block that is mapped to a physical memory block previously
*
* @param[in] mmu_id MMU ID
* @param[in] vaddr start virtual address
* @param[in] len length to be unmapped, in bytes
*/
void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len);
/**
* Convert virtual address to physical address
*
* @param mmu_id MMU ID
* @param vaddr virtual address
* @param[out] out_paddr physical address
* @param[out] out_target Indicating the vaddr/paddr is mapped on which target, see `mmu_target_t`
*
* @return
* - true: virtual address is valid
* - false: virtual address isn't valid
*/
bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target);
/**
* Convert physical address to virtual address
*
* @note This function can only find the first matching virtual address.
* However, it is possible that a physical address is mapped to multiple virtual addresses.
*
* @param mmu_id MMU ID
* @param paddr physical address
* @param target physical memory target, see `mmu_target_t`
* @param type virtual address type, could be instruction or data
* @param[out] out_vaddr virtual address
*
* @return
* - true: a matching vaddr was found
* - false: no matching vaddr was found
*/
bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr);
#ifdef __cplusplus
}
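The two conversion routines pair naturally: mmu_hal_vaddr_to_paddr() recovers the physical backing (and target) of a mapped vaddr, and mmu_hal_paddr_to_vaddr() searches the table the other way. A usage sketch (error handling trimmed; MMU ID 0 as used throughout this patch):

#include <stdint.h>
#include "hal/mmu_hal.h"
#include "hal/mmu_types.h"

static void demo_translate(uint32_t vaddr)
{
    uint32_t paddr = 0;
    mmu_target_t target;
    if (!mmu_hal_vaddr_to_paddr(0, vaddr, &paddr, &target)) {
        return;                     // vaddr not mapped
    }
    uint32_t back = 0;
    // Reverse lookup: may return a different vaddr than the original one,
    // since only the first matching entry is reported.
    if (mmu_hal_paddr_to_vaddr(0, paddr, target, MMU_VADDR_DATA, &back)) {
        (void)back;
    }
}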

View file

@ -42,8 +42,8 @@ typedef enum {
* External physical memory
*/
typedef enum {
MMU_TARGET_FLASH0,
MMU_TARGET_PSRAM0,
MMU_TARGET_FLASH0 = BIT(0),
MMU_TARGET_PSRAM0 = BIT(1),
} mmu_target_t;
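Turning the targets into distinct bits (instead of 0 and 1) lets callers express a set of targets as a plain mask, e.g. "flash or PSRAM". An illustrative sketch (the allowed_mask parameter is hypothetical, not part of the API):

#include <stdbool.h>
#include <stdint.h>
#include "hal/mmu_types.h"

// Illustrative: test whether an entry's target is in an allowed set.
static bool demo_target_allowed(mmu_target_t target, uint32_t allowed_mask)
{
    return (target & allowed_mask) != 0;   // works because each target is a single bit
}

// e.g. demo_target_allowed(t, MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0)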
/**

View file

@ -13,23 +13,6 @@
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/cache.h"
#include "soc/dport_reg.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C3
#include "esp32c3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C2
#include "esp32c2/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32H4
#include "esp32h4/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C6
#include "esp32c6/rom/cache.h"
#endif
void mmu_hal_init(void)
{
mmu_ll_unmap_all(0);
@ -38,8 +21,6 @@ void mmu_hal_init(void)
#endif
}
#if !CONFIG_IDF_TARGET_ESP32
//If decided, add a jira ticket for implementing these APIs on ESP32
uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num)
{
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
@ -85,7 +66,7 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
HAL_ASSERT(vaddr % page_size_in_bytes == 0);
HAL_ASSERT(paddr % page_size_in_bytes == 0);
HAL_ASSERT((paddr + len - 1) < mmu_hal_pages_to_bytes(mmu_id, MMU_MAX_PADDR_PAGE_NUM));
HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, len));
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, len));
uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
@ -93,7 +74,7 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
uint32_t mmu_val; //This is the physical address in the format that MMU supported
*out_len = mmu_hal_pages_to_bytes(mmu_id, page_num);
mmu_val = mmu_ll_format_paddr(mmu_id, paddr);
mmu_val = mmu_ll_format_paddr(mmu_id, paddr, mem_type);
while (page_num) {
entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
@ -103,4 +84,58 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
page_num--;
}
}
#endif //#if !CONFIG_IDF_TARGET_ESP32
void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len)
{
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
HAL_ASSERT(vaddr % page_size_in_bytes == 0);
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, len));
uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
uint32_t entry_id = 0;
while (page_num) {
entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
mmu_ll_set_entry_invalid(mmu_id, entry_id);
vaddr += page_size_in_bytes;
page_num--;
}
}
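Map and unmap are symmetric: both walk the region one page at a time, so lengths are effectively rounded up to whole pages, and out_len reports the size actually mapped. A pairing sketch (addresses are placeholders and must be CONFIG_MMU_PAGE_SIZE aligned, as the asserts above require):

#include <stdint.h>
#include "hal/mmu_hal.h"
#include "hal/mmu_types.h"

static void demo_map_then_unmap(void)
{
    uint32_t out_len = 0;
    uint32_t vaddr = 0x42000000;   // placeholder, page-aligned
    uint32_t paddr = 0x100000;     // placeholder, page-aligned

    mmu_hal_map_region(0, MMU_TARGET_FLASH0, vaddr, paddr, 0x20000, &out_len);
    // out_len >= 0x20000, rounded up to a whole number of pages.
    mmu_hal_unmap_region(0, vaddr, out_len);
}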
bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target)
{
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, 1));
uint32_t entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
if (!mmu_ll_check_entry_valid(mmu_id, entry_id)) {
return false;
}
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
uint32_t offset = (uint32_t)vaddr % page_size_in_bytes;
*out_target = mmu_ll_get_entry_target(mmu_id, entry_id);
uint32_t paddr_base = mmu_ll_entry_id_to_paddr_base(mmu_id, entry_id);
*out_paddr = paddr_base | offset;
return true;
}
bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, 1));
uint32_t mmu_val = mmu_ll_format_paddr(mmu_id, paddr, target);
int entry_id = mmu_ll_find_entry_id_based_on_map_value(mmu_id, mmu_val, target);
if (entry_id == -1) {
return false;
}
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
uint32_t offset = paddr % page_size_in_bytes;
uint32_t vaddr_base = mmu_ll_entry_id_to_vaddr_base(mmu_id, entry_id, type);
if (vaddr_base == 0) {
return false;
}
*out_vaddr = vaddr_base | offset;
return true;
}

View file

@ -227,10 +227,6 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 5
config SOC_CPU_CORES_NUM
int
default 2
@ -443,6 +439,14 @@ config SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP
int
default 3
config SOC_MMU_PERIPH_NUM
int
default 2
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 3
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n

View file

@ -39,7 +39,18 @@ extern "C" {
#define MMU_INVALID BIT(8)
//MMU entry num, 384 entries that are used in IDF
/**
* Maximum number of paddr pages available to the MMU.
* `MMU_MAX_PADDR_PAGE_NUM * CONFIG_MMU_PAGE_SIZE` is the maximum physical address space the MMU supports. e.g.:
* 256 * 64KB, meaning the MMU can address at most 16MB of paddr
*/
#define MMU_MAX_PADDR_PAGE_NUM 256
/**
* This is the mask used for mapping. e.g.:
* 0x4008_0000 & MMU_VADDR_MASK
*/
#define MMU_VADDR_MASK 0x3FFFFF
//MMU entry num, 384 entries that are used in IDF for Flash
#define MMU_ENTRY_NUM 384

View file

@ -136,11 +136,8 @@
#define SOC_BROWNOUT_RESET_SUPPORTED 1
#endif
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
/*-------------------------- CACHE CAPS --------------------------------------*/
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2
@ -231,6 +228,10 @@
#define SOC_MCPWM_CAPTURE_CHANNELS_PER_TIMER (3) ///< The number of capture channels that each capture timer has
#define SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP (3) ///< The number of GPIO synchros that each group has
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PERIPH_NUM 2
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 3
/*-------------------------- MPU CAPS ----------------------------------------*/
//TODO: correct the caller and remove unsupported lines
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0

View file

@ -199,10 +199,6 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
bool
default y
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_GDMA_GROUPS
int
default 1
@ -307,6 +303,18 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int
default 1
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n

View file

@ -5,6 +5,7 @@
*/
#pragma once
#include <stdint.h>
#include "sdkconfig.h"
#include "esp_bit_defs.h"
@ -14,17 +15,18 @@ extern "C" {
#endif
/*IRAM0 is connected with Cache IBUS0*/
#define IRAM0_ADDRESS_LOW 0x4037C000
#define IRAM0_ADDRESS_HIGH 0x403C0000
#define IRAM0_CACHE_ADDRESS_LOW 0x42000000
#define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * 64)) // MMU has 64 pages
#define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM)) // MMU has 64 pages
/*DRAM0 is connected with Cache DBUS0*/
#define DRAM0_ADDRESS_LOW 0x3FCA0000
#define DRAM0_ADDRESS_HIGH 0x3FCE0000
#define DRAM0_CACHE_ADDRESS_LOW 0x3C000000
#define DRAM0_CACHE_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * 64)) // MMU has 64 pages
#define DRAM0_CACHE_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM)) // MMU has 64 pages
#define DRAM0_CACHE_OPERATION_HIGH DRAM0_CACHE_ADDRESS_HIGH
#define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
@ -71,9 +73,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits
@ -104,6 +103,64 @@ extern "C" {
#define CACHE_MEMORY_IBANK0_ADDR 0x4037C000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
#if (CONFIG_MMU_PAGE_SIZE == 0x10000)
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 64 MMU entries, needs 0x3F to hold it.
*
* Therefore, 0x3F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x3FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x8000)
/**
* - 32KB MMU page size: the last 0x7FFF, which is the offset
* - 64 MMU entries, needs 0x3F to hold it.
*
* Therefore, 0x1F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x4000)
/**
* - 16KB MMU page size: the last 0x3FFF, which is the offset
* - 64 MMU entries, needs 0x3F to hold it.
*
* Therefore, 0xF,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0xFFFFF
#endif //CONFIG_MMU_PAGE_SIZE
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
*   Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear addresses should be the same");
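All the masks in this family follow one formula, page_size * entry_count - 1, i.e. entry-index bits concatenated with offset bits. A compile-time spot check of the three cases above (64 entries, per this target's table):

#include <assert.h>

// 64 entries -> index fits in 0x3F; offset is page_size - 1.
static_assert(0x10000 * 64 - 1 == 0x3FFFFF, "64 KB pages");
static_assert(0x8000  * 64 - 1 == 0x1FFFFF, "32 KB pages");
static_assert(0x4000  * 64 - 1 == 0xFFFFF,  "16 KB pages");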
#ifdef __cplusplus
}

View file

@ -97,9 +97,6 @@
#define SOC_CPU_IDRAM_SPLIT_USING_PMP 1
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
/*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (1U) // Number of GDMA pairs in each group
@ -154,6 +151,11 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0
#define SOC_MPU_MIN_REGION_SIZE 0x20000000U

View file

@ -439,6 +439,14 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int
default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n

View file

@ -69,9 +69,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits
@ -99,6 +96,45 @@ extern "C" {
#define CACHE_MEMORY_IBANK0_ADDR 0x4037c000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
*   Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear addresses should be the same");
#ifdef __cplusplus
}
#endif

View file

@ -204,6 +204,10 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0
#define SOC_MPU_MIN_REGION_SIZE 0x20000000U

View file

@ -311,10 +311,6 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
bool
default y
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_DS_SIGNATURE_MAX_BIT_LEN
int
default 3072
@ -511,6 +507,18 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int
default 16
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_DI_VADDR_SHARED
bool
default y

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -13,35 +13,24 @@
extern "C" {
#endif
#define IRAM0_ADDRESS_LOW 0x40800000
#define IRAM0_ADDRESS_HIGH 0x40880000
#define IRAM0_CACHE_ADDRESS_LOW 0x42000000
#define IRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 256))
#define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM))
#define DRAM0_ADDRESS_LOW IRAM0_ADDRESS_LOW //I/D share the same vaddr range
#define DRAM0_ADDRESS_HIGH IRAM0_ADDRESS_HIGH //I/D share the same vaddr range
#define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_LOW //I/D share the same vaddr range
#define DRAM0_CACHE_ADDRESS_HIGH(page_size) IRAM0_CACHE_ADDRESS_HIGH(page_size) //I/D share the same vaddr range
#define DRAM0_CACHE_OPERATION_HIGH(page_size) DRAM0_CACHE_ADDRESS_HIGH(page_size)
#define DRAM0_CACHE_ADDRESS_HIGH IRAM0_CACHE_ADDRESS_HIGH //I/D share the same vaddr range
#define DRAM0_CACHE_OPERATION_HIGH DRAM0_CACHE_ADDRESS_HIGH
#define BUS_SIZE(bus_name, page_size) (bus_name##_ADDRESS_HIGH(page_size) - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr, page_size) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH(page_size))
#define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define ADDRESS_IN_IRAM0(vaddr, page_size) ADDRESS_IN_BUS(IRAM0, vaddr, page_size)
#define ADDRESS_IN_IRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr, page_size)
#define ADDRESS_IN_DRAM0(vaddr, page_size) ADDRESS_IN_BUS(DRAM0, vaddr, page_size)
#define ADDRESS_IN_DRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr, page_size)
#define ADDRESS_IN_IRAM0(vaddr) ADDRESS_IN_BUS(IRAM0, vaddr)
#define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
#define ADDRESS_IN_DRAM0(vaddr) ADDRESS_IN_BUS(DRAM0, vaddr)
#define ADDRESS_IN_DRAM0_CACHE(vaddr) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr)
#define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE, page_size)
#define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE, page_size)
#define CACHE_IBUS 0
#define CACHE_IBUS_MMU_START 0
#define CACHE_IBUS_MMU_END 0x200
#define CACHE_DBUS 1
#define CACHE_DBUS_MMU_START 0
#define CACHE_DBUS_MMU_END 0x200
#define BUS_IRAM0_CACHE_SIZE BUS_SIZE(IRAM0_CACHE)
#define BUS_DRAM0_CACHE_SIZE BUS_SIZE(DRAM0_CACHE)
//TODO, remove these cache function dependencies
#define CACHE_IROM_MMU_START 0
@ -66,11 +55,9 @@ extern "C" {
#define MMU_MSPI_SENSITIVE BIT(10)
#define MMU_ACCESS_FLASH MMU_MSPI_ACCESS_FLASH
#define MMU_ACCESS_SPIRAM MMU_MSPI_ACCESS_SPIRAM
#define MMU_VALID MMU_MSPI_VALID
#define MMU_SENSITIVE MMU_MSPI_SENSITIVE
// ESP32C6-TODO
#define MMU_INVALID_MASK MMU_MSPI_VALID
#define MMU_INVALID MMU_MSPI_INVALID
@ -79,9 +66,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits
@ -101,10 +85,70 @@ extern "C" {
* This is the mask used for mapping. e.g.:
* 0x4200_0000 & MMU_VADDR_MASK
*/
#define MMU_VADDR_MASK(page_size) ((page_size) * MMU_ENTRY_NUM - 1)
#define MMU_VADDR_MASK ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM - 1)
#define CACHE_MEMORY_IBANK0_ADDR 0x40800000
#define SOC_MMU_DBUS_VADDR_BASE 0x42000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
#if (CONFIG_MMU_PAGE_SIZE == 0x10000)
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x8000)
/**
* - 32KB MMU page size: the last 0x7FFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x3F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x3FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x4000)
/**
* - 16KB MMU page size: the last 0x3FFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x1F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFF
#endif //CONFIG_MMU_PAGE_SIZE
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
*   Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear addresses should be the same");
#ifdef __cplusplus
}
#endif

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -23,7 +23,6 @@ extern "C" {
#define SOC_MMU_INVALID_ENTRY_VAL MMU_TABLE_INVALID_VAL
#define SOC_MMU_ADDR_MASK (MMU_VALID - 1)
#define SOC_MMU_PAGE_IN_FLASH(page) (page) //Always in Flash
#define SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE FLASH_MMU_TABLE
#define SOC_MMU_VADDR1_START_ADDR IRAM0_CACHE_ADDRESS_LOW
#define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE SOC_MMU_IROM0_PAGES_START
#define SOC_MMU_VADDR0_START_ADDR (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE))

View file

@ -134,10 +134,6 @@
#define SOC_CPU_HAS_PMA 1
#define SOC_CPU_IDRAM_SPLIT_USING_PMP 1
// TODO: IDF-5339 (Copy from esp32c3, need check)
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
// TODO: IDF-5360 (Copy from esp32c3, need check)
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/
/** The maximum length of a Digital Signature in bits. */
@ -233,6 +229,9 @@
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (16)
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_PERIPH_NUM (1U)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_DI_VADDR_SHARED (1) /*!< D/I vaddr are shared */
/*-------------------------- MPU CAPS ----------------------------------------*/

View file

@ -223,6 +223,10 @@ config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_DS_SIGNATURE_MAX_BIT_LEN
int
default 3072

View file

@ -15,28 +15,23 @@ extern "C" {
/*IRAM0 is connected with Cache IBUS0*/
#define IRAM0_CACHE_ADDRESS_LOW 0x42000000
#define IRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 128)) // MMU has 256 pages, first 128 for instruction
#define IRAM0_ADDRESS_LOW 0x40000000
#define IRAM0_ADDRESS_HIGH(page_size) IRAM0_CACHE_ADDRESS_HIGH(page_size)
#define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * 128)) // MMU has 256 pages, first 128 for instruction
/*DRAM0 is connected with Cache DBUS0*/
#define DRAM0_ADDRESS_LOW 0x42000000
#define DRAM0_ADDRESS_HIGH 0x43000000
#define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE) // ESP32H2-TODO : IDF-6370
#define DRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_HIGH(page_size) + ((page_size) * 128)) // MMU has 256 pages, second 128 for data
#define DRAM0_CACHE_OPERATION_HIGH(page_size) DRAM0_CACHE_ADDRESS_HIGH(page_size)
#define ESP_CACHE_TEMP_ADDR 0x42000000
#define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_HIGH // ESP32H2-TODO : IDF-6370
#define DRAM0_CACHE_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * 128)) // MMU has 256 pages, second 128 for data
#define DRAM0_CACHE_OPERATION_HIGH DRAM0_CACHE_ADDRESS_HIGH
#define BUS_SIZE(bus_name, page_size) (bus_name##_ADDRESS_HIGH(page_size) - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr, page_size) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH(page_size))
#define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define ADDRESS_IN_IRAM0(vaddr, page_size) ADDRESS_IN_BUS(IRAM0, vaddr, page_size)
#define ADDRESS_IN_IRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr, page_size)
#define ADDRESS_IN_DRAM0(vaddr, page_size) ADDRESS_IN_BUS(DRAM0, vaddr, page_size)
#define ADDRESS_IN_DRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr, page_size)
#define ADDRESS_IN_IRAM0(vaddr) ADDRESS_IN_BUS(IRAM0, vaddr)
#define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
#define ADDRESS_IN_DRAM0(vaddr) ADDRESS_IN_BUS(DRAM0, vaddr)
#define ADDRESS_IN_DRAM0_CACHE(vaddr) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr)
#define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE, page_size)
#define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE, page_size)
#define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE)
#define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE)
#define CACHE_IBUS 0
#define CACHE_IBUS_MMU_START 0
@ -82,9 +77,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits
@ -104,10 +96,70 @@ extern "C" {
* This is the mask used for mapping. e.g.:
* 0x4200_0000 & MMU_VADDR_MASK
*/
#define MMU_VADDR_MASK(page_size) ((page_size) * MMU_ENTRY_NUM - 1)
#define MMU_VADDR_MASK ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM - 1)
#define CACHE_MEMORY_IBANK0_ADDR 0x40800000
#define SOC_MMU_DBUS_VADDR_BASE 0x42000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
#if (CONFIG_MMU_PAGE_SIZE == 0x10000)
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x8000)
/**
* - 32KB MMU page size: the last 0x7FFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x3F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x3FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x4000)
/**
* - 16KB MMU page size: the last 0x3FFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x1F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFF
#endif //CONFIG_MMU_PAGE_SIZE
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
*   Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear addresses should be the same");
#ifdef __cplusplus
}
#endif

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -23,7 +23,6 @@ extern "C" {
#define SOC_MMU_INVALID_ENTRY_VAL MMU_TABLE_INVALID_VAL
#define SOC_MMU_ADDR_MASK (MMU_VALID - 1)
#define SOC_MMU_PAGE_IN_FLASH(page) (page) //Always in Flash
#define SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE FLASH_MMU_TABLE
#define SOC_MMU_VADDR1_START_ADDR IRAM0_CACHE_ADDRESS_LOW
#define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE SOC_MMU_IROM0_PAGES_START
#define SOC_MMU_VADDR0_START_ADDR (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE))

View file

@ -126,7 +126,8 @@
// TODO: IDF-6370 (Copy from esp32c6, need check)
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
// TODO: IDF-6285 (Copy from esp32c6, need check)
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/

View file

@ -415,6 +415,14 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int
default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n

View file

@ -69,9 +69,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits
@ -99,6 +96,45 @@ extern "C" {
#define CACHE_MEMORY_IBANK0_ADDR 0x4037c000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries, needs 0x7F to hold it.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
*   Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear addresses should be the same");
#ifdef __cplusplus
}
#endif

View file

@ -211,6 +211,10 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0
#define SOC_MPU_MIN_REGION_SIZE 0x20000000U

View file

@ -255,10 +255,6 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 6
config SOC_CP_DMA_MAX_BUFFER_SIZE
int
default 4095
@ -455,6 +451,14 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int
default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 5
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n

View file

@ -98,9 +98,6 @@ extern "C" {
#define MMU_ACCESS_FLASH BIT(15)
#define MMU_ACCESS_SPIRAM BIT(16)
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits

View file

@ -119,9 +119,6 @@
/*-------------------------- BROWNOUT CAPS -----------------------------------*/
#define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 6
/*-------------------------- CP-DMA CAPS -------------------------------------*/
#define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */
@ -211,6 +208,10 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/
//TODO: correct the caller and remove unsupported lines
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0

View file

@ -299,10 +299,6 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_CPU_CORES_NUM
int
default 2
@ -539,6 +535,14 @@ config SOC_MCPWM_SWSYNC_CAN_PROPAGATE
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_PCNT_GROUPS
int
default 1

View file

@ -69,9 +69,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/**
* MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits

View file

@ -116,9 +116,6 @@
/*-------------------------- BROWNOUT CAPS -----------------------------------*/
#define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32
@ -221,6 +218,10 @@
#define SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP (3) ///< The number of GPIO synchros that each group has
#define SOC_MCPWM_SWSYNC_CAN_PROPAGATE (1) ///< Software sync event can be routed to its output
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/
#include "mpu_caps.h"

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/

View file

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/

View file

@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import pytest

View file

@ -5,7 +5,7 @@
nvs, data, nvs, 0xb000, 0x5000
otadata, data, ota, 0x10000, 0x2000
phy_init, data, phy, 0x12000, 0x1000
factory, 0, 0, 0x20000, 0x160000
factory, 0, 0, 0x20000, 0x150000
# these OTA partitions are used for tests, but can't fit real OTA apps in them
# (done this way to reduce total flash usage.)
ota_0, 0, ota_0, , 64K
