Merge branch 'feature/mmap_unordered_flash_pages' into 'master'

Add function to map non-contiguous flash pages to contiguous memory space.

See merge request !873
Ivan Grokhotkov 2017-07-18 10:49:12 +08:00
commit aef5e90cce
3 changed files with 115 additions and 17 deletions

View file

@@ -99,13 +99,41 @@ esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_
                          const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
 {
     esp_err_t ret;
-    bool did_flush, need_flush = false;
     if (src_addr & 0xffff) {
         return ESP_ERR_INVALID_ARG;
     }
     if (src_addr + size > g_rom_flashchip.chip_size) {
         return ESP_ERR_INVALID_ARG;
     }
+    // region which should be mapped
+    int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
+    int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
+    //prepare a linear pages array to feed into spi_flash_mmap_pages
+    int *pages=malloc(sizeof(int)*page_count);
+    if (pages==NULL) {
+        return ESP_ERR_NO_MEM;
+    }
+    for (int i = 0; i < page_count; i++) {
+        pages[i] = phys_page+i;
+    }
+    ret=spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
+    free(pages);
+    return ret;
+}
+
+esp_err_t IRAM_ATTR spi_flash_mmap_pages(int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
+                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
+{
+    esp_err_t ret;
+    bool did_flush, need_flush = false;
+    if (!page_count) {
+        return ESP_ERR_INVALID_ARG;
+    }
+    for (int i = 0; i < page_count; i++) {
+        if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
+            return ESP_ERR_INVALID_ARG;
+        }
+    }
     mmap_entry_t* new_entry = (mmap_entry_t*) malloc(sizeof(mmap_entry_t));
     if (new_entry == 0) {
         return ESP_ERR_NO_MEM;
@@ -113,8 +141,12 @@ esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_
     spi_flash_disable_interrupts_caches_and_other_cpu();
-    did_flush = spi_flash_ensure_unmodified_region(src_addr, size);
+    did_flush = 0;
+    for (int i = 0; i < page_count; i++) {
+        if (spi_flash_ensure_unmodified_region(pages[i]*SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE)) {
+            did_flush = 1;
+        }
+    }
     spi_flash_mmap_init();
     // figure out the memory region where we should look for pages
     int region_begin;   // first page to check
@@ -131,21 +163,21 @@ esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_
         region_size = 3 * 64 - region_begin;
         region_addr = VADDR1_FIRST_USABLE_ADDR;
     }
-    // region which should be mapped
-    int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
-    int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
+    if (region_size < page_count) {
+        return ESP_ERR_NO_MEM;
+    }
     // The following part searches for a range of MMU entries which can be used.
     // Algorithm is essentially naïve strstr algorithm, except that unused MMU
     // entries are treated as wildcards.
     int start;
     int end = region_begin + region_size - page_count;
     for (start = region_begin; start < end; ++start) {
-        int page = phys_page;
+        int pageno = 0;
         int pos;
-        for (pos = start; pos < start + page_count; ++pos, ++page) {
+        for (pos = start; pos < start + page_count; ++pos, ++pageno) {
             int table_val = (int) DPORT_PRO_FLASH_MMU_TABLE[pos];
             uint8_t refcnt = s_mmap_page_refcnt[pos];
-            if (refcnt != 0 && table_val != page) {
+            if (refcnt != 0 && table_val != pages[pageno]) {
                 break;
             }
         }
@@ -160,17 +192,17 @@ esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_
         *out_ptr = NULL;
         ret = ESP_ERR_NO_MEM;
     } else {
-        // set up mapping using pages [start, start + page_count)
-        uint32_t entry_val = (uint32_t) phys_page;
-        for (int i = start; i != start + page_count; ++i, ++entry_val) {
+        // set up mapping using pages
+        uint32_t pageno = 0;
+        for (int i = start; i != start + page_count; ++i, ++pageno) {
             // sanity check: we won't reconfigure entries with non-zero reference count
             assert(s_mmap_page_refcnt[i] == 0 ||
-                    (DPORT_PRO_FLASH_MMU_TABLE[i] == entry_val &&
-                     DPORT_APP_FLASH_MMU_TABLE[i] == entry_val));
+                    (DPORT_PRO_FLASH_MMU_TABLE[i] == pages[pageno] &&
+                     DPORT_APP_FLASH_MMU_TABLE[i] == pages[pageno]));
             if (s_mmap_page_refcnt[i] == 0) {
-                if (DPORT_PRO_FLASH_MMU_TABLE[i] != entry_val || DPORT_APP_FLASH_MMU_TABLE[i] != entry_val) {
-                    DPORT_PRO_FLASH_MMU_TABLE[i] = entry_val;
-                    DPORT_APP_FLASH_MMU_TABLE[i] = entry_val;
+                if (DPORT_PRO_FLASH_MMU_TABLE[i] != pages[pageno] || DPORT_APP_FLASH_MMU_TABLE[i] != pages[pageno]) {
+                    DPORT_PRO_FLASH_MMU_TABLE[i] = pages[pageno];
+                    DPORT_APP_FLASH_MMU_TABLE[i] = pages[pageno];
                     need_flush = true;
                 }
             }

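As the comments in the hunk above say, the MMU-entry search is essentially a naive strstr: a window of page_count consecutive MMU slots matches if every slot is either unused (reference count zero, treated as a wildcard) or already maps the requested flash page. The following is a minimal standalone sketch of that matching rule only, not the IDF implementation itself; the array names, sizes and the helper name find_mmu_window are made up for illustration.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical stand-ins for DPORT_*_FLASH_MMU_TABLE and s_mmap_page_refcnt.
    #define MMU_SLOTS 64
    static uint32_t mmu_table[MMU_SLOTS];
    static uint8_t  mmu_refcnt[MMU_SLOTS];

    // Return the first window of page_count consecutive MMU slots inside
    // [region_begin, region_begin + region_size) in which every slot is either
    // free (refcnt == 0, a "wildcard") or already maps pages[i]; -1 if none.
    static int find_mmu_window(const int *pages, size_t page_count,
                               int region_begin, int region_size)
    {
        int last_start = region_begin + region_size - (int) page_count;
        for (int start = region_begin; start <= last_start; ++start) {
            bool ok = true;
            for (size_t i = 0; i < page_count; ++i) {
                int slot = start + (int) i;
                if (mmu_refcnt[slot] != 0 && mmu_table[slot] != (uint32_t) pages[i]) {
                    ok = false;   // slot is busy with a different page: window fails
                    break;
                }
            }
            if (ok) {
                return start;     // every slot was free or already matching
            }
        }
        return -1;
    }

Treating in-use slots that already hold the wanted page as matches is what lets overlapping mappings share MMU entries instead of consuming new ones; only the reference counts change.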
View file

@@ -176,6 +176,29 @@ typedef uint32_t spi_flash_mmap_handle_t;
 esp_err_t spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
                          const void** out_ptr, spi_flash_mmap_handle_t* out_handle);
 
+/**
+ * @brief Map sequences of pages of flash memory into data or instruction address space
+ *
+ * This function allocates a sufficient number of 64 KB MMU pages and configures
+ * them to map the indicated pages of flash memory contiguously into data address
+ * space or into instruction address space. In this respect, it works similarly to
+ * spi_flash_mmap, but it allows mapping a (possibly non-contiguous) set of pages
+ * into a contiguous region of memory.
+ *
+ * @param pages An array of numbers indicating the 64 KB pages in flash to be mapped
+ *              contiguously into memory. These are the indexes of the 64 KB pages,
+ *              not the byte addresses used in other functions.
+ * @param pagecount Size of the pages array
+ * @param memory Memory space where the region should be mapped
+ * @param out_ptr Output, pointer to the mapped memory region
+ * @param out_handle Output, handle which should be used for the spi_flash_munmap call
+ *
+ * @return ESP_OK on success, ESP_ERR_NO_MEM if the pages can not be allocated
+ */
+esp_err_t spi_flash_mmap_pages(int *pages, size_t pagecount, spi_flash_mmap_memory_t memory,
+                         const void** out_ptr, spi_flash_mmap_handle_t* out_handle);
+
 /**
  * @brief Release region previously obtained using spi_flash_mmap
  *

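Based on the declaration documented above, a call could look like the sketch below. It assumes the header that declares these functions (esp_spi_flash.h); the function name example_map_unordered_pages and the page numbers are made-up values for illustration, and error handling is kept minimal.

    #include "esp_spi_flash.h"

    // Sketch only: map three non-adjacent 64 KB flash pages (page indexes are
    // made-up for illustration) into one contiguous data-address region.
    static void example_map_unordered_pages(void)
    {
        int pages[] = { 10, 3, 7 };            // 64 KB page indexes, not byte addresses
        const void *ptr = NULL;
        spi_flash_mmap_handle_t handle;

        esp_err_t err = spi_flash_mmap_pages(pages, sizeof(pages) / sizeof(pages[0]),
                                             SPI_FLASH_MMAP_DATA, &ptr, &handle);
        if (err == ESP_OK) {
            // ptr now points at 3 * 64 KB of contiguous data address space backed
            // by flash pages 10, 3 and 7, in that order.
            spi_flash_munmap(handle);          // release the MMU entries when done
        }
    }

The unit test added below exercises the same pattern, mapping a test region's pages in reverse order and verifying the data through the contiguous mapping.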
View file

@@ -184,6 +184,49 @@ TEST_CASE("Can mmap into instruction address space", "[mmap]")
 }
 
+TEST_CASE("Can mmap unordered pages into contiguous memory", "[spi_flash]")
+{
+    int nopages;
+    int *pages;
+    int startpage;
+    setup_mmap_tests();
+    nopages=(end-start)/SPI_FLASH_MMU_PAGE_SIZE;
+    pages=alloca(sizeof(int)*nopages);
+    startpage=start/SPI_FLASH_MMU_PAGE_SIZE;
+    //make inverse mapping: virt 0 -> page (nopages-1), virt 1 -> page (nopages-2), ...
+    for (int i=0; i<nopages; i++) {
+        pages[i]=startpage+(nopages-1)-i;
+        printf("Offset %x page %d\n", i*0x10000, pages[i]);
+    }
+    printf("Attempting mapping of unordered pages to contiguous memory area\n");
+    spi_flash_mmap_handle_t handle1;
+    const void *ptr1;
+    ESP_ERROR_CHECK( spi_flash_mmap_pages(pages, nopages, SPI_FLASH_MMAP_DATA, &ptr1, &handle1) );
+    printf("mmap_res: handle=%d ptr=%p\n", handle1, ptr1);
+    spi_flash_mmap_dump();
+    srand(0);
+    const uint32_t *data = (const uint32_t *) ptr1;
+    for (int block = 0; block < nopages; ++block) {
+        for (int sector = 0; sector < 16; ++sector) {
+            for (uint32_t word = 0; word < 1024; ++word) {
+                TEST_ASSERT_EQUAL_UINT32(rand(), data[(((nopages-1)-block) * 16 + sector) * 1024 + word]);
+            }
+        }
+    }
+    printf("Unmapping handle1\n");
+    spi_flash_munmap(handle1);
+    spi_flash_mmap_dump();
+}
+
 TEST_CASE("flash_mmap invalidates just-written data", "[spi_flash]")
 {
     const void *ptr1;