Merge branch 'bugfix/spi_flash_mmap_stale_data_issue' into 'master'

spi_flash: fix stale read issue for memory mapped partition

See merge request idf/esp-idf!4437
pull/3137/head
Angus Gratton 2019-03-14 14:56:12 +08:00
commit 7c69f6172b
5 changed files with 93 additions and 102 deletions

View file

@@ -48,12 +48,10 @@ void spi_flash_disable_interrupts_caches_and_other_cpu_no_os();
// This function is expected to be called when the other CPU is not running or is running code from IRAM.
void spi_flash_enable_interrupts_caches_no_os();
// Mark the pages containing a flash region as having been
// erased or written to. This means the flash cache needs
// to be evicted before these pages can be flash_mmap()ed again,
// as they may contain stale data
//
// Flushes cache if address range has corresponding valid cache mappings
// Recommended to call after a flash program operation (erase or write)
// Only call this while holding spi_flash_op_lock()
void spi_flash_mark_modified_region(uint32_t start_addr, uint32_t length);
// Returns true if cache was flushed, false otherwise
bool spi_flash_check_and_flush_cache(uint32_t start_addr, uint32_t length);
#endif //ESP_SPI_FLASH_CACHE_UTILS_H
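
The intended call pattern for the new helper, as used later in this commit's flash_ops.c changes, is roughly the sketch below; it is illustrative rather than a verbatim excerpt, and assumes the existing spi_flash_guard_start()/spi_flash_guard_end() guard hooks (which, with the default guards, disable and re-enable caches and interrupts):

/* inside a flash program path (erase or write), after the operation completes: */
spi_flash_guard_start();                              /* enter the cache-disabled critical section */
spi_flash_check_and_flush_cache(start_addr, size);    /* flush caches only if this range is currently mmapped */
spi_flash_guard_end();                                /* leave the critical section */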

View file

@@ -47,19 +47,6 @@
#define VADDR1_FIRST_USABLE_ADDR 0x400D0000
#define PRO_IRAM0_FIRST_USABLE_PAGE ((VADDR1_FIRST_USABLE_ADDR - VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + 64)
/* Ensure pages in a region haven't been marked as written via
spi_flash_mark_modified_region(). If the page has
been written, flush the entire flash cache before returning.
This ensures stale cache entries are never read after fresh calls
to spi_flash_mmap(), while keeping the number of cache flushes to a
minimum.
Returns true if cache was flushed.
*/
static bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length);
typedef struct mmap_entry_{
uint32_t handle;
int page;
@@ -144,7 +131,7 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
esp_err_t ret;
bool did_flush, need_flush = false;
bool need_flush = false;
if (!page_count) {
return ESP_ERR_INVALID_ARG;
}
@@ -163,12 +150,6 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
spi_flash_disable_interrupts_caches_and_other_cpu();
did_flush = 0;
for (int i = 0; i < page_count; i++) {
if (spi_flash_ensure_unmodified_region(pages[i]*SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE)) {
did_flush = 1;
}
}
spi_flash_mmap_init();
// figure out the memory region where we should look for pages
int region_begin; // first page to check
@@ -243,7 +224,7 @@ esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, sp
Working on a long term fix that doesn't require invalidating
entire cache.
*/
if (!did_flush && need_flush) {
if (need_flush) {
#if CONFIG_SPIRAM_SUPPORT
esp_spiram_writeback_cache();
#endif
@@ -338,71 +319,6 @@ uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
return count;
}
/* 256-bit (up to 16MB of 64KB pages) bitset of all flash pages
that have been written to since last cache flush.
Before mmaping a page, need to flush caches if that page has been
written to.
Note: It's possible to do some additional performance tweaks to
this algorithm, as we actually only need to flush caches if a page
was first mmapped, then written to, then is about to be mmaped a
second time. This is a fair bit more complex though, so unless
there's an access pattern that this would significantly boost then
it's probably not worth it.
*/
static uint32_t written_pages[256/32];
static bool update_written_pages(size_t start_addr, size_t length, bool mark);
void IRAM_ATTR spi_flash_mark_modified_region(size_t start_addr, size_t length)
{
update_written_pages(start_addr, length, true);
}
static IRAM_ATTR bool spi_flash_ensure_unmodified_region(size_t start_addr, size_t length)
{
return update_written_pages(start_addr, length, false);
}
/* generic implementation for the previous two functions */
static inline IRAM_ATTR bool update_written_pages(size_t start_addr, size_t length, bool mark)
{
/* align start_addr & length to full MMU pages */
uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
length += (start_addr - page_start_addr);
length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
int page = addr / SPI_FLASH_MMU_PAGE_SIZE;
if (page >= 256) {
return false; /* invalid address */
}
int idx = page / 32;
uint32_t bit = 1 << (page % 32);
if (mark) {
written_pages[idx] |= bit;
} else if (written_pages[idx] & bit) {
/* it is tempting to write a version of this that only
flushes each CPU's cache as needed. However this is
tricky because mmaped memory can be used on un-pinned
cores, or the pointer passed between CPUs.
*/
#if CONFIG_SPIRAM_SUPPORT
esp_spiram_writeback_cache();
#endif
Cache_Flush(0);
#ifndef CONFIG_FREERTOS_UNICORE
Cache_Flush(1);
#endif
bzero(written_pages, sizeof(written_pages));
return true;
}
}
return false;
}
uint32_t spi_flash_cache2phys(const void *cached)
{
intptr_t c = (intptr_t)cached;
@@ -464,3 +380,55 @@ const void *IRAM_ATTR spi_flash_phys2cache(uint32_t phys_offs, spi_flash_mmap_me
spi_flash_enable_interrupts_caches_and_other_cpu();
return NULL;
}
static bool IRAM_ATTR is_page_mapped_in_cache(uint32_t phys_page)
{
int start[2], end[2];
/* SPI_FLASH_MMAP_DATA */
start[0] = 0;
end[0] = 64;
/* SPI_FLASH_MMAP_INST */
start[1] = PRO_IRAM0_FIRST_USABLE_PAGE;
end[1] = 256;
DPORT_INTERRUPT_DISABLE();
for (int j = 0; j < 2; j++) {
for (int i = start[j]; i < end[j]; i++) {
if (DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]) == phys_page) {
DPORT_INTERRUPT_RESTORE();
return true;
}
}
}
DPORT_INTERRUPT_RESTORE();
return false;
}
/* Checks whether the given flash address range has a corresponding cache mapping; if so, flushes the cache memories */
IRAM_ATTR bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length)
{
/* align start_addr & length to full MMU pages */
uint32_t page_start_addr = start_addr & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
length += (start_addr - page_start_addr);
length = (length + SPI_FLASH_MMU_PAGE_SIZE - 1) & ~(SPI_FLASH_MMU_PAGE_SIZE-1);
for (uint32_t addr = page_start_addr; addr < page_start_addr + length; addr += SPI_FLASH_MMU_PAGE_SIZE) {
uint32_t page = addr / SPI_FLASH_MMU_PAGE_SIZE;
if (page >= 256) {
return false; /* invalid address */
}
if (is_page_mapped_in_cache(page)) {
#if CONFIG_SPIRAM_SUPPORT
esp_spiram_writeback_cache();
#endif
Cache_Flush(0);
#ifndef CONFIG_FREERTOS_UNICORE
Cache_Flush(1);
#endif
return true;
}
}
return false;
}
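
For clarity, the alignment arithmetic at the top of spi_flash_check_and_flush_cache() rounds the requested range out to whole MMU pages; a minimal worked example, assuming the ESP32's 64 KB page size (SPI_FLASH_MMU_PAGE_SIZE == 0x10000) and hypothetical input values:

/* Hypothetical inputs: start_addr = 0x211234, length = 0x20 */
uint32_t page_start_addr = 0x211234 & ~(0x10000 - 1);   /* 0x210000 */
uint32_t len = 0x20 + (0x211234 - 0x210000);            /* 0x1254 */
len = (len + 0x10000 - 1) & ~(0x10000 - 1);             /* rounds up to 0x10000: one full page */
/* The loop then checks a single page, number 0x210000 / 0x10000 = 33,
   against the MMU table via is_page_mapped_in_cache(). */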

View file

@@ -238,6 +238,11 @@ esp_err_t IRAM_ATTR spi_flash_erase_range(uint32_t start_addr, uint32_t size)
}
}
COUNTER_STOP(erase);
spi_flash_guard_start();
spi_flash_check_and_flush_cache(start_addr, size);
spi_flash_guard_end();
return spi_flash_translate_rc(rc);
}
@@ -404,9 +409,9 @@ esp_err_t IRAM_ATTR spi_flash_write(size_t dst, const void *srcv, size_t size)
out:
COUNTER_STOP(write);
spi_flash_guard_op_lock();
spi_flash_mark_modified_region(dst, size);
spi_flash_guard_op_unlock();
spi_flash_guard_start();
spi_flash_check_and_flush_cache(dst, size);
spi_flash_guard_end();
return spi_flash_translate_rc(rc);
}
@@ -470,9 +475,9 @@ esp_err_t IRAM_ATTR spi_flash_write_encrypted(size_t dest_addr, const void *src,
COUNTER_ADD_BYTES(write, size);
COUNTER_STOP(write);
spi_flash_guard_op_lock();
spi_flash_mark_modified_region(dest_addr, size);
spi_flash_guard_op_unlock();
spi_flash_guard_start();
spi_flash_check_and_flush_cache(dest_addr, size);
spi_flash_guard_end();
return spi_flash_translate_rc(rc);
}

View file

@@ -11,9 +11,9 @@ void spi_flash_init(const char* chip_size, size_t block_size, size_t sector_size
_spi_flash_init(chip_size, block_size, sector_size, page_size, partition_bin);
}
void spi_flash_mark_modified_region(size_t start_addr, size_t length)
bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length)
{
return;
return true;
}
esp_rom_spiflash_result_t esp_rom_spiflash_unlock()
@@ -54,4 +54,4 @@ void spi_flash_disable_interrupts_caches_and_other_cpu_no_os()
void spi_flash_enable_interrupts_caches_no_os()
{
return;
}
}

View file

@@ -68,7 +68,6 @@ static void setup_mmap_tests()
}
/* Only rewrite the sector if it has changed */
if (sector_needs_write) {
printf("setup_mmap_tests(): Prepping sector %d\n", abs_sector);
ESP_ERROR_CHECK( spi_flash_erase_sector((uint16_t) abs_sector) );
ESP_ERROR_CHECK( spi_flash_write(sector_offs, (const uint8_t *) buffer, sizeof(buffer)) );
}
@@ -402,3 +401,24 @@ TEST_CASE("munmap followed by mmap flushes cache", "[spi_flash]")
TEST_ASSERT_NOT_EQUAL(0, memcmp(buf, data, sizeof(buf)));
}
TEST_CASE("no stale data read post mmap and write partition", "[spi_flash]")
{
const char buf[] = "Test buffer data for partition";
char read_data[sizeof(buf)];
setup_mmap_tests();
const esp_partition_t *p = get_test_data_partition();
const uint32_t* data;
spi_flash_mmap_handle_t handle;
TEST_ESP_OK(esp_partition_mmap(p, 0, SPI_FLASH_MMU_PAGE_SIZE,
SPI_FLASH_MMAP_DATA, (const void **) &data, &handle) );
memcpy(read_data, data, sizeof(read_data));
TEST_ESP_OK(esp_partition_erase_range(p, 0, SPI_FLASH_MMU_PAGE_SIZE));
TEST_ESP_OK(esp_partition_write(p, 0, buf, sizeof(buf)));
/* This should trigger a fresh read of the actual flash contents */
memcpy(read_data, data, sizeof(read_data));
spi_flash_munmap(handle);
TEST_ASSERT_EQUAL(0, memcmp(buf, read_data, sizeof(buf)));
}