himem: reverse error check logic in himem

pull/7506/head
Marius Vikhammer 2021-08-31 08:46:27 +08:00
parent 9c666875dc
commit 2e0cab6f94
1 changed file with 18 additions and 18 deletions
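
ESP_RETURN_ON_FALSE(cond, err_code, tag, msg) from esp_check.h logs msg and returns err_code when cond evaluates to false, so the condition must state what has to hold for execution to continue. The checks changed below had that logic reversed: they passed the failure condition, which rejected valid calls and let invalid ones fall through. Below is a minimal, self-contained sketch of that semantic and of the corrected double-init guard; the macro, state variable, and init_once() function are stand-ins for illustration, not the real esp_check.h or himem definitions (the real constructor passes an empty err_code argument so the macro expands to a plain 'return;').

#include <stdio.h>

typedef int esp_err_t;
#define ESP_OK                0
#define ESP_ERR_INVALID_STATE 0x103   /* numeric value as in esp_err.h; used here only for illustration */

/* Simplified model of ESP_RETURN_ON_FALSE: return err_code when cond is FALSE.
 * The real macro in esp_check.h additionally logs the function name and line. */
#define RETURN_ON_FALSE(cond, err_code, tag, msg) do { \
        if (!(cond)) {                                 \
            fprintf(stderr, "%s: %s\n", tag, msg);     \
            return err_code;                           \
        }                                              \
    } while (0)

static void *s_ram_descriptor = NULL;   /* stand-in for the himem descriptor table */

/* Corrected guard: continue only if himem has NOT been initialized yet.
 * The pre-fix condition 's_ram_descriptor != NULL' did the opposite: it
 * returned on the first (valid) call and fell through on a double init. */
static esp_err_t init_once(void)
{
    RETURN_ON_FALSE(s_ram_descriptor == NULL, ESP_ERR_INVALID_STATE, "himem", "already initialized");
    s_ram_descriptor = &s_ram_descriptor;   /* pretend the descriptor table was allocated */
    return ESP_OK;
}

int main(void)
{
    printf("first init:  %d\n", init_once());   /* prints 0 (ESP_OK) */
    printf("second init: %d\n", init_once());   /* prints 259 (0x103, already initialized) */
    return 0;
}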

@@ -133,10 +133,10 @@ void __attribute__((constructor)) esp_himem_init(void)
if (SPIRAM_BANKSWITCH_RESERVE == 0) return;
int maxram=esp_spiram_get_size();
//catch double init
-ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, , TAG, "already initialized"); //Looks weird; last arg is empty so it expands to 'return ;'
-ESP_RETURN_ON_FALSE(s_range_descriptor != NULL, , TAG, "already initialized");
+ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL, , TAG, "already initialized"); //Looks weird; last arg is empty so it expands to 'return ;'
+ESP_RETURN_ON_FALSE(s_range_descriptor == NULL, , TAG, "already initialized");
//need to have some reserved banks
-ESP_RETURN_ON_FALSE(SPIRAM_BANKSWITCH_RESERVE == 0, , TAG, "No banks reserved for himem");
+ESP_RETURN_ON_FALSE(SPIRAM_BANKSWITCH_RESERVE != 0, , TAG, "No banks reserved for himem");
//Start and end of physical reserved memory. Note it starts slightly under
//the 4MiB mark as the reserved banks can't have an unity mapping to be used by malloc
//anymore; we treat them as himem instead.
@@ -218,7 +218,7 @@ esp_err_t esp_himem_free(esp_himem_handle_t handle)
//Check if any of the blocks is still mapped; fail if this is the case.
for (int i = 0; i < handle->block_ct; i++) {
assert(ramblock_idx_valid(handle->block[i]));
-ESP_RETURN_ON_FALSE(s_ram_descriptor[handle->block[i]].is_mapped, ESP_ERR_INVALID_ARG, TAG, "block in range still mapped");
+ESP_RETURN_ON_FALSE(!s_ram_descriptor[handle->block[i]].is_mapped, ESP_ERR_INVALID_ARG, TAG, "block in range still mapped");
}
//Mark blocks as free
portENTER_CRITICAL(&spinlock);
@@ -236,8 +236,8 @@ esp_err_t esp_himem_free(esp_himem_handle_t handle)
esp_err_t esp_himem_alloc_map_range(size_t size, esp_himem_rangehandle_t *handle_out)
{
-ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
-ESP_RETURN_ON_FALSE(size % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_SIZE, TAG, "requested size not aligned to blocksize");
+ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
+ESP_RETURN_ON_FALSE(size % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_SIZE, TAG, "requested size not aligned to blocksize");
int blocks = size / CACHE_BLOCKSIZE;
esp_himem_rangedata_t *r = calloc(sizeof(esp_himem_rangedata_t), 1);
if (!r) {
@@ -279,7 +279,7 @@ esp_err_t esp_himem_free_map_range(esp_himem_rangehandle_t handle)
for (int i = 0; i < handle->block_ct; i++) {
assert(rangeblock_idx_valid(handle->block_start + i));
assert(s_range_descriptor[i + handle->block_start].is_alloced == 1); //should be, if handle is valid
-ESP_RETURN_ON_FALSE(s_range_descriptor[i + handle->block_start].is_mapped, ESP_ERR_INVALID_ARG, TAG, "memory still mapped to range");
+ESP_RETURN_ON_FALSE(!s_range_descriptor[i + handle->block_start].is_mapped, ESP_ERR_INVALID_ARG, TAG, "memory still mapped to range");
}
//We should be good to free this. Mark blocks as free.
portENTER_CRITICAL(&spinlock);
@@ -297,19 +297,19 @@ esp_err_t esp_himem_map(esp_himem_handle_t handle, esp_himem_rangehandle_t range
int ram_block = ram_offset / CACHE_BLOCKSIZE;
int range_block = range_offset / CACHE_BLOCKSIZE;
int blockcount = len / CACHE_BLOCKSIZE;
-ESP_RETURN_ON_FALSE(s_ram_descriptor == NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
+ESP_RETURN_ON_FALSE(s_ram_descriptor != NULL, ESP_ERR_INVALID_STATE, TAG, "Himem not available!");
//Offsets and length must be block-aligned
-ESP_RETURN_ON_FALSE(ram_offset % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "ram offset not aligned to blocksize");
-ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "range not aligned to blocksize");
-ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "length not aligned to blocksize");
+ESP_RETURN_ON_FALSE(ram_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "ram offset not aligned to blocksize");
+ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "range not aligned to blocksize");
+ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "length not aligned to blocksize");
//ram and range should be within allocated range
-ESP_RETURN_ON_FALSE(ram_block + blockcount > handle->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of phys ram handle");
-ESP_RETURN_ON_FALSE(range_block + blockcount > range->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of range handle");
+ESP_RETURN_ON_FALSE(ram_block + blockcount <= handle->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of phys ram handle");
+ESP_RETURN_ON_FALSE(range_block + blockcount <= range->block_ct, ESP_ERR_INVALID_SIZE, TAG, "args not in range of range handle");
//Check if ram blocks aren't already mapped, and if memory range is unmapped
for (int i = 0; i < blockcount; i++) {
-ESP_RETURN_ON_FALSE(s_ram_descriptor[handle->block[i + ram_block]].is_mapped, ESP_ERR_INVALID_STATE, TAG, "ram already mapped");
-ESP_RETURN_ON_FALSE(s_range_descriptor[range->block_start + i + range_block].is_mapped, ESP_ERR_INVALID_STATE, TAG, "range already mapped");
+ESP_RETURN_ON_FALSE(!s_ram_descriptor[handle->block[i + ram_block]].is_mapped, ESP_ERR_INVALID_STATE, TAG, "ram already mapped");
+ESP_RETURN_ON_FALSE(!s_range_descriptor[range->block_start + i + range_block].is_mapped, ESP_ERR_INVALID_STATE, TAG, "range already mapped");
}
//Map and mark as mapped
@@ -338,9 +338,9 @@ esp_err_t esp_himem_unmap(esp_himem_rangehandle_t range, void *ptr, size_t len)
int range_offset = (uint32_t)ptr - VIRT_HIMEM_RANGE_START;
int range_block = (range_offset / CACHE_BLOCKSIZE) - range->block_start;
int blockcount = len / CACHE_BLOCKSIZE;
-ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "range offset not block-aligned");
-ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE != 0, ESP_ERR_INVALID_ARG, TAG, "map length not block-aligned");
-ESP_RETURN_ON_FALSE(range_block + blockcount > range->block_ct, ESP_ERR_INVALID_ARG, TAG, "range out of bounds for handle");
+ESP_RETURN_ON_FALSE(range_offset % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "range offset not block-aligned");
+ESP_RETURN_ON_FALSE(len % CACHE_BLOCKSIZE == 0, ESP_ERR_INVALID_ARG, TAG, "map length not block-aligned");
+ESP_RETURN_ON_FALSE(range_block + blockcount <= range->block_ct, ESP_ERR_INVALID_ARG, TAG, "range out of bounds for handle");
portENTER_CRITICAL(&spinlock);
for (int i = 0; i < blockcount; i++) {