freertos: Added partial unit tests for Linux simulator

* The unit tests are derived from the FreeRTOS test app
  in components/freertos/test_apps/freertos. They are
  quite incompatible with the main test application, which
  is why they have been placed under
  tools/test_apps/linux_compatible/linux_freertos for now.
pull/11283/head
Jakob Hasse 2023-04-21 11:16:28 +08:00
rodzic 550ecbe37a
commit d6fe302904
23 zmienionych plików z 1990 dodań i 0 usunięć

Wyświetl plik

@ -19,6 +19,10 @@ tools/test_apps/linux_compatible/hello_world_linux_compatible:
enable:
- if: INCLUDE_DEFAULT == 1 or IDF_TARGET == "linux"
tools/test_apps/linux_compatible/linux_freertos:
enable:
- if: IDF_TARGET == "linux"
tools/test_apps/linux_compatible/rmt_mock_build_test:
enable:
- if: IDF_TARGET == "linux"

Wyświetl plik

@ -0,0 +1,9 @@
# For more information about build system see
# https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/build-system.html
# The following lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.16)
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
# Build only the "main" component (plus its dependencies) to keep the
# Linux-host test build minimal
set(COMPONENTS main)
project(linux_freertos)

Wyświetl plik

@ -0,0 +1,26 @@
| Supported Targets | Linux |
| ----------------- | ----- |
# Simple test application for [SMP Linux](TODO) port
Note that the IDF version of the FreeRTOS POSIX/Linux simulator is not tested here; the port used here is merely the upstream version.
## Build
```
idf.py --preview set-target linux
```
Amazon FreeRTOS SMP configuration is already set via `sdkconfig.defaults`; no further configuration is needed.
```
idf.py build
```
## Run
```
idf.py monitor
```
After the test menu is printed, enter `![ignore]` to run all tests except those tagged as ignored.

Wyświetl plik

@ -0,0 +1,25 @@
# Register all of the "kernel" tests as a component
# For refactored FreeRTOS unit tests, we need to support #include "xxx.h" of FreeRTOS headers
idf_component_get_property(FREERTOS_ORIG_INCLUDE_PATH freertos ORIG_INCLUDE_PATH)

# One source sub-directory per tested kernel area; "." holds shared test utilities
set(src_dirs
    "."             # For freertos_test_utils.c
    "tasks"
    "queue"
    "port"
    "stream_buffer"
    "timers")

set(priv_include_dirs
    "."                              # For portTestMacro.h
    "${FREERTOS_ORIG_INCLUDE_PATH}") # FreeRTOS headers via `#include "xxx.h"`

# WHOLE_ARCHIVE keeps the test-case objects in the final link even though
# nothing references them directly
idf_component_register(SRC_DIRS ${src_dirs}
                       PRIV_INCLUDE_DIRS ${priv_include_dirs}
                       PRIV_REQUIRES unity
                       WHOLE_ARCHIVE)

# Some tests intentionally cast pointers to smaller integer types; silence
# the resulting warning for this test component only
target_compile_options(${COMPONENT_LIB} PRIVATE
    -Wno-pointer-to-int-cast
)

Wyświetl plik

@ -0,0 +1,21 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <assert.h>
#include <time.h>

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "freertos_test_utils.h"
/**
 * Reference clock for the Linux port tests.
 *
 * @return Time of the monotonic system clock, in microseconds.
 *
 * Note: the clock_gettime() call must not be placed inside assert(),
 * otherwise the call (and hence the clock read) is compiled out when
 * NDEBUG is defined.
 */
uint64_t ref_clock_get(void)
{
    struct timespec current_time;
    int ret = clock_gettime(CLOCK_MONOTONIC, &current_time);
    assert(ret == 0);
    (void) ret; // suppress "unused variable" when asserts are disabled
    // Widen to uint64_t *before* multiplying so the math cannot overflow
    // on platforms where time_t is 32 bits wide
    uint64_t ref_ticks = (uint64_t)current_time.tv_sec * 1000000;
    ref_ticks += (uint64_t)(current_time.tv_nsec / 1000);
    return ref_ticks;
}

Wyświetl plik

@ -0,0 +1,12 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once

#include <stdint.h>
#include "freertos/FreeRTOS.h"

/**
 * @brief Get the current value of the test reference clock
 *
 * @return Monotonic clock time in microseconds
 */
uint64_t ref_clock_get(void);

Wyświetl plik

@ -0,0 +1,162 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#if CONFIG_FREERTOS_SMP
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
/*
Test TLSP Deletion Callbacks
Purpose:
- Test that TLSP Deletion Callbacks can be registered
- Test that TLSP Deletion Callbacks are called when a task is deleted
Procedure:
- Create a task on each core along with an array of integers to act as TLSP data
- Each task should initialize their integers to a particular value (i.e., the index value)
- Each task should register those integers as TLSPs along with a deletion callback
- Each task should self delete to trigger the TLSP deletion callback
- The TLSP deletion callback should indicate that it has run by negating the integer values
Expected:
- The TLSP deletion callback should check that the correct TLSP is provided by checking the TLSPs initialization
value (i.e., should be set to the index value)
- After deletion, the integer values should be negated to indicate deletion callback execution
*/
#define NUM_TLSP CONFIG_FREERTOS_THREAD_LOCAL_STORAGE_POINTERS
/* TLSP deletion callback: verifies the slot still holds its init value,
   then flips its sign so the test case can detect that this ran. */
static void tlsp_del_cb(int index, void *tlsp)
{
    int *stored_value = (int *)tlsp;
    /* The TLSP must still hold the value the task initialized it with */
    TEST_ASSERT_EQUAL(index, *stored_value);
    /* Negate the value to mark that the deletion callback executed */
    *stored_value *= -1;
}
/* Per-core task: fills its TLSP slots with known values, registers the
   deletion callback for each slot, then self-deletes to fire them. */
static void tlsp_task(void *arg)
{
    int *tlsp_array = (int *)arg;
    for (int slot = 0; slot < NUM_TLSP; slot++) {
        /* Store a known non-negative value in this slot... */
        tlsp_array[slot] = slot;
        /* ...and attach both the pointer and the deletion callback */
        vTaskSetThreadLocalStoragePointerAndDelCallback(NULL, slot, &tlsp_array[slot], tlsp_del_cb);
    }
    /* Deleting ourselves must invoke the registered deletion callbacks */
    vTaskDelete(NULL);
}
// Spawns one TLSP-exercising task per core and verifies that after the tasks
// self-delete, every TLSP value was negated by the deletion callback.
TEST_CASE("Test TLSP deletion callbacks", "[freertos]")
{
    TaskHandle_t tasks[portNUM_PROCESSORS];
    int tlsps[portNUM_PROCESSORS][NUM_TLSP];

    // One task per core; each task gets its own row of TLSP storage
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(tlsp_task, "tlsp_tsk", configMINIMAL_STACK_SIZE * 2, (void *)&tlsps[i], CONFIG_UNITY_FREERTOS_PRIORITY - 1, &tasks[i], i));
    }
    // Significant delay to let tasks run and delete themselves
    vTaskDelay(pdMS_TO_TICKS(100));

    // Check the values of the TLSPs to see if the del cb have ran
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        for (int index = 0; index < NUM_TLSP; index++) {
            // Del cb should have set the TLSP to a negative value
            // (note: for index 0 the negated value is still 0)
            TEST_ASSERT_EQUAL(-index, tlsps[i][index]);
        }
    }
}
#else // CONFIG_FREERTOS_SMP
// Todo: Remove IDF FreeRTOS Test Case
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h"
/* --------Test backported thread local storage pointer and deletion cb feature----------
* vTaskSetThreadLocalStoragePointerAndDelCallback()
* pvTaskGetThreadLocalStoragePointer(),
*
* This test creates a task and set's the task's TLSPs. The task is then deleted
* which should trigger the deletion cb.
*/
#define NO_OF_TLSP configNUM_THREAD_LOCAL_STORAGE_POINTERS
#define TLSP_SET_BASE 0x0F //0b1111 to be bit shifted by index
#define TLSP_DEL_BASE 0x05 //0b0101 to be bit shifted by index

//The variables pointed to by Thread Local Storage Pointer
static uint32_t task_storage[portNUM_PROCESSORS][NO_OF_TLSP] = {0};

/* If static task cleanup is defined, can't set index 0 even if the calling task is not a pthread,
   as the cleanup is called for every task.
*/
#if defined(CONFIG_FREERTOS_ENABLE_STATIC_TASK_CLEAN_UP)
static const int skip_index = 0; /*PTHREAD_TLS_INDEX*/
#else
static const int skip_index = -1; // -1 never matches a valid index, so no slot is skipped
#endif

// Deletion callback: overwrites the storage element with a per-index marker
// value (TLSP_DEL_BASE << index) so the test can verify the callback ran
static void del_cb(int index, void *ptr)
{
    *((uint32_t *)ptr) = (TLSP_DEL_BASE << index); //Indicate deletion by setting task storage element to a unique value
}
// Runs once per core: populates each (non-skipped) TLSP slot, verifies it
// reads back correctly via pvTaskGetThreadLocalStoragePointer(), then
// self-deletes to trigger the deletion callback.
static void task_cb(void *arg)
{
    int core = xPortGetCoreID();
    for (int i = 0; i < NO_OF_TLSP; i++) {
        if (i == skip_index) {
            continue;
        }
        task_storage[core][i] = (TLSP_SET_BASE << i); //Give each element of task_storage a unique number
        vTaskSetThreadLocalStoragePointerAndDelCallback(NULL, i, (void *)&task_storage[core][i], del_cb); //Set each TLSP to point to a task storage element
    }
    for (int i = 0; i < NO_OF_TLSP; i++) {
        if (i == skip_index) {
            continue;
        }
        uint32_t *tlsp = (uint32_t *)pvTaskGetThreadLocalStoragePointer(NULL, i);
        TEST_ASSERT_EQUAL(*tlsp, (TLSP_SET_BASE << i)); //Check if TLSP points to the correct task storage element by checking unique value
    }
    vTaskDelete(NULL); //Delete Task to Trigger TSLP deletion callback
}
// Creates one TLSP-exercising task per core, waits for them to self-delete,
// then verifies every (non-skipped) storage element carries the deletion
// callback's marker value.
TEST_CASE("Test FreeRTOS thread local storage pointers and del cb", "[freertos]")
{
    //Create Task
    for (int core = 0; core < portNUM_PROCESSORS; core++) {
        xTaskCreatePinnedToCore(task_cb, "task", 1024, NULL, CONFIG_UNITY_FREERTOS_PRIORITY + 1, NULL, core);
    }
    vTaskDelay(10); //Delay long enough for tasks to run to completion

    for (int core = 0; core < portNUM_PROCESSORS; core++) {
        for (int i = 0; i < NO_OF_TLSP; i++) {
            if (i == skip_index) {
                continue;
            }
            TEST_ASSERT_EQUAL((TLSP_DEL_BASE << i), task_storage[core][i]); //Check del_cb ran by checking task storage for unique value
        }
    }
}
#endif // CONFIG_FREERTOS_SMP

Wyświetl plik

@ -0,0 +1,18 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once

#include "freertos_test_utils.h"

/* Port-specific values and macros consumed by the generic kernel port tests */

#define configTEST_DEFAULT_STACK_SIZE              4096
#define configTEST_UNITY_TASK_PRIORITY             CONFIG_UNITY_FREERTOS_PRIORITY

/* Reference clock is backed by ref_clock_get() (microsecond resolution);
 * it needs no init/deinit on the Linux port, so those macros are empty */
#define portTEST_REF_CLOCK_TYPE                    uint64_t
#define portTEST_REF_CLOCK_INIT()
#define portTEST_REF_CLOCK_DEINIT()
#define portTEST_REF_CLOCK_GET_TIME()              ((uint64_t) ref_clock_get())
/* Convert a tick count into the equivalent ref clock (microsecond) duration */
#define portTEST_TICKS_TO_REF_CLOCK(ticks)         ((ticks) * (1000000/configTICK_RATE_HZ))

Wyświetl plik

@ -0,0 +1,186 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdio.h>
#include <stdlib.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
#include "freertos/idf_additions.h"
#include "unity.h"
#define QUEUE_LEN 4

// Creates a queue set large enough to hold every possible member event,
// then creates num_queues queues of length queue_len and adds each one to
// the set. Handles are returned via queue_list_ret / queue_set_ret.
static void allocate_resources(int num_queues, int queue_len, QueueHandle_t *queue_list_ret, QueueSetHandle_t *queue_set_ret)
{
    // Create queue set (capacity = total number of items all queues can hold)
    *queue_set_ret = xQueueCreateSet(num_queues * queue_len);
    TEST_ASSERT_NOT_EQUAL(NULL, *queue_set_ret);
    // Create queues and add them to the queue set
    for (int i = 0; i < num_queues; i++) {
        queue_list_ret[i] = xQueueCreate(queue_len, sizeof(BaseType_t));
        TEST_ASSERT_NOT_EQUAL(NULL, queue_list_ret[i]);
        TEST_ASSERT_EQUAL(pdPASS, xQueueAddToSet(queue_list_ret[i], *queue_set_ret));
    }
}
// Counterpart to allocate_resources(): unregisters each queue from the set
// and deletes all the queues plus the set itself.
static void free_resources(int num_queues, QueueHandle_t *queue_list, QueueSetHandle_t queue_set)
{
    // Remove queues from queue set and delete the queues
    for (int i = 0; i < num_queues; i++) {
        TEST_ASSERT_EQUAL(pdPASS, xQueueRemoveFromSet(queue_list[i], queue_set));
        vQueueDelete(queue_list[i]);
    }
    vQueueDelete(queue_set);
}
/*
Test queue sets basic
Purpose:
- Test that queue set works as expected
Procedure:
- Create NUM_QUEUES queues and add them to the same queue set
- Fill each queue sequentially with QUEUE_LEN items
Expected:
- Each call to xQueueSend() should generate a member in the queue set
- The order of the members should match the order in which xQueueSend() was called
- The item sent by the xQueueSend() is correct when read
*/
#define NUM_QUEUES 5

TEST_CASE("Test Queue sets", "[freertos]")
{
    // Create queues and queue set
    QueueHandle_t queues[NUM_QUEUES];
    QueueSetHandle_t queue_set;
    allocate_resources(NUM_QUEUES, QUEUE_LEN, queues, &queue_set);

    // Fill each queue sequentially with QUEUE_LEN items
    for (int i = 0; i < NUM_QUEUES; i++) {
        for (int j = 0; j < QUEUE_LEN; j++) {
            BaseType_t item = j;
            TEST_ASSERT_EQUAL(pdTRUE, xQueueSend(queues[i], &item, 0));
        }
    }

    // Members must come out of the set in exactly the order the sends occurred
    for (int i = 0; i < NUM_QUEUES; i++) {
        for (int j = 0; j < QUEUE_LEN; j++) {
            // Check the queue set member
            QueueSetMemberHandle_t member = xQueueSelectFromSet(queue_set, 0);
            TEST_ASSERT_EQUAL(queues[i], member);
            // Check the queue's items
            BaseType_t item;
            TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(member, &item, 0));
            TEST_ASSERT_EQUAL(j, item);
        }
    }
    // Check that there are no more members
    TEST_ASSERT_EQUAL(NULL, xQueueSelectFromSet(queue_set, 0));

    // Cleanup queues and queue set
    free_resources(NUM_QUEUES, queues, queue_set);
}
#ifndef CONFIG_FREERTOS_UNICORE
/*
Test queue set SMP thread safety
Purpose:
- Test that queue set works when being used from different cores simultaneously
Procedure:
- Create a queue for each core and add them to the same queue set
- Create a task on each core to send QUEUE_LEN items to their assigned queue
- Synchronize the tasks so that the start sending at the same time
Expected:
- Each call to xQueueSend() should generate a member in the queue set
- The item sent by the xQueueSend() is correct when read
*/
// Set by core 0's send_func invocation to release the tasks pinned to the
// other cores, so all cores start sending at (approximately) the same time
static volatile bool start_other_cores;
// Counting semaphore given by each non-main core's task when it is done
static SemaphoreHandle_t done_sem = NULL;

// Sends QUEUE_LEN copies of the calling core's ID into the queue passed via
// arg. Core 0 runs this in-line from the test case; other cores run it as
// pinned tasks that signal done_sem and self-delete when finished.
static void send_func(void *arg)
{
    QueueHandle_t queue = (QueueHandle_t)arg;
    BaseType_t core_id = xPortGetCoreID();
    if (core_id == 0) {
        // We are core 0. Trigger the other cores to start
        start_other_cores = true;
    } else {
        // Wait to be started by main core
        while (!start_other_cores) {
            ;
        }
    }
    // Fill the queue assigned to the current core
    for (int i = 0; i < QUEUE_LEN; i++) {
        TEST_ASSERT_EQUAL(pdTRUE, xQueueSend(queue, &core_id, 0));
    }
    if (core_id != 0) {
        // Indicate completion to core 0 and self delete
        xSemaphoreGive(done_sem);
        vTaskDelete(NULL);
    }
}
// SMP thread-safety test: every core sends to its own queue through the
// shared queue set simultaneously; afterwards all QUEUE_LEN items from each
// core must be retrievable via the set.
TEST_CASE("Test queue sets multi-core", "[freertos]")
{
    // Create done semaphore (one count per non-main core)
    done_sem = xSemaphoreCreateCounting(portNUM_PROCESSORS - 1, 0);
    TEST_ASSERT_NOT_EQUAL(NULL, done_sem);

    // Create queues and queue set
    QueueHandle_t queues[portNUM_PROCESSORS];
    QueueSetHandle_t queue_set;
    allocate_resources(portNUM_PROCESSORS, QUEUE_LEN, queues, &queue_set);

    // Create tasks of the same priority for all cores except for core 0
    for (int i = 1; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(send_func, "send", 2048, (void *)queues[i], CONFIG_UNITY_FREERTOS_PRIORITY, NULL, i));
    }

    // Core 0 calls send_func as well triggering the simultaneous sends from all cores
    send_func((void *)queues[0]);

    // Wait for all other cores to be done
    for (int i = 1; i < portNUM_PROCESSORS; i++) {
        xSemaphoreTake(done_sem, portMAX_DELAY);
    }

    // Read queues from the queue set, then read an item from the queue
    uint32_t queues_check_count[portNUM_PROCESSORS] = {0};
    QueueSetMemberHandle_t member = xQueueSelectFromSet(queue_set, 0);
    while (member != NULL) {
        // Read the core ID from the queue, check that core ID is sane
        BaseType_t core_id;
        TEST_ASSERT_EQUAL(pdTRUE, xQueueReceive(member, &core_id, 0));
        TEST_ASSERT_LESS_THAN(portNUM_PROCESSORS, core_id);
        queues_check_count[core_id]++;
        // Get next member
        member = xQueueSelectFromSet(queue_set, 0);
    }
    // Check that all items from all queues have been read
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(QUEUE_LEN, queues_check_count[i]);
    }

    // Cleanup queues and queue set
    free_resources(portNUM_PROCESSORS, queues, queue_set);
    // Cleanup done sem
    vSemaphoreDelete(done_sem);
    done_sem = NULL;
}
#endif // CONFIG_FREERTOS_UNICORE

Wyświetl plik

@ -0,0 +1,73 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/stream_buffer.h"
#include "unity.h"
#define TEST_NUM_BYTES 100
#define TEST_RECEIVER_TIMEOUT_TICKS pdMS_TO_TICKS(1000) // 1000 ms timeout for each receive

// Shared context passed to both the sender and the receiver task
typedef struct {
    StreamBufferHandle_t stream_buffer; // Buffer under test
    SemaphoreHandle_t done_sem;         // Given by each task when it completes
} test_args_t;
// Producer: sends TEST_NUM_BYTES single bytes (each byte's value is the
// running count) into the stream buffer, then signals done_sem and deletes
// itself.
static void sender_task(void *arg)
{
    test_args_t *test_args = (test_args_t *)arg;
    printf("Starting sender task... \n");
    for (int i = 0; i < TEST_NUM_BYTES; i++) {
        // Send a single byte, with the byte's value being the number of bytes sent thus far
        uint8_t data = (uint8_t)i;
        TEST_ASSERT_EQUAL(1, xStreamBufferSend(test_args->stream_buffer, &data, 1, 0));
        // Short delay to give a chance for receiver task to receive
        vTaskDelay(1);
    }
    xSemaphoreGive(test_args->done_sem);
    vTaskDelete(NULL);
}
// Consumer: receives TEST_NUM_BYTES single bytes and checks each byte's
// value matches the running count, then signals done_sem and deletes itself.
static void receiver_task(void *arg)
{
    test_args_t *test_args = (test_args_t *)arg;
    printf("Starting receiver task... \n");
    for (int i = 0; i < TEST_NUM_BYTES; i++) {
        // Receive a single byte. The received byte's value being the number of bytes sent/received thus far
        uint8_t data;
        TEST_ASSERT_EQUAL(1, xStreamBufferReceive(test_args->stream_buffer, &data, 1, TEST_RECEIVER_TIMEOUT_TICKS));
        TEST_ASSERT_EQUAL(i, data);
    }
    xSemaphoreGive(test_args->done_sem);
    vTaskDelete(NULL);
}
// Runs a higher-priority sender against a lower-priority receiver on
// different cores, then waits for both to signal completion before cleanup.
TEST_CASE("Stream Buffer: Send-receive tasks", "[freertos]")
{
    test_args_t test_args;
    test_args.stream_buffer = xStreamBufferCreate(TEST_NUM_BYTES, 1);
    test_args.done_sem = xSemaphoreCreateCounting(2, 0); // one count per task
    TEST_ASSERT_NOT_NULL(test_args.stream_buffer);
    TEST_ASSERT_NOT_NULL(test_args.done_sem);

    TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(sender_task, "sender", 4096, &test_args, CONFIG_UNITY_FREERTOS_PRIORITY + 2, NULL, 0));
    TEST_ASSERT_EQUAL(pdTRUE, xTaskCreatePinnedToCore(receiver_task, "receiver", 4096, &test_args, CONFIG_UNITY_FREERTOS_PRIORITY + 1, NULL, 1));

    // Wait for both tasks to complete
    for (int i = 0; i < 2; i++) {
        xSemaphoreTake(test_args.done_sem, portMAX_DELAY);
    }

    vStreamBufferDelete(test_args.stream_buffer);
    vSemaphoreDelete(test_args.done_sem);
}

Wyświetl plik

@ -0,0 +1,91 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
/*
Test eTaskGetState()
Purpose:
- Test that eTaskGetState() returns the correct state for a particular task
Procedure:
- Create tasks in every state (and repeat for each core)
- Note: eDeleted is not tested due to needing to control when the idle tasks run
- Call eTaskGetState() on each created task
Expected:
- eTaskGetState() should return the correct state for each created task
*/
/* Blocks (effectively) forever so the test can observe it in eBlocked */
static void blocked_task(void *arg)
{
    (void) arg;
    vTaskDelay(portMAX_DELAY - 1);
    /* Should never get here; delete ourselves just in case */
    vTaskDelete(NULL);
}
/* Suspends itself so the test can observe it in eSuspended */
static void suspended_task(void *arg)
{
    (void) arg;
    vTaskSuspend(NULL);
    /* Should never get here; delete ourselves just in case */
    vTaskDelete(NULL);
}
/* Spins forever (after a short delay) so the test can observe it in
   eReady, or eRunning when created at the unity task's priority */
static void loop_task(void *arg)
{
    (void) arg;
    /* Let the other freshly-created tasks get scheduled first */
    vTaskDelay(2);
    for (;;) {
    }
}
TEST_CASE("Test eTaskGetState", "[freertos]")
{
    TaskHandle_t blocked_tasks[portNUM_PROCESSORS];
    TaskHandle_t suspended_tasks[portNUM_PROCESSORS];
    TaskHandle_t ready_tasks[portNUM_PROCESSORS];
    TaskHandle_t running_tasks[portNUM_PROCESSORS];

    // Create tasks of each state on each core
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(blocked_task, "blkd", configMINIMAL_STACK_SIZE * 2, NULL, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &blocked_tasks[i], i));
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(suspended_task, "susp", configMINIMAL_STACK_SIZE * 2, NULL, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &suspended_tasks[i], i));
        TEST_ASSERT_EQUAL(pdPASS, xTaskCreatePinnedToCore(loop_task, "rdy", configMINIMAL_STACK_SIZE * 2, NULL, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &ready_tasks[i], i));
        if (i == CONFIG_UNITY_FREERTOS_CPU) {
            // The unity task itself is the "running" task on its own core
            running_tasks[i] = xTaskGetCurrentTaskHandle();
        } else {
            // On the other cores, a spinning task at the unity task's
            // priority acts as the "running" task
            xTaskCreatePinnedToCore(loop_task, "run", configMINIMAL_STACK_SIZE * 2, NULL, CONFIG_UNITY_FREERTOS_PRIORITY, &running_tasks[i], i);
        }
    }

    // Short delay to allow created tasks to run
    vTaskDelay(10);

    // Check the state of the created tasks
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        TEST_ASSERT_EQUAL(eBlocked, eTaskGetState(blocked_tasks[i]));
        TEST_ASSERT_EQUAL(eSuspended, eTaskGetState(suspended_tasks[i]));
        TEST_ASSERT_EQUAL(eReady, eTaskGetState(ready_tasks[i]));
        TEST_ASSERT_EQUAL(eRunning, eTaskGetState(running_tasks[i]));
    }

    // Clean up created tasks
    for (int i = 0; i < portNUM_PROCESSORS; i++) {
        vTaskDelete(blocked_tasks[i]);
        vTaskDelete(suspended_tasks[i]);
        vTaskDelete(ready_tasks[i]);
        if (i != CONFIG_UNITY_FREERTOS_CPU) {
            vTaskDelete(running_tasks[i]);
        }
    }
    // Short delay to allow task memory to be cleaned
    vTaskDelay(10);
}

Wyświetl plik

@ -0,0 +1,105 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Test backported deletion behavior by creating tasks of various affinities and
* check if the task memory is freed immediately under the correct conditions.
*
* The behavior of vTaskDelete() results in the immediate freeing of task memory
* and the immediate execution of deletion callbacks for tasks which are not
* running, provided they are not pinned to the other core (due to FPU cleanup
* requirements).
*
* If the condition is not met, freeing of task memory and execution of
* deletion callbacks will still be carried out by the Idle Task.
*/
#include <stdio.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_heap_caps.h"
#include "unity.h"
#include "esp_rom_sys.h"
#define NO_OF_TSKS 3
#define DELAY_TICKS 2
/* Caps of all memory which is allocated from when a task is created */
#define HEAP_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)

#define DELAY_US_ITERATIONS 1000

// Per-task context: the shared mutex plus a flag used to detect if a
// deleted task erroneously keeps running
typedef struct {
    SemaphoreHandle_t sem;
    volatile bool deleted; // Check the deleted task doesn't keep running after being deleted
} tsk_blocks_param_t;

/* Task blocks as often as possible
   (two or more of these can share the same semaphore and "juggle" it around)
*/
static void tsk_blocks_frequently(void *param)
{
    tsk_blocks_param_t *p = (tsk_blocks_param_t *)param;
    SemaphoreHandle_t sem = p->sem;
    // Seed is unique per task so the delay patterns differ between tasks
    srand(xTaskGetTickCount() ^ (int)xTaskGetCurrentTaskHandle());
    while (1) {
        // Asserts between every step catch the case where this task still
        // executes after vTaskDelete() was called on it
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreTake(sem, portMAX_DELAY);
        assert(!p->deleted);
        esp_rom_delay_us(rand() % 10);
        assert(!p->deleted);
        xSemaphoreGive(sem);
    }
}
// Repeatedly creates and deletes blocked mutex-juggling tasks, verifying
// after each iteration that no heap memory has leaked.
TEST_CASE("FreeRTOS Delete Blocked Tasks", "[freertos][ignore]") // TODO: esp_rom_delay_us is interrupted by signal
{
    TaskHandle_t blocking_tasks[portNUM_PROCESSORS + 1]; // one per CPU, plus one unpinned task
    tsk_blocks_param_t params[portNUM_PROCESSORS + 1] = { 0 };

    esp_rom_delay_us(100);
    unsigned before = heap_caps_get_free_size(MALLOC_CAP_8BIT);
    printf("Free memory at start %u\n", before);

    /* Any bugs will depend on relative timing of destroying the tasks, so create & delete many times.
       Stop early if it looks like some resources have not been properly cleaned up.
       (1000 iterations takes about 9 seconds on ESP32 dual core)
    */
    for (unsigned iter = 0; iter < 1000; iter++) {
        // Create everything
        SemaphoreHandle_t sem = xSemaphoreCreateMutex();
        for (unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            params[i].deleted = false;
            params[i].sem = sem;
            TEST_ASSERT_EQUAL(pdTRUE,
                              xTaskCreatePinnedToCore(tsk_blocks_frequently, "tsk_block", 4096, &params[i],
                                                      CONFIG_UNITY_FREERTOS_PRIORITY - 1, &blocking_tasks[i],
                                                      i < portNUM_PROCESSORS ? i : tskNO_AFFINITY));
        }

        vTaskDelay(5); // Let the tasks juggle the mutex for a bit

        for (unsigned i = 0; i < portNUM_PROCESSORS + 1; i++) {
            vTaskDelete(blocking_tasks[i]);
            params[i].deleted = true;
        }
        vTaskDelay(4); // Yield to the idle task for cleanup

        vSemaphoreDelete(sem);

        // Check we haven't leaked resources yet (256 bytes of slack allows
        // for heap bookkeeping noise)
        TEST_ASSERT_GREATER_OR_EQUAL(before - 256, heap_caps_get_free_size(MALLOC_CAP_8BIT));
    }
}

Wyświetl plik

@ -0,0 +1,81 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
Unit tests for FreeRTOS preemption
*/
#include <esp_types.h>
#include <stdio.h>
#include <inttypes.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/queue.h"
#include "freertos/idf_additions.h"
#include "unity.h"
#include "sdkconfig.h"
static volatile bool trigger; // Set by the test case to release the sender task
static volatile bool flag;    // Set by the sender task right before it sends

#ifndef CONFIG_FREERTOS_SMP
#define MAX_YIELD_COUNT 10000
#else
//TODO: IDF-5081
#define MAX_YIELD_COUNT 17000
#endif // CONFIG_FREERTOS_SMP

/* Task:
   - Waits for 'trigger' variable to be set
   - Reads the cycle count on this CPU
   - Pushes it into a queue supplied as a param
   - Busy-waits until the main task terminates it
*/
static void task_send_to_queue(void *param)
{
    QueueHandle_t queue = (QueueHandle_t) param;
    uint32_t ccount;

    while (!trigger) {}

    // NOTE(review): ccount is always 0 here — the cycle-count read described
    // in the comment above is not actually performed in this port
    ccount = 0;
    flag = true;
    xQueueSendToBack(queue, &ccount, 0);
    /* This is to ensure that higher priority task
       won't wake anyhow, due to this task terminating.
       The task runs until terminated by the main task.
    */
    while (1) {}
}
// Verifies that a lower-priority task pinned to the same CPU gets to run
// (and send to a queue) while the higher-priority unity task blocks on a
// queue receive.
TEST_CASE("Yield from lower priority task, same CPU", "[freertos]")
{
    /* Do this 3 times, mostly for the benchmark value - the first
       run includes a cache miss so uses more cycles than it should. */
    for (int i = 0; i < 3; i++) {
        TaskHandle_t sender_task;
        QueueHandle_t queue = xQueueCreate(1, sizeof(uint32_t));
        flag = false;
        trigger = false;

        /* "yield" task sits on our CPU, lower priority to us */
        xTaskCreatePinnedToCore(task_send_to_queue, "YIELD", 2048, (void *)queue, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &sender_task, CONFIG_UNITY_FREERTOS_CPU);

        vTaskDelay(1); /* make sure everything is set up */
        trigger = true;

        uint32_t yield_ccount;
        TEST_ASSERT( xQueueReceive(queue, &yield_ccount, 100 / portTICK_PERIOD_MS) );
        TEST_ASSERT( flag );

        vTaskDelete(sender_task);
        vQueueDelete(queue);
    }
}

Wyświetl plik

@ -0,0 +1,82 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#include "portTestMacro.h"
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test Priority Scheduling (Single Core)
Purpose:
- Test that the single-core scheduler always schedules the highest priority ready task
Procedure:
- Raise the unityTask priority to (configMAX_PRIORITIES - 1)
- unityTask creates the following lower priority tasks
- task_A (configMAX_PRIORITIES - 2)
- task_B (configMAX_PRIORITIES - 3)
- UnityTask blocks for a short period of time to allow task_A to run
- Clean up and restore unityTask's original priority
Expected:
- task_A should run after unityTask blocks
- task_B should never have run
*/
#if ( configNUM_CORES == 1 )
#define UNITY_TASK_DELAY_TICKS 10 // How long the unity task blocks, giving task_A time to run

// Flags set by task_A/task_B when (if) they get scheduled
static BaseType_t task_A_ran;
static BaseType_t task_B_ran;
/* Runs at configMAX_PRIORITIES - 2: records that it was scheduled, then
   spins so the lower-priority task_B can never run */
static void task_A(void *arg)
{
    (void) arg;
    task_A_ran = pdTRUE;
    for (;;) {
    }
}
/* Runs at configMAX_PRIORITIES - 3: must never be scheduled while task_A
   spins, so task_B_ran must remain pdFALSE */
static void task_B(void *arg)
{
    (void) arg;
    task_B_ran = pdTRUE;
    for (;;) {
    }
}
TEST_CASE("Tasks: Test priority scheduling", "[freertos]")
{
    TaskHandle_t task_A_handle;
    TaskHandle_t task_B_handle;
    task_A_ran = pdFALSE;
    task_B_ran = pdFALSE;

    /* Raise the priority of the unityTask so the created tasks cannot run
       until it blocks */
    vTaskPrioritySet(NULL, configMAX_PRIORITIES - 1);

    /* Create task_A and task_B */
    xTaskCreate(task_A, "task_A", configTEST_DEFAULT_STACK_SIZE, (void *)xTaskGetCurrentTaskHandle(), configMAX_PRIORITIES - 2, &task_A_handle);
    xTaskCreate(task_B, "task_B", configTEST_DEFAULT_STACK_SIZE, (void *)xTaskGetCurrentTaskHandle(), configMAX_PRIORITIES - 3, &task_B_handle);

    /* Block to allow task_A to be scheduled */
    vTaskDelay(UNITY_TASK_DELAY_TICKS);

    /* Test that only task_A has run */
    TEST_ASSERT_EQUAL(pdTRUE, task_A_ran);
    TEST_ASSERT_EQUAL(pdFALSE, task_B_ran);

    vTaskDelete(task_A_handle);
    vTaskDelete(task_B_handle);

    /* Restore the priority of the unityTask */
    vTaskPrioritySet(NULL, configTEST_UNITY_TASK_PRIORITY);
}
#endif /* configNUM_CORES == 1 */

Wyświetl plik

@ -0,0 +1,180 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h"
#include "freertos_test_utils.h"
#include "portTestMacro.h"
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test vTaskDelay
Purpose:
- Test that vTaskDelay is accurate
Procedure:
- The test contains TEST_VTASKDELAY_ITERATIONS number of iterations. For each iteration...
- vTaskDelay(1) to align to next tick boundary
- Store current tick count and current time (using ref clock)
- vTaskDelay for TEST_VTASKDELAY_TICKS
- Get post delay tick count and ref clock time
- For single core, run the test directly from the UnityTask
- For SMP, run the test once on each core (using vTestOnAllCores())
Expected:
- The elapsed ticks should be TEST_VTASKDELAY_TICKS, with TEST_VTASKDELAY_DELTA_TICKS error allowed (in case the
delay and ref clock functions last long enough to cross a tick boundary).
- The elapsed time should be equivalent to TEST_VTASKDELAY_TICKS tick periods, with TEST_VTASKDELAY_TICKS tick
period of error allowed (in case ref clock functions last longer that a tick period).
*/
#if ( INCLUDE_vTaskDelay == 1 )
#define TEST_VTASKDELAY_TICKS 5 // Number of ticks to delay in test
#define TEST_VTASKDELAY_ITERATIONS 5 // Number of iterations in test
#if CONFIG_FREERTOS_SMP
#define TEST_VTASKDELAY_DELTA_TICKS 1 // Number of ticks worth of delta allowed
#else
#define TEST_VTASKDELAY_DELTA_TICKS 2 // Number of ticks worth of delta allowed. We allow 2 ticks in IDF FreeRTOS as each core's tick interrupt could be out of phase
#endif

// Measures vTaskDelay() accuracy over TEST_VTASKDELAY_ITERATIONS runs, in
// both elapsed ticks and elapsed reference clock time. Runs in the caller.
static void test_vTaskDelay(void *arg)
{
    for (int i = 0; i < TEST_VTASKDELAY_ITERATIONS; i++) {
        TickType_t tick_start, tick_end;
        portTEST_REF_CLOCK_TYPE ref_clock_start, ref_clock_end;

        /* Delay until the next tick boundary */
        vTaskDelay(1);

        /* Get the current tick count and ref clock time */
        tick_start = xTaskGetTickCount();
        ref_clock_start = portTEST_REF_CLOCK_GET_TIME();

        vTaskDelay(TEST_VTASKDELAY_TICKS);

        /* Get the post delay tick count and ref clock time */
        tick_end = xTaskGetTickCount();
        ref_clock_end = portTEST_REF_CLOCK_GET_TIME();

        /* Check that elapsed ticks and ref clock is accurate. We allow TEST_VTASKDELAY_DELTA_TICKS of error in case
         * vTaskDelay() or portTEST_REF_CLOCK_GET_TIME() last long enough to cross a tick boundary */
        #if ( configUSE_16_BIT_TICKS == 1 )
        TEST_ASSERT_UINT16_WITHIN(TEST_VTASKDELAY_DELTA_TICKS, TEST_VTASKDELAY_TICKS, tick_end - tick_start);
        #else
        TEST_ASSERT_UINT32_WITHIN(TEST_VTASKDELAY_DELTA_TICKS, TEST_VTASKDELAY_TICKS, tick_end - tick_start);
        #endif
        TEST_ASSERT_UINT32_WITHIN(portTEST_TICKS_TO_REF_CLOCK(TEST_VTASKDELAY_DELTA_TICKS),
                                  portTEST_TICKS_TO_REF_CLOCK(TEST_VTASKDELAY_TICKS),
                                  ref_clock_end - ref_clock_start);
    }
}
TEST_CASE("Tasks: Test vTaskDelay", "[freertos]")
{
    portTEST_REF_CLOCK_INIT();

    #if ( configNUM_CORES > 1 )
    // SMP build: run the measurement once on each core
    vTestOnAllCores(test_vTaskDelay, NULL, configTEST_DEFAULT_STACK_SIZE, configTEST_UNITY_TASK_PRIORITY - 1);
    #else
    /* Test vTaskDelay directly on the current core */
    test_vTaskDelay(NULL);
    #endif

    portTEST_REF_CLOCK_DEINIT();
}
/* ------------------------------------------------------------------------------------------------------------------ */
/*
Test vTaskDelayUntil
Purpose:
- Test that vTaskDelayUntil is accurate
Procedure:
- The test contains TEST_VTASKDELAYUNTIL_ITERATIONS number of iterations. For each iteration...
- vTaskDelay(1) to align to next tick boundary
- Store current tick count and current time (using ref clock)
- Call vTaskDelayUntil() for TEST_VTASKDELAYUNTIL_TICKS, using the stored tick count as the previous wake time
- Get post delay tick count and ref clock time
- For single core, run the test directly from the UnityTask
- For SMP, run the test once on each core (using vTestOnAllCores())
Expected:
- The elapsed ticks should be TEST_VTASKDELAYUNTIL_TICKS, with TEST_VTASKDELAYUNTIL_DELTA_TICKS tick of error
allowed (in case the delay and ref clock functions last long enough to cross a tick boundary).
- The elapsed time should be equivalent to TEST_VTASKDELAYUNTIL_TICKS tick periods, with
TEST_VTASKDELAYUNTIL_DELTA_TICKS tick period of error allowed (in case ref clock functions last longer that a tick
period).
*/
#if ( INCLUDE_xTaskDelayUntil == 1 )
#define TEST_VTASKDELAYUNTIL_TICKS 5 // Number of ticks to delay in test
#define TEST_VTASKDELAYUNTIL_ITERATIONS 5 // Number of iterations in test
#if CONFIG_FREERTOS_SMP
#define TEST_VTASKDELAYUNTIL_DELTA_TICKS 1 // Number of ticks worth of delta allowed
#else
#define TEST_VTASKDELAYUNTIL_DELTA_TICKS 2 // Number of ticks worth of delta allowed. We allow 2 ticks in IDF FreeRTOS as each core's tick interrupt could be out of phase
#endif
/* Test body for vTaskDelayUntil accuracy (see test description above).
 *
 * Runs TEST_VTASKDELAYUNTIL_ITERATIONS iterations. Each iteration aligns to a
 * tick boundary, records the tick count and ref clock time, delays for
 * TEST_VTASKDELAYUNTIL_TICKS using vTaskDelayUntil(), then checks both the
 * elapsed ticks and the elapsed real time against the expected values.
 *
 * arg is unused; the signature matches vTestOnAllCores()'s task function type. */
static void test_vTaskDelayUntil(void *arg)
{
    for (int i = 0; i < TEST_VTASKDELAYUNTIL_ITERATIONS; i++) {
        TickType_t tick_start, tick_end, last_wake_tick;
        portTEST_REF_CLOCK_TYPE ref_clock_start, ref_clock_end;
        /* Delay until the next tick boundary */
        vTaskDelay(1);
        /* Get the current tick count and ref clock time */
        tick_start = xTaskGetTickCount();
        /* vTaskDelayUntil() updates last_wake_tick in place, so seed it with
         * the tick count sampled at the boundary */
        last_wake_tick = tick_start;
        ref_clock_start = portTEST_REF_CLOCK_GET_TIME();
        vTaskDelayUntil(&last_wake_tick, TEST_VTASKDELAYUNTIL_TICKS);
        /* Get the post delay tick count and ref clock time */
        tick_end = xTaskGetTickCount();
        ref_clock_end = portTEST_REF_CLOCK_GET_TIME();
        /* Check that elapsed ticks and ref clock is accurate. We allow TEST_VTASKDELAYUNTIL_DELTA_TICKS of error in
         * case vTaskDelayUntil() or portTEST_REF_CLOCK_GET_TIME() last long enough to cross a tick boundary.
         * The second assertion checks that the updated last_wake_tick matches the actual wake-up tick. */
#if ( configUSE_16_BIT_TICKS == 1 )
        TEST_ASSERT_UINT16_WITHIN(TEST_VTASKDELAYUNTIL_DELTA_TICKS, TEST_VTASKDELAYUNTIL_TICKS, tick_end - tick_start);
        TEST_ASSERT_UINT16_WITHIN(TEST_VTASKDELAYUNTIL_DELTA_TICKS, tick_end, last_wake_tick);
#else
        TEST_ASSERT_UINT32_WITHIN(TEST_VTASKDELAYUNTIL_DELTA_TICKS, TEST_VTASKDELAYUNTIL_TICKS, tick_end - tick_start);
        TEST_ASSERT_UINT32_WITHIN(TEST_VTASKDELAYUNTIL_DELTA_TICKS, tick_end, last_wake_tick);
#endif
        /* Check that the elapsed ref clock time is accurate. We allow TEST_VTASKDELAYUNTIL_DELTA_TICKS time worth of
         * error to account for the execution time of the ref clock functions. */
        TEST_ASSERT_UINT32_WITHIN(portTEST_TICKS_TO_REF_CLOCK(TEST_VTASKDELAYUNTIL_DELTA_TICKS),
                                  portTEST_TICKS_TO_REF_CLOCK(TEST_VTASKDELAYUNTIL_TICKS),
                                  ref_clock_end - ref_clock_start);
    }
}
/* Runs the vTaskDelayUntil accuracy test described above, with the reference
 * clock initialized for the duration of the test. */
TEST_CASE("Tasks: Test vTaskDelayUntil", "[freertos]")
{
    portTEST_REF_CLOCK_INIT();
#if ( configNUM_CORES > 1 )
    /* For SMP, run the test once on each core */
    vTestOnAllCores(test_vTaskDelayUntil, NULL, configTEST_DEFAULT_STACK_SIZE, configTEST_UNITY_TASK_PRIORITY - 1);
#else
    /* Test vTaskDelayUntil directly on the current core */
    test_vTaskDelayUntil(NULL);
#endif
    portTEST_REF_CLOCK_DEINIT();
}
#endif /* ( INCLUDE_xTaskDelayUntil == 1 ) */

Wyświetl plik

@ -0,0 +1,87 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
Unit tests for FreeRTOS task priority get/set
*/
#include <esp_types.h>
#include <stdio.h>
#include <strings.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
/* Busy-loop task that increments the counter passed via param forever.
 * It never blocks, so it only runs while it is the highest-priority ready
 * task on its core; the test infers scheduling from counter progress.
 * The task never exits — the test deletes it externally. */
static void counter_task(void *param)
{
    volatile uint32_t *counter = (volatile uint32_t *)param;
    while (1) {
        (*counter)++;
    }
}
/* Verifies uxTaskPriorityGet()/vTaskPrioritySet(): creates two busy-loop
 * counter tasks per core at different priorities, checks that only the
 * higher-priority task makes progress, swaps the priorities, and checks that
 * progress swaps accordingly. */
TEST_CASE("Get/Set Priorities", "[freertos]")
{
    /* Two tasks per processor */
    TaskHandle_t tasks[portNUM_PROCESSORS][2] = { 0 };
    unsigned volatile counters[portNUM_PROCESSORS][2] = { 0 };

    /* Sanity check: the unity task itself runs at the configured priority */
    TEST_ASSERT_EQUAL(CONFIG_UNITY_FREERTOS_PRIORITY, uxTaskPriorityGet(NULL));

    /* create a matrix of counter tasks on each core; index 0 runs at the unity
     * task's priority, index 1 one priority level below it */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        for (int task = 0; task < 2; task++) {
            xTaskCreatePinnedToCore(counter_task, "count", 2048, (void *)&(counters[cpu][task]), CONFIG_UNITY_FREERTOS_PRIORITY - task, &(tasks[cpu][task]), cpu);
        }
    }

    /* check they were created with the expected priorities */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        for (int task = 0; task < 2; task++) {
            TEST_ASSERT_EQUAL(CONFIG_UNITY_FREERTOS_PRIORITY - task, uxTaskPriorityGet(tasks[cpu][task]));
        }
    }

    vTaskDelay(10);

    /* at this point, only the higher priority tasks (first index) should be counting */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        TEST_ASSERT_NOT_EQUAL(0, counters[cpu][0]);
        TEST_ASSERT_EQUAL(0, counters[cpu][1]);
    }

    /* swap priorities! */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        vTaskPrioritySet(tasks[cpu][0], CONFIG_UNITY_FREERTOS_PRIORITY - 1);
        vTaskPrioritySet(tasks[cpu][1], CONFIG_UNITY_FREERTOS_PRIORITY);
    }

    /* check priorities have swapped... */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        TEST_ASSERT_EQUAL(CONFIG_UNITY_FREERTOS_PRIORITY - 1, uxTaskPriorityGet(tasks[cpu][0]));
        TEST_ASSERT_EQUAL(CONFIG_UNITY_FREERTOS_PRIORITY, uxTaskPriorityGet(tasks[cpu][1]));
    }

    /* check the tasks which are counting have also swapped now: the demoted
     * task's counter stalls while the promoted task's counter advances */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        unsigned old_counters[2];
        old_counters[0] = counters[cpu][0];
        old_counters[1] = counters[cpu][1];
        vTaskDelay(10);
        TEST_ASSERT_EQUAL(old_counters[0], counters[cpu][0]);
        TEST_ASSERT_NOT_EQUAL(old_counters[1], counters[cpu][1]);
    }

    /* clean up */
    for (int cpu = 0; cpu < portNUM_PROCESSORS; cpu++) {
        for (int task = 0; task < 2; task++) {
            vTaskDelete(tasks[cpu][task]);
        }
    }
}

Wyświetl plik

@ -0,0 +1,657 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Unit tests for FreeRTOS task yielding
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "unity.h"
#include <string.h>
// Array to store the task ids of the test threads being yielded.
// Each test records the order in which tasks ran into this array.
static volatile uint32_t task_yield_sequence[3];

// Index variable to access the yield sequence array.
// Each test resets this to 0 before creating its tasks and again before
// checking the recorded sequence.
static volatile uint32_t idx = 0;

// Lock to protect the shared variables used to store task ids
static portMUX_TYPE idx_lock;

// Synchronization variable to enforce a deterministic dispatch sequence of the test threads
static volatile bool task_sequence_ready;

// Synchronization variable between the test threads and the unity task
// (incremented by each test task as it finishes)
static volatile uint32_t count;

// Mutex used to create a blocked-task scenario
static volatile SemaphoreHandle_t task_mutex;

// This helper macro stores the task id in the sequence array atomically
// (idx is shared between tasks, possibly on different cores)
#define STORE_TASK_ID(task_id) ({ \
    portENTER_CRITICAL(&idx_lock); \
    task_yield_sequence[idx++] = task_id; \
    portEXIT_CRITICAL(&idx_lock); \
})
/*
* Test yielding for same priority tasks on the same core.
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the same core.
* - Each task pushes its task_id on to a queue and then yields.
* - Unity task checks the sequence of the tasks run once the yield_tasks are done.
*/
/* First task of the same-priority yield test: records its id, releases
 * yield_task2 via task_sequence_ready, then yields to it. arg is the task id. */
static void yield_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Notify the yield_task2 to run */
    task_sequence_ready = true;

    /* Yield — the other ready task of the same priority should run next */
    taskYIELD();

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* Second task of the same-priority yield test: waits until yield_task1 has
 * recorded its id, then records its own. arg is the task id. */
static void yield_task2(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Wait for the other task to run for the test to begin */
    while (!task_sequence_ready) {
        taskYIELD();
    };

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Yield */
    taskYIELD();

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* See the description above: two equal-priority tasks pinned to the unity
 * task's core must run in creation order when each yields to the other. */
TEST_CASE("Task yield must run the next ready task of the same priority", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Reset task sequence flag */
    task_sequence_ready = false;

    /* Create test tasks, one priority level below the unity task */
    xTaskCreatePinnedToCore(yield_task1, "yield_task1", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, CONFIG_UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(yield_task2, "yield_task2", 2048, (void *)2, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, CONFIG_UNITY_FREERTOS_CPU);

    /* Wait for the tasks to finish up */
    while (count != 2) {
        vTaskDelay(10);
    }

    idx = 0;

    /* Verify that the yield is successful and the next ready task is run */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
/*
* Test yielding behavior when a task is blocked
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the same core.
* - One task blocks on a mutex.
* - Second task does not contest for a mutex and yields.
* - Unity task verifies that the blocked task is not scheduled unless it is ready to run.
*/
/* Blocked task of the "yield must not run a blocked task" test: blocks on
 * task_mutex (held by the unity task) and records its id only after being
 * unblocked. arg is the task id. */
static void test_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Block on mutex taken by the unity task */
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(task_mutex, portMAX_DELAY));

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    /* Release mutex */
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreGive(task_mutex));

    /* Delete self */
    vTaskDelete(NULL);
}
/* Yielding task of the "yield must not run a blocked task" test: records its
 * id, yields, and records its id again. If the yield wrongly scheduled the
 * blocked task, the two records would not be consecutive. arg is the task id. */
static void test_task2(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Yield */
    taskYIELD();

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* See the description above: a task blocked on a mutex must not be scheduled
 * by another task's yield; it runs only once the mutex is released. */
TEST_CASE("Task yield must not run a blocked task", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Create mutex and acquire it so test_task1 will block on it */
    task_mutex = xSemaphoreCreateMutex();
    TEST_ASSERT_NOT_EQUAL(NULL, task_mutex);
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreTake(task_mutex, portMAX_DELAY));

    /* Create test_task1. This gets blocked. */
    xTaskCreatePinnedToCore(test_task1, "test_task1", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, CONFIG_UNITY_FREERTOS_CPU);

    /* Wait for test_task1 to start up and get blocked */
    vTaskDelay(10);

    /* Create test_task2. This issues the yield. */
    xTaskCreatePinnedToCore(test_task2, "test_task2", 2048, (void *)2, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, CONFIG_UNITY_FREERTOS_CPU);

    /* Wait for test_task2 to finish up */
    while (count != 1) {
        vTaskDelay(10);
    }

    /* Release mutex. This should unblock test_task1. */
    TEST_ASSERT_EQUAL(pdTRUE, xSemaphoreGive(task_mutex));

    /* Wait for test_task1 to finish up */
    vTaskDelay(10);

    idx = 0;

    /* Verify that the yield results in the same task running again and not the blocked task */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);

    /* Verify that the task yield did not result in a context switch */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);

    /* Verify that the other task is scheduled once it is unblocked */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);

    /* Cleanup task mutex */
    vSemaphoreDelete(task_mutex);
}
/*
* Test yielding behavior when the scheduler is suspended
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the same core.
* - One task suspends the scheduler and then yields.
* - Unity task verifies that the yield does not happen until the scheduler is resumed.
*/
/* Task that yields with the scheduler suspended: the yield must not switch to
 * test_critical_task2 until xTaskResumeAll() is called. arg is the task id. */
static void test_critical_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Suspend scheduler */
    vTaskSuspendAll();

    /* Set the task sequence flag once test_critical_task1 runs */
    task_sequence_ready = true;

    /* Yield — must have no effect while the scheduler is suspended */
    taskYIELD();

    /* Store task_id in the sequence array.
     * No need for a lock when the scheduler is suspended.
     */
    task_yield_sequence[idx++] = task_id;

    /* Increment task count to notify unity task */
    count++;

    /* Resume scheduler */
    xTaskResumeAll();

    /* Delete self */
    vTaskDelete(NULL);
}
/* Peer task of the suspended-scheduler test: waits for test_critical_task1 to
 * start, then records its id. It must only run after the scheduler is
 * resumed. arg is the task id. */
static void test_critical_task2(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Wait for the other task to run for the test to begin */
    while (!task_sequence_ready) {
        taskYIELD();
    };

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* See the description above: a yield issued while the scheduler is suspended
 * must not cause a context switch until the scheduler is resumed. */
TEST_CASE("Task yield must not happen when scheduler is suspended", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Reset task sequence flag */
    task_sequence_ready = false;

    /* Create test tasks */
    xTaskCreatePinnedToCore(test_critical_task1, "test_critical_task1", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, CONFIG_UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(test_critical_task2, "test_critical_task2", 2048, (void *)2, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, CONFIG_UNITY_FREERTOS_CPU);

    /* Wait for both the tasks to finish up */
    while (count != 2) {
        vTaskDelay(10);
    }

    idx = 0;

    /* Verify that test_critical_task1 runs first */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);

    /* Verify that the task yield, when the scheduler is suspended, did not result in a context switch */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);

    /* Verify that test_critical_task2 is scheduled once the scheduler is resumed */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
/*
* Test yielding behavior when a lower priority task creates a higher priority task
*
* The test performs the following actions:
* - Creates a task with a priority higher than the unity task.
* - Unity task verifies that it yields immediately to the newly created task.
*/
/* Task created above the unity task's priority: records its id as soon as it
 * runs, which should be immediately on creation. arg is the task id. */
static void high_prio_task(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* See the description above: creating a higher-priority task must preempt the
 * creator immediately, so the new task's id is recorded before the unity
 * task's id. */
TEST_CASE("Task yield must happen when a task creates a higher priority task", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Create test task at a priority above the unity task */
    xTaskCreatePinnedToCore(high_prio_task, "high_prio_task", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY + 1, NULL, CONFIG_UNITY_FREERTOS_CPU);

    uint32_t unity_task_id = 2;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(unity_task_id);

    /* Wait for the test task to finish up */
    while (count == 0) {
        vTaskDelay(10);
    }

    idx = 0;

    /* Verify that the unity task yields as soon as a higher prio task is created */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);

    /* Verify that the unity task_id is stored after the higher priority task runs */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
/*
* Test yielding behavior when a lower priority task raises the priority of another task
*
* The test performs the following actions:
* - Creates a task with a priority lower than the unity task.
* - Unity task raises the priority of the newly created task.
* - Unity task verifies that it yields once the priority is raised.
*/
/* Task created below the unity task's priority: records its id when it
 * finally runs, which should only happen after the unity task raises its
 * priority. arg is the task id. */
static void low_prio_task(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* See the description above: raising another task's priority above the
 * current task's must cause an immediate yield to the promoted task.
 *
 * Note: the test name previously read "must happed ... another priority task";
 * fixed the typo so test reports read correctly. */
TEST_CASE("Task yield must happen when a task raises the priority of another task", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Create test task at a priority below the unity task; it must not run yet */
    TaskHandle_t task_handle;
    xTaskCreatePinnedToCore(low_prio_task, "low_prio_task", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &task_handle, CONFIG_UNITY_FREERTOS_CPU);

    uint32_t unity_task_id = 2;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(unity_task_id);

    /* Raise the priority of the lower priority task — this should preempt us */
    vTaskPrioritySet(task_handle, CONFIG_UNITY_FREERTOS_PRIORITY + 1);

    /* Store unity task_id in the sequence array again */
    STORE_TASK_ID(unity_task_id);

    /* Wait for the test task to finish up */
    while (count == 0) {
        vTaskDelay(10);
    }

    idx = 0;

    /* Verify that the unity task does not yield to a lower priority task when it is created */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);

    /* Verify that the unity task yielded once the priority of the lower priority task was raised */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx++]);

    /* Verify that the unity task_id is stored again once the test task finishes up */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx++]);
}
#if (portNUM_PROCESSORS > 1) && !(CONFIG_FREERTOS_UNICORE)
/*
* Test yielding behavior when a task on one core forces an yield on the other core
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the core on which unity task is not running.
* - One task spins and does not let the other task run.
* - Force a cross-core yield from the unity task.
* - Verify that the cross-core yield happens and the second task is scheduled to run.
*/
/* First task on the other core: records its id, then idles in a delay loop.
 * It is deleted externally by the unity task at the end of the test.
 * arg is the task id. */
static void other_core_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    while (1) {
        vTaskDelay(10);
    }
}
/* Second task on the other core: waits for the unity task to flag the start
 * of the test, records its id, then idles in a delay loop until it is deleted
 * externally. arg is the task id. */
static void other_core_task2(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Wait for the other task to run for the test to begin */
    while (!task_sequence_ready) {
        taskYIELD();
    };

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    while (1) {
        vTaskDelay(10);
    }
}
/* See the description above: a yield forced from the unity task's core onto
 * the other core must make the other core switch to its next ready task. */
TEST_CASE("Task yield must happen when issued from another core", "[freertos]")
{
    TaskHandle_t other_core_taskhandle1;
    TaskHandle_t other_core_taskhandle2;

    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Reset task sequence flag */
    task_sequence_ready = false;

    /* Create test tasks pinned to the core the unity task is NOT running on */
    xTaskCreatePinnedToCore(other_core_task1, "test_task1", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &other_core_taskhandle1, !CONFIG_UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(other_core_task2, "test_task2", 2048, (void *)2, CONFIG_UNITY_FREERTOS_PRIORITY - 1, &other_core_taskhandle2, !CONFIG_UNITY_FREERTOS_CPU);

    /* Wait for everything to be setup */
    vTaskDelay(10);

    uint32_t idx1 = 0;

    /* Verify that other_core_task1 runs first */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx1++]);

    /* Set the task sequence flag once other_core_task1 runs */
    task_sequence_ready = true;

    /* Force a yield on the other core. The API differs between the Amazon SMP
     * FreeRTOS kernel and IDF FreeRTOS. */
#if CONFIG_FREERTOS_SMP
    portYIELD_CORE(!CONFIG_UNITY_FREERTOS_CPU);
#else
    vPortYieldOtherCore(!CONFIG_UNITY_FREERTOS_CPU);
#endif

    /* Wait for the test task to finish up */
    while (count == 0) {
        vTaskDelay(10);
    }

    /* Verify that other_core_task1 yields and other_core_task2 runs */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx1++]);

    /* Cleanup test tasks */
    vTaskDelete(other_core_taskhandle1);
    vTaskDelete(other_core_taskhandle2);
}
#if !CONFIG_FREERTOS_SMP
static volatile bool yield_triggered = false;
/*
* Test cross-core yielding behavior when the scheduler is suspended
*
* The test performs the following actions:
* - Creates 2 tasks with the same priority on the other core.
* - One task suspends the scheduler.
* - Unity task forces a cross-core yield.
* - Unity task verifies that the yield does not happen until the scheduler is resumed.
*
* Note: This test case is not valid when FreeRTOS SMP is used as the scheduler suspension
* is not per core but across cores and hence the test cannot be executed.
*/
/* Task on the other core that suspends its scheduler and spins until the
 * unity task has issued the cross-core yield (signalled via yield_triggered).
 * The yield must not take effect until xTaskResumeAll(). arg is the task id. */
static void other_core_critical_task1(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Suspend scheduler */
    vTaskSuspendAll();

    /* Store task_id in the sequence array again.
     * No need for a lock when the scheduler is suspended.
     */
    task_yield_sequence[idx++] = task_id;

    /* Set the task sequence flag once other_core_critical_task1 runs */
    task_sequence_ready = true;

    /* Increment task count to notify unity task */
    count++;

    /* Busy-wait until the unity task has forced the cross-core yield */
    while (!yield_triggered) { }

    /* Resume scheduler */
    xTaskResumeAll();

    /* Delete self */
    vTaskDelete(NULL);
}
/* Peer task of the cross-core suspended-scheduler test: waits for
 * other_core_critical_task1 to start, then records its id. It must only run
 * after that task resumes the scheduler. arg is the task id. */
static void other_core_critical_task2(void *arg)
{
    uint32_t task_id = (uint32_t)arg;

    /* Wait for the other task to run for the test to begin */
    while (!task_sequence_ready) {
        taskYIELD();
    };

    /* Store task_id in the sequence array */
    STORE_TASK_ID(task_id);

    /* Increment task count to notify unity task */
    count++;

    /* Delete self */
    vTaskDelete(NULL);
}
/* See the description above: a cross-core yield must not preempt a core whose
 * scheduler is suspended until that core resumes its scheduler. */
TEST_CASE("Task yield on other core must not happen when scheduler is suspended", "[freertos]")
{
    /* Reset yield sequence index */
    idx = 0;

    /* Reset yield sequence array */
    memset((void *)task_yield_sequence, 0, sizeof(task_yield_sequence));

    /* Initialize idx lock */
    portMUX_INITIALIZE(&idx_lock);

    /* Reset task count */
    count = 0;

    /* Reset task sequence flag */
    task_sequence_ready = false;

    /* Reset yield trigger flag. Without this reset the test only passes on its
     * first run: the flag would remain true from a previous run, letting
     * other_core_critical_task1 resume the scheduler before the cross-core
     * yield is actually issued. */
    yield_triggered = false;

    /* Create test tasks on the core the unity task is NOT running on */
    xTaskCreatePinnedToCore(other_core_critical_task1, "other_core_critical_task1", 2048, (void *)1, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, !CONFIG_UNITY_FREERTOS_CPU);
    xTaskCreatePinnedToCore(other_core_critical_task2, "other_core_critical_task2", 2048, (void *)2, CONFIG_UNITY_FREERTOS_PRIORITY - 1, NULL, !CONFIG_UNITY_FREERTOS_CPU);

    /* Wait for at least one of the tasks to finish up */
    while (count == 0) {
        vTaskDelay(10);
    }

    /* Force a yield on the other core while its scheduler is suspended */
    vPortYieldOtherCore(!CONFIG_UNITY_FREERTOS_CPU);

    /* Set yield triggered flag so other_core_critical_task1 resumes its scheduler */
    yield_triggered = true;

    uint32_t idx1 = 0;

    /* Verify that the first task runs */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx1++]);

    /* Verify that the task yield when the scheduler is suspended did not result in a context switch */
    TEST_ASSERT_EQUAL(1, task_yield_sequence[idx1++]);

    /* Wait for the second task to finish up */
    while (count != 2) {
        vTaskDelay(10);
    }

    /* Verify that the second task is scheduled once the critical section is over */
    TEST_ASSERT_EQUAL(2, task_yield_sequence[idx1++]);
}
#endif // !CONFIG_FREERTOS_SMP
#endif // (portNUM_PROCESSORS > 1) && !(CONFIG_FREERTOS_UNICORE)

Wyświetl plik

@ -0,0 +1,91 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* FreeRTOS timer tests
*/
#include <stdio.h>
#include "unity.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/timers.h"
/* Shared callback for all timer tests: the timer ID points at the test's
 * counter variable; each invocation increments it and logs the new value. */
static void timer_callback(TimerHandle_t timer)
{
    volatile int *invocations = (volatile int *)pvTimerGetTimerID( timer );
    *invocations += 1;
    printf("Callback timer %p count %p = %d\n", timer, invocations, *invocations);
}
/* A one-shot timer must fire exactly once after its period and then be
 * inactive. The counter on the stack is passed to the callback via the timer
 * ID. */
TEST_CASE("Oneshot FreeRTOS timers", "[freertos]")
{
    volatile int count = 0;
    TimerHandle_t oneshot = xTimerCreate("oneshot", 100 / portTICK_PERIOD_MS, pdFALSE,
                                         (void *)&count, timer_callback);
    TEST_ASSERT(oneshot);
    /* Not active and never fired before being started */
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(oneshot));
    TEST_ASSERT_EQUAL(0, count);

    TEST_ASSERT( xTimerStart(oneshot, 1) );
    vTaskDelay(2); /* give the timer task a chance to process the message */
    TEST_ASSERT_EQUAL(pdTRUE, xTimerIsTimerActive(oneshot));
    TEST_ASSERT_EQUAL(0, count);

    vTaskDelay(250 / portTICK_PERIOD_MS); // 2.5 timer periods
    /* Fired exactly once, then went inactive */
    TEST_ASSERT_EQUAL(1, count);
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(oneshot));

    TEST_ASSERT( xTimerDelete(oneshot, 1) );
}
/* An auto-reload timer must fire once per period until stopped, and fire no
 * more after xTimerStop().
 * NOTE(review): the timer is named "oneshot" — presumably a copy-paste from
 * the previous test; the name is only used for debugging. */
TEST_CASE("Recurring FreeRTOS timers", "[freertos]")
{
    volatile int count = 0;
    TimerHandle_t recurring = xTimerCreate("oneshot", 100 / portTICK_PERIOD_MS, pdTRUE,
                                           (void *)&count, timer_callback);
    TEST_ASSERT(recurring);
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(recurring));
    TEST_ASSERT_EQUAL(0, count);

    TEST_ASSERT( xTimerStart(recurring, 1) );
    vTaskDelay(2); // let timer task process the queue
    TEST_ASSERT_EQUAL(pdTRUE, xTimerIsTimerActive(recurring));
    TEST_ASSERT_EQUAL(0, count);

    vTaskDelay(250 / portTICK_PERIOD_MS); // 2.5 timer periods
    /* Fired at 1 and 2 periods; still active because it auto-reloads */
    TEST_ASSERT_EQUAL(2, count);
    TEST_ASSERT_EQUAL(pdTRUE, xTimerIsTimerActive(recurring));

    TEST_ASSERT( xTimerStop(recurring, 1) );
    TEST_ASSERT_EQUAL(2, count);

    vTaskDelay(100 / portTICK_PERIOD_MS); // One more timer period
    TEST_ASSERT_EQUAL(2, count); // hasn't gone up
    TEST_ASSERT_EQUAL(pdFALSE, xTimerIsTimerActive(recurring));

    TEST_ASSERT( xTimerDelete(recurring, 1) );
}
/* Creating a timer from caller-provided static storage must return a valid
 * handle. Unlike the original version, the timer is deleted afterwards: the
 * StaticTimer_t lives on this function's stack, so the timer must not remain
 * known to the timer service after this test returns (and other timer tests
 * delete their timers too — this keeps the tests consistent). */
TEST_CASE("Static timer creation", "[freertos]")
{
    StaticTimer_t static_timer;
    TimerHandle_t created_timer;
    volatile int count = 0;

    created_timer = xTimerCreateStatic("oneshot", 100 / portTICK_PERIOD_MS,
                                       pdTRUE,
                                       (void *)&count,
                                       timer_callback,
                                       &static_timer);
    TEST_ASSERT_NOT_NULL(created_timer);

    /* Clean up: the timer was never started, so no callback can fire. Give the
     * timer task a tick to process the queued delete command before the
     * stack-allocated StaticTimer_t goes out of scope. */
    TEST_ASSERT( xTimerDelete(created_timer, 1) );
    vTaskDelay(2);
}

Wyświetl plik

@ -0,0 +1,3 @@
# Main component of the Linux-target FreeRTOS test app: registers the Unity
# test-runner entry point and pulls in the unit test framework plus the
# refactored kernel tests.
idf_component_register(SRCS "linux_freertos.c"
                       INCLUDE_DIRS "."
                       PRIV_REQUIRES "unity" "kernel_tests")

Wyświetl plik

@ -0,0 +1,36 @@
# Configuration for the Unity-based unit test runner. The UNITY_FREERTOS_*
# options control the task that executes the tests; the leak-level options set
# thresholds for heap-leak reporting between tests.
menu "IDF unit test"

    config UNITY_FREERTOS_PRIORITY
        int "Priority of Unity test task"
        default 5

    config UNITY_FREERTOS_CPU
        int "CPU to run Unity test task on"
        default 0

    config UNITY_FREERTOS_STACK_SIZE
        int "Stack size of Unity test task, in bytes"
        default 8192

    config UNITY_WARN_LEAK_LEVEL_GENERAL
        int "Leak warning level"
        default 255

    config UNITY_CRITICAL_LEAK_LEVEL_GENERAL
        int "Critical leak"
        default 1024

    config UNITY_CRITICAL_LEAK_LEVEL_LWIP
        int "Critical leak for UT which use LWIP component"
        default 4095

    config UNITY_IGNORE_PERFORMANCE_TESTS
        bool "Ignore performance test results"
        default y if IDF_ENV_FPGA
        default n
        help
            If set, performance tests that use TEST_PERFORMANCE_LESS_THAN and
            TEST_PERFORMANCE_GREATER_THAN macros will log the performance value
            but not fail the test if the threshold is not met.

endmenu

Wyświetl plik

@ -0,0 +1,26 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "unity.h"
#include "unity_test_runner.h"
/* Entry point of the test application: raises its own priority, prints the
 * banner, and hands control to the interactive Unity test menu (which never
 * returns). */
void app_main(void)
{
    /*
    Some FreeRTOS tests are reliant on the main task being at priority UNITY_FREERTOS_PRIORITY to test scheduling
    behavior. Thus, we raise the main task's priority before any tasks are run. See IDF-6088
    */
    vTaskPrioritySet(NULL, CONFIG_UNITY_FREERTOS_PRIORITY);
    /* "FreeRTOS" banner */
    printf(" ______ _____ _______ ____ _____\n");
    printf("| ____| | __ \\__ __/ __ \\ / ____|\n");
    printf("| |__ _ __ ___ ___| |__) | | | | | | | (___\n");
    printf("| __| '__/ _ \\/ _ \\ _ / | | | | | |\\___ \\\n");
    printf("| | | | | __/ __/ | \\ \\ | | | |__| |____) |\n");
    printf("|_| |_| \\___|\\___|_| \\_\\ |_| \\____/|_____/\n");
    /* Interactive test menu; does not return */
    unity_run_menu();
}

Wyświetl plik

@ -0,0 +1,13 @@
# SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytest_embedded import Dut


@pytest.mark.linux
@pytest.mark.host_test
def test_linux_freertos_SMP(dut: Dut) -> None:
    """Run every non-[ignore] FreeRTOS unit test on the Linux target and
    require zero failures."""
    dut.expect_exact('Press ENTER to see the list of tests.')
    # '![ignore]' tells the Unity menu to run all tests except those tagged [ignore]
    dut.write('![ignore]')
    # Unity summary line, e.g. "42 Tests 0 Failures 0 Ignored"
    dut.expect('[0-9][0-9] Tests 0 Failures 0 Ignored')

Wyświetl plik

@ -0,0 +1,2 @@
CONFIG_IDF_TARGET="linux"
CONFIG_FREERTOS_SMP=y