Merge branch 'bugfix/small_unit_test_fixes' into 'master'

test: Small unit test fixes

See merge request espressif/esp-idf!6981
pull/4494/head
Angus Gratton 2019-12-13 14:00:05 +08:00
commit ecf85555b6
7 changed files with 96 additions and 35 deletions

Show file

@@ -752,7 +752,7 @@ ptest_func_t frhost_slave = {
PARAM_GROUP_DECLARE_TYPE(IO_MODE, sdio_test_config_t, test_cfg_array);
-TEST_MASTER_SLAVE(FRHOST, test_cfg_array, "[sdio][timeout=180][test_env=UT_SDIO]", &frhost_master, &frhost_slave);
+TEST_MASTER_SLAVE(SDIO_FRHOST, test_cfg_array, "[sdio][timeout=180][test_env=UT_SDIO]", &frhost_master, &frhost_slave);
ptest_func_t tohost_master = {
.pre_test = null_pre,
@@ -766,4 +766,4 @@ ptest_func_t tohost_slave = {
.post_test = null_post,
};
-TEST_MASTER_SLAVE(TOHOST, test_cfg_array, "[sdio][timeout=180][test_env=UT_SDIO]", &tohost_master, &tohost_slave);
+TEST_MASTER_SLAVE(SDIO_TOHOST, test_cfg_array, "[sdio][timeout=180][test_env=UT_SDIO]", &tohost_master, &tohost_slave);

Show file

@@ -21,7 +21,7 @@ static const ptest_func_t local_test_func = {
#define TEST_SPI_LOCAL(name, param_set) \
PARAM_GROUP_DECLARE(name, param_set) \
-TEST_LOCAL(name, param_set, "[spi][timeout=120]", &local_test_func)
+TEST_SINGLE_BOARD(SPI_##name, param_set, "[spi][timeout=120]", &local_test_func)
static void local_test_init(void** arg)
{

Show file

@@ -75,11 +75,11 @@
* 4. Declare the group by PARAM_GROUP_DECLARE right after the param group:
* PARAM_GROUP_DECLARE(MODE, mode_pgroup)
*
-* 5. Declare the test function by TEST_LOCAL (for single board test), or TEST_MASTER_SLAVE(for multiboard test)
+* 5. Declare the test function by TEST_SINGLE_BOARD (for single board test), or TEST_MASTER_SLAVE (for multi-board test)
* TEST_MASTER_SLAVE(MODE, mode_pgroup, "[spi][timeout=120]", &master_test_func, &slave_test_func)
*
* or
-* TEST_LOCAL(TIMING, timing_pgroup, "[spi][timeout=120]", &local_test_func)
+* TEST_SINGLE_BOARD(TIMING, timing_pgroup, "[spi][timeout=120]", &local_test_func)
*
* NOTE: suggest to define your own macro to wrap 4 and 5 if your tag and test functions are the same. E.g.:
* #define TEST_SPI_MASTER_SLAVE(name, pgroup) (backslash)
@@ -146,8 +146,8 @@ void test_serializer(const param_group_t *param_group, const ptest_func_t* test_
* @param tag Tag for environment, etc. e.g. [spi][timeout=120]
* @param test_func ``ptest_func_t`` to be executed.
*/
-#define TEST_LOCAL(name, param_group, tag, test_func) \
-TEST_CASE("local test: "#name, tag) { test_serializer(&PGROUP_NAME(param_group), test_func); }
+#define TEST_SINGLE_BOARD(name, param_group, tag, test_func) \
+TEST_CASE("single board test: "#name, tag) { test_serializer(&PGROUP_NAME(param_group), test_func); }
/**
* Test parameter group for master-slave framework

Show file

@@ -383,7 +383,7 @@ class UT(IDFApp):
path = os.path.join(self.idf_path, app_path)
default_build_path = os.path.join(path, "build")
if os.path.exists(default_build_path):
-return path
+return default_build_path
# first try to get from build folder of unit-test-app
path = os.path.join(self.idf_path, "tools", "unit-test-app", "build")
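For reference, a rough sketch of the lookup order this hunk fixes (the enclosing function name and the surrounding class details are assumptions; only the lines shown above come from the actual UT app class): when the app's own `build` directory exists it should be returned, rather than the app directory itself, otherwise the shared unit-test-app build directory is used.

```python
import os

def binary_path_sketch(idf_path, app_path):
    """Illustrative sketch of the lookup order, not the actual UT class code."""
    path = os.path.join(idf_path, app_path)
    default_build_path = os.path.join(path, "build")
    if os.path.exists(default_build_path):
        # The fix: return the build directory, not the app directory.
        return default_build_path
    # Fall back to the prebuilt unit-test-app build directory.
    return os.path.join(idf_path, "tools", "unit-test-app", "build")
```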

Show file

@@ -213,6 +213,7 @@ class IDFDUT(DUT.SerialDUT):
Structured this way so @_uses_esptool will reconnect each time
"""
+flash_files = []
try:
# note: opening here prevents us from having to seek back to 0 each time
flash_files = [(offs, open(path, "rb")) for (offs, path) in self.app.flash_files]
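The docstring above explains that the method is structured so `@_uses_esptool` reconnects each time; the added `flash_files = []` initialisation additionally makes the cleanup path safe when opening one of the binaries fails. A minimal sketch of that pattern, assuming a cleanup step in `finally` and an `app_flash_files` list of `(offset, path)` tuples (both illustrative, not the actual IDFDUT code):

```python
def flash_sketch(app_flash_files):
    flash_files = []  # initialised before the try so the cleanup below can always iterate it
    try:
        # note: opening here prevents us from having to seek back to 0 each time
        flash_files = [(offs, open(path, "rb")) for (offs, path) in app_flash_files]
        # ... flash each (offset, file handle) pair to the target ...
    finally:
        for _, f in flash_files:  # runs even if one of the open() calls raised
            f.close()
```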

Show file

@@ -125,30 +125,80 @@ If you want to reproduce locally, you need to:
3. Run the failed case on your board (refer to Running Unit Tests section).
* There are some special UT cases (multi-stage cases, multi-device cases) which require user interaction:
* You can refer to the [unit test document](https://docs.espressif.com/projects/esp-idf/en/latest/api-guides/unit-tests.html#running-unit-tests) to run tests manually.
-* Or, you can use `tools/unit-test-app/unit_test.py` to run the test cases:
-* read document of tiny-test-fw, set correct `TEST_FW_PATH` and `IDF_PATH`
-* run `unit_test.py` (see examples below)
-* You can also use `tools/tiny-test-fw/Runner.py` to run test cases (it will be the same as what Runner do). Please use `python Runner.py -c $CONFIG_FILE $IDF_PATH/tools/unit-test-app` command, where `CONFIG_FILE` is a YAML file with same name with CI job in `components/idf_test/unit_test/CIConfigs` (artifacts, need to be download from `assign_test` job).
+* Or, you can use `tools/unit-test-app/unit_test.py` to run the test cases (see below)
## Running unit tests on local machine by `unit_test.py`
A couple of examples follow for running unit tests on local machine.
+First, install Python dependencies and export the Python path where the IDF CI Python modules are found:
```bash
-# run a simple unit test
-./unit_test.py "UART can do select()"
-# repeat the tests two times
-./unit_test.py -r 2 "UART can do select()"
-# use custom environment config file
-./unit_test.py -e /tmp/EnvConfigTemplate.yml "UART can do select()"
-# use custom application binary
-./unit_test.py -b /tmp/app.bin "UART can do select()"
-# run a list of unit tests
-./unit_test.py "UART can do select()" "concurent selects work"
-# add some options for unit tests
-./unit_test.py "UART can do select()",timeout:10 "concurent selects work",config:release,env_tag:UT_T2_1
-# run a multi stage test (type of test and child case numbers are autodetected)
-./unit_test.py "check a time after wakeup from deep sleep"
-# run a list of different unit tests (one simple and one multi stage test)
-./unit_test.py "concurent selects work" "NOINIT attributes behavior"
+pip install -r $IDF_PATH/tools/ci/python_packages/tiny_test_fw/requirements.txt
+export PYTHONPATH=$IDF_PATH/tools/ci/python_packages
```
+Change to the unit test app directory, configure the app as needed and build it in the default "build" directory. For example:
+```bash
+cd $IDF_PATH/tools/unit-test-app
+idf.py ut-apply-config-psram
+idf.py build -T vfs
+```
+(Instead of these steps, you can do whatever is needed to configure & build a unit test app with the tests and config that you need.)
+### Run a single test case by name
+```bash
+./unit_test.py "UART can do select()"
+```
+The `unit_test.py` script will flash the unit test binary from the (default) build directory, then run the test case.
+### Run a single test case twice
+```bash
+./unit_test.py -r 2 "UART can do select()"
+```
+### Run multiple unit test cases
+```bash
+./unit_test.py "UART can do select()" "concurrent selects work"
+```
+### Run a multi-stage test (type of test and child case numbers are autodetected)
+```bash
+./unit_test.py "check a time after wakeup from deep sleep"
+```
+### Run a list of different unit tests (one simple and one multi-stage test)
+```bash
+./unit_test.py "concurrent selects work" "check a time after wakeup from deep sleep"
+```
+### Use a custom environment config file
+```bash
+./unit_test.py -e /tmp/EnvConfigTemplate.yml "UART can do select()"
+```
+Note: No sample YAML file is currently available.
+### Use a custom application binary
+```bash
+./unit_test.py -b /tmp/app.bin "UART can do select()"
+```
+Note: This option doesn't currently work unless an EnvConfigTemplate is also supplied; use the default unit-test-app binaries only.
+### Add options to individual test cases
+```bash
+./unit_test.py "UART can do select()",timeout:10 "concurrent selects work",config:release,env_tag:UT_T2_1
+```
+Note: Setting the `config` and `env_tag` values doesn't significantly change anything except the console log output; the same binary is used.

Show file

@@ -168,6 +168,11 @@ def reset_dut(dut):
raise AssertionError("Reset {} ({}) failed!".format(dut.name, dut.port))
+def log_test_case(description, test_case, ut_config):
+Utility.console_log("Running {} '{}' (config {})".format(description, test_case["name"], ut_config), color="orange")
+Utility.console_log("Tags: %s" % ", ".join("%s=%s" % (k,v) for (k,v) in test_case.items() if k != "name" and v is not None), color="orange")
def run_one_normal_case(dut, one_case, junit_test_case):
reset_dut(dut)
@@ -231,14 +236,15 @@ def run_one_normal_case(dut, one_case, junit_test_case):
while not test_finish:
try:
+timeout_value = one_case["timeout"]
dut.expect_any((RESET_PATTERN, handle_exception_reset),
(EXCEPTION_PATTERN, handle_exception_reset),
(ABORT_PATTERN, handle_exception_reset),
(FINISH_PATTERN, handle_test_finish),
(UT_APP_BOOT_UP_DONE, handle_reset_finish),
-timeout=one_case["timeout"])
+timeout=timeout_value)
except DUT.ExpectTimeout:
Utility.console_log("Timeout in expect", color="orange")
Utility.console_log("Timeout in expect (%s seconds)" % timeout_value, color="orange")
junit_test_case.add_failure_info("timeout")
one_case_finish(False)
break
@@ -278,6 +284,7 @@ def run_unit_test_cases(env, extra_data):
Utility.console_log("Download finished, start running test cases", "O")
for one_case in case_config[ut_config]:
log_test_case("test case", one_case, ut_config)
performance_items = []
# create junit report test case
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
@@ -394,7 +401,7 @@ class Handler(threading.Thread):
(self.FINISH_PATTERN, handle_device_test_finish), # test finish pattern
timeout=self.timeout)
except DUT.ExpectTimeout:
Utility.console_log("Timeout in expect", color="orange")
Utility.console_log("Timeout in expect (%s seconds)" % self.timeout, color="orange")
one_device_case_finish(False)
break
@@ -481,6 +488,7 @@ def run_multiple_devices_cases(env, extra_data):
for ut_config in case_config:
Utility.console_log("Running unit test for config: " + ut_config, "O")
for one_case in case_config[ut_config]:
log_test_case("multi-device test", one_case, ut_config, )
result = False
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
try:
@@ -594,14 +602,15 @@ def run_one_multiple_stage_case(dut, one_case, junit_test_case):
while not stage_finish:
try:
+timeout_value = one_case["timeout"]
dut.expect_any((RESET_PATTERN, handle_exception_reset),
(EXCEPTION_PATTERN, handle_exception_reset),
(ABORT_PATTERN, handle_exception_reset),
(FINISH_PATTERN, handle_test_finish),
(UT_APP_BOOT_UP_DONE, handle_next_stage),
-timeout=one_case["timeout"])
+timeout=timeout_value)
except DUT.ExpectTimeout:
Utility.console_log("Timeout in expect", color="orange")
Utility.console_log("Timeout in expect (%s seconds)" % timeout_value, color="orange")
one_case_finish(False)
break
if stage_finish[0] == "break":
@@ -637,6 +646,7 @@ def run_multiple_stage_cases(env, extra_data):
dut.start_app()
for one_case in case_config[ut_config]:
log_test_case("multi-stage test", one_case, ut_config)
performance_items = []
junit_test_case = TinyFW.JunitReport.create_test_case("[{}] {}".format(ut_config, one_case["name"]))
try:
@@ -755,7 +765,7 @@ if __name__ == '__main__':
for test_item in test_args:
if len(test_item) == 0:
continue
-pair = test_item.split(r':')
+pair = test_item.split(r':', 1)
if len(pair) == 1 or pair[0] == 'name':
test_dict['name'] = pair[0]
elif len(pair) == 2:
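With `maxsplit=1`, each test item is divided at the first colon only, so an option value (or a name) that itself contains a colon is no longer broken into extra pieces. A quick illustration of the difference (the strings below are made up for demonstration, not taken from the real test lists):

```python
# Without maxsplit, a second colon yields three elements, so the
# key/value branch (len(pair) == 2) would not match:
assert "name:UART test: basic".split(':') == ['name', 'UART test', ' basic']

# With maxsplit=1 the item still parses as one key and one value:
assert "name:UART test: basic".split(':', 1) == ['name', 'UART test: basic']

# Items without any colon are unaffected either way:
assert "UART can do select()".split(':', 1) == ['UART can do select()']
```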