stages:
  - pre_check
  - build
  - assign_test
  - build_doc
  - target_test
  - host_test
  - test_deploy
  - deploy
  - post_deploy

workflow:
  rules:
    # Disable push-triggered pipelines on non-protected refs
    - if: '$CI_COMMIT_REF_NAME != "master" && $CI_COMMIT_BRANCH !~ /^release\/v/ && $CI_COMMIT_TAG !~ /^v\d+\.\d+(\.\d+)?($|-)/ && $CI_PIPELINE_SOURCE == "push"'
      when: never
    # Merged-result pipelines run on a temporary commit, so use $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA instead of $CI_COMMIT_SHA.
    # Use PIPELINE_COMMIT_SHA everywhere a commit SHA is required.
    - if: $CI_OPEN_MERGE_REQUESTS != null
      variables:
        PIPELINE_COMMIT_SHA: $CI_MERGE_REQUEST_SOURCE_BRANCH_SHA
    - if: $CI_OPEN_MERGE_REQUESTS == null
      variables:
        PIPELINE_COMMIT_SHA: $CI_COMMIT_SHA
    - when: always

variables:
  # System environment

  # Common parameters for 'make' during CI tests
  MAKEFLAGS: "-j5 --no-keep-going"

  # GitLab-CI environment

  # XXX_ATTEMPTS variables (https://docs.gitlab.com/ce/ci/yaml/README.html#job-stages-attempts) are not defined here.
  # Use values from "CI / CD Settings" - "Variables".

  # GIT_STRATEGY is not defined here.
  # Use an option from "CI / CD Settings" - "General pipelines".

  # Submodules are downloaded as archives instead of being cloned.
  # Recursive submodules are not fetched, as they are not currently used in CI.
  GIT_SUBMODULE_STRATEGY: none
  SUBMODULE_FETCH_TOOL: "tools/ci/ci_fetch_submodule.py"
  # By default, all submodules are fetched.
  # Jobs can override this variable to fetch only the submodules they require.
  # Set to "none" if no submodules are needed.
  SUBMODULES_TO_FETCH: "all"
  # Tell the build system not to check for submodule updates, since archives are downloaded instead of cloned
  IDF_SKIP_CHECK_SUBMODULES: 1

  IDF_PATH: "$CI_PROJECT_DIR"
  BATCH_BUILD: "1"
  V: "0"
  CHECKOUT_REF_SCRIPT: "$CI_PROJECT_DIR/tools/ci/checkout_project_ref.py"

  # Docker images
  BOT_DOCKER_IMAGE_TAG: ":latest"
  ESP_IDF_DOC_ENV_IMAGE: "$CI_DOCKER_REGISTRY/esp-idf-doc-env:v4.4-1-v4"
  ESP_ENV_IMAGE: "$CI_DOCKER_REGISTRY/esp-env:v4.4-1"
  AFL_FUZZER_TEST_IMAGE: "$CI_DOCKER_REGISTRY/afl-fuzzer-test:v4.4-1-1"
  CLANG_STATIC_ANALYSIS_IMAGE: "${CI_DOCKER_REGISTRY}/clang-static-analysis:v4.4-1-2"
  SONARQUBE_SCANNER_IMAGE: "${CI_DOCKER_REGISTRY}/sonarqube-scanner:3"

  # Target test config file, used by the assign test job
  CI_TARGET_TEST_CONFIG_FILE: "$CI_PROJECT_DIR/.gitlab/ci/target-test.yml"

  # Target test repo parameters
  TEST_ENV_CONFIG_REPO: "https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/qa/ci-test-runner-configs.git"
  CI_AUTO_TEST_SCRIPT_REPO_URL: "https://gitlab-ci-token:${BOT_TOKEN}@${CI_SERVER_HOST}:${CI_SERVER_PORT}/qa/auto_test_script.git"
  CI_AUTO_TEST_SCRIPT_REPO_BRANCH: "ci/v3.1"

.setup_tools_unless_target_test: &setup_tools_unless_target_test |
  if [[ -n "$IDF_DONT_USE_MIRRORS" ]]; then
    export IDF_MIRROR_PREFIX_MAP=
  fi
  if [[ "$SETUP_TOOLS" == "1" || "$CI_JOB_STAGE" != "target_test" ]]; then
    tools/idf_tools.py --non-interactive install ${SETUP_TOOLS_LIST:-} && eval "$(tools/idf_tools.py --non-interactive export)" || exit 1
    if [[ ! -z "$OOCD_DISTRO_URL" && "$CI_JOB_STAGE" == "target_test" ]]; then
      echo "Using custom OpenOCD from ${OOCD_DISTRO_URL}"
      wget $OOCD_DISTRO_URL
      ARCH_NAME=$(basename $OOCD_DISTRO_URL)
      tar -x -f $ARCH_NAME
      export OPENOCD_SCRIPTS=$PWD/openocd-esp32/share/openocd/scripts
      export PATH=$PWD/openocd-esp32/bin:$PATH
    fi
  fi

before_script:
  - source tools/ci/utils.sh
  - source tools/ci/setup_python.sh
  - add_gitlab_ssh_keys
  - source tools/ci/configure_ci_environment.sh
  - *setup_tools_unless_target_test
  - fetch_submodules

# Used for check scripts that should run unconditionally
.before_script_no_sync_submodule:
  before_script:
    - echo "Not setting up GitLab key, not fetching submodules"
    - source tools/ci/utils.sh
    - source tools/ci/setup_python.sh
    - source tools/ci/configure_ci_environment.sh

.before_script_minimal:
  before_script:
    - echo "Only load utils.sh"
    - source tools/ci/utils.sh

.before_script_macos:
  before_script:
    - source tools/ci/utils.sh
    - export IDF_TOOLS_PATH="${HOME}/.espressif_runner_${CI_RUNNER_ID}_${CI_CONCURRENT_ID}"
    - $IDF_PATH/tools/idf_tools.py install-python-env
    # On macOS, these tools need to be installed
    - $IDF_PATH/tools/idf_tools.py --non-interactive install cmake ninja
    # This adds tools (compilers) and the version-specific Python environment to PATH
    - *setup_tools_unless_target_test
    # Install packages required by CI scripts into the IDF Python environment
    - pip install -r $IDF_PATH/tools/ci/python_packages/ttfw_idf/requirements.txt
    - source tools/ci/configure_ci_environment.sh
    # Part of tools/ci/setup_python.sh; we don't use pyenv on macOS, so we can't run the rest of the script.
    - export PYTHONPATH="$IDF_PATH/tools:$IDF_PATH/tools/ci/python_packages:$PYTHONPATH"
    - fetch_submodules

default:
  retry:
    max: 2
    # In case of a runner failure we could hop to another one, or a network error could go away.
    when: runner_system_failure

include:
  - '.gitlab/ci/rules.yml'
  - '.gitlab/ci/docs.yml'
  - '.gitlab/ci/static-code-analysis.yml'
  - '.gitlab/ci/pre_check.yml'
  - '.gitlab/ci/build.yml'
  - '.gitlab/ci/assign-test.yml'
  - '.gitlab/ci/host-test.yml'
  - '.gitlab/ci/target-test.yml'
  - '.gitlab/ci/deploy.yml'