From d112e52f44c946df0d75374e202813e742649245 Mon Sep 17 00:00:00 2001 From: Roman Bredehoft Date: Thu, 8 Feb 2024 16:32:27 +0100 Subject: [PATCH] chore: improve flaky rerun step in CI (pytest crash, appended coverage) --- Makefile | 7 +++++-- script/actions_utils/pytest_failed_test_report.py | 10 ++++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5aec85462..f73b049fc 100644 --- a/Makefile +++ b/Makefile @@ -235,8 +235,7 @@ pytest_internal_parallel: # --global-coverage-infos-json=global-coverage-infos.json is to dump the coverage report in the file # --cov PATH is the directory PATH to consider for coverage. Default to SRC_DIR=src # --cov-fail-under=100 is to make the command fail if coverage does not reach a 100% -# --cov-report=term-missing:skip-covered is used to print the missing lines for coverage withtout -# taking into account skiped tests +# --cov-report=term-missing:skip-covered is used to avoid printing covered lines for all files .PHONY: pytest # Run pytest on all tests pytest: "$(MAKE)" pytest_internal_parallel \ @@ -273,6 +272,8 @@ pytest_no_flaky: check_current_flaky_tests # --cov PATH is the directory PATH to consider for coverage. 
Default to SRC_DIR=src # --cov-append is to make the coverage of the previous pytest run to also consider the tests that are # going to be re-executed by 'pytest_run_last_failed' +# --cov-fail-under=100 is to make the command fail if coverage does not reach 100% +# --cov-report=term-missing:skip-covered is used to avoid printing covered lines for all files # --global-coverage-infos-json=global-coverage-infos.json is to dump the coverage report in the file # --last-failed runs all last failed tests # --last-failed-no-failures none' indicates pytest not to run anything (instead of running @@ -282,6 +283,8 @@ pytest_run_last_failed: poetry run pytest $(TEST) \ --cov=$(SRC_DIR) \ --cov-append \ + --cov-fail-under=100 \ + --cov-report=term-missing:skip-covered \ --global-coverage-infos-json=global-coverage-infos.json \ --last-failed \ --last-failed-no-failures none diff --git a/script/actions_utils/pytest_failed_test_report.py b/script/actions_utils/pytest_failed_test_report.py index 2ed6301ad..15e4cefd0 100755 --- a/script/actions_utils/pytest_failed_test_report.py +++ b/script/actions_utils/pytest_failed_test_report.py @@ -97,8 +97,14 @@ def write_failed_tests_report( else: failed_tests_report["non_flaky"].append(test_name) # type: ignore[attr-defined] - # If no non-flaky tests failed, report that all failed tests were known flaky tests - if not failed_tests_report["non_flaky"]: + # If there are some flaky tests but no non-flaky tests failed, report that all failed tests + # were known flaky tests + # We need to make sure that at least one flaky test has been detected for one specific + # reason: if, for example, a test file has a syntax error, pytest will "crash" and therefore + # won't collect any tests in the file. 
The problem is that this will return an 'exitcode' + # of 1, making this script unexpectedly return 'all_failed_tests_are_flaky=True' in the + # case where 'failed_tests_report["non_flaky"]' is empty + if failed_tests_report["flaky"] and not failed_tests_report["non_flaky"]: failed_tests_report["all_failed_tests_are_flaky"] = True else: