Add workflow, continued work on parser
multiplemonomials committed Apr 7, 2024
1 parent 7bbc157 commit 006634a
Showing 3 changed files with 102 additions and 2 deletions.
58 changes: 58 additions & 0 deletions .github/workflows/generate-test-report-website.yaml
@@ -0,0 +1,58 @@
name: Generate and Publish Test Report Website
on: push
jobs:
  generate-site:
    runs-on: ubuntu-latest
    steps:

      # Check out this repo and the Mbed submodule
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Python environment
        uses: actions/setup-python@v5
        with:
          python-version: '3.9'
          cache: 'pip' # caching pip dependencies

      - name: Install Python dependencies
        run: |
          pip install -r Test-Result-Evaluator/requirements.txt

      - name: Run website generator
        run: |
          cd Test-Result-Evaluator
          python -m test_result_evaluator.create_database ../CI-Shield-Tests/mbed-os mbed_tests.db
          python -m test_result_evaluator.generate_results mbed_tests.db generated-site

          # Per the docs, the HTML files have to be inside a file called github-pages.tar.gz
          # See here: https://github.com/actions/upload-pages-artifact
          cd generated-site
          tar czf ../../github-pages.tar.gz *

      - name: Upload Pages artifact
        uses: actions/upload-pages-artifact@v3

  # Note: The below is copied almost verbatim from the example job from the deploy-pages action.
  # See https://github.com/actions/deploy-pages
  deploy-site:
    # Add a dependency to the build job
    needs: generate-site

    # Grant GITHUB_TOKEN the permissions required to make a Pages deployment
    permissions:
      pages: write      # to deploy to Pages
      id-token: write   # to verify the deployment originates from an appropriate source

    # Deploy to the github-pages environment
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}

    # Specify runner + deployment step
    runs-on: ubuntu-latest
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4 # or specific "vX.X.X" version tag for this action
2 changes: 1 addition & 1 deletion CI-Shield-Tests/mbed-os
Submodule mbed-os updated 297 files
44 changes: 43 additions & 1 deletion Test-Result-Evaluator/test_result_evaluator/parse_test_run.py
@@ -3,13 +3,24 @@
"""
import pathlib
import sys
import re
from typing import Tuple, List

import junitparser.junitparser
from junitparser import JUnitXml

from test_result_evaluator import mbed_test_database
from test_result_evaluator.mbed_test_database import TestResult

# Regexes for parsing Greentea output
# ------------------------------------------------------------------------

# Matches one line of the test case list sent at the start of each test
GREENTEA_TESTCASE_NAME_RE = re.compile(r"\{\{__testcase_name;([^}]+)}}")

# Matches a test case which completes (either successfully or not) and allows extracting the output
GREENTEA_TESTCASE_OUTPUT_RE = re.compile(r"(\{\{__testcase_start;[^|]+?}}.+?\{\{__testcase_finish;[^|]+?;(\d);\d}})", re.DOTALL)

if len(sys.argv) != 4:
    print(f"Usage: {sys.argv[0]} <path to database to use> <Mbed target> <path to JUnit XML to parse>")
    sys.exit(1)
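
For reference (not part of the commit): a minimal, self-contained sketch of what the two Greentea regexes above extract. The marker lines in sample_output are invented to match the patterns; only the regex definitions are copied from parse_test_run.py. In the real script, the same findall calls run against test_report.system_out.

import re

# Regex definitions copied from parse_test_run.py above
GREENTEA_TESTCASE_NAME_RE = re.compile(r"\{\{__testcase_name;([^}]+)}}")
GREENTEA_TESTCASE_OUTPUT_RE = re.compile(
    r"(\{\{__testcase_start;[^|]+?}}.+?\{\{__testcase_finish;[^|]+?;(\d);\d}})", re.DOTALL)

# Hypothetical Greentea serial output, shaped to match the markers the regexes expect
sample_output = (
    "{{__testcase_name;TestA}}\n"
    "{{__testcase_name;TestB}}\n"
    "{{__testcase_start;TestA}}\nTestA log lines...\n{{__testcase_finish;TestA;1;0}}\n"
    "{{__testcase_start;TestB}}\nTestB log lines...\n{{__testcase_finish;TestB;0;1}}\n"
)

# The name regex yields the declared test case list
print(re.findall(GREENTEA_TESTCASE_NAME_RE, sample_output))  # ['TestA', 'TestB']

# The output regex yields one (captured output, pass indicator) tuple per completed test case
print([passed for _output, passed in re.findall(GREENTEA_TESTCASE_OUTPUT_RE, sample_output)])  # ['1', '0']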
@@ -24,6 +35,8 @@

test_report: junitparser.junitparser.TestCase
for test_report in junit_report:

    # First record info about the larger test. We can do this entirely using the data recorded by CTest.
    if test_report.is_passed:
        test_suite_result = TestResult.PASSED
    elif test_report.is_skipped:
@@ -32,8 +45,37 @@
        test_suite_result = TestResult.FAILED

    print(f"Parsing results of {test_report.classname} ({test_suite_result.name})...")
    database.add_test_record(test_report.classname, mbed_target, test_report.time, test_suite_result,
                             test_report.system_out)

    if test_suite_result != TestResult.SKIPPED:
        # Now things get a bit more complicated as we have to parse Greentea's output directly to determine
        # the list of tests.
        # First use a regex to extract the list of test cases...
        test_case_names = re.findall(GREENTEA_TESTCASE_NAME_RE, test_report.system_out)

        if len(test_case_names) > 0:
            # This is a "normal" test with test cases. Parse them.
            # Regex returns tuple of (output, passed/failed indicator)
            test_case_records: List[Tuple[str, str]] = re.findall(GREENTEA_TESTCASE_OUTPUT_RE, test_report.system_out)

            if len(test_case_records) < len(test_case_names):
                # Did one test case crash the test?
                # See if we can find the start of this test case but no end.
                crashing_test_name = test_case_names[len(test_case_records)]
                crash_re = re.compile(r"\{\{__testcase_start;" + crashing_test_name + r"}}(.+?)teardown\(\) finished", re.DOTALL)
                test_case_crash_output = re.search(crash_re, test_report.system_out)

                if test_case_crash_output is None:
                    raise RuntimeError(f"Parse error: found {len(test_case_names)} test cases but only found {len(test_case_records)} records of test cases executing")
                else:
                    print(f"Note: Test case '{crashing_test_name}' in test {test_report.classname} appears to have crashed and prevented {len(test_case_names) - len(test_case_records) - 1} subsequent tests from running")
                    test_case_records.append((test_case_crash_output.group(0), "0"))

    database.add_test_record(test_report.classname, mbed_target, test_report.time, test_suite_result, test_report.system_out)
    # However, there are some tests (e.g. test-mbed-drivers-dev-null) which don't use the greentea
    # system in a standard way and therefore can't be divided evenly into test cases. These tests need special
    # handling.

print(">> Done.")
database.close()
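
Also for reference (not part of the commit): a sketch of the crash-recovery branch above, using invented output for a hypothetical crashed test case named TestC. The "teardown() finished" sentinel and the regex construction come from the diff; the fault-handler line is illustrative only. In parse_test_run.py the matched span is appended to test_case_records with a "0" indicator, so the crashed case is recorded as a failure.

import re

# Hypothetical scenario: a declared test case started but the target crashed before
# emitting a __testcase_finish marker
crashing_test_name = "TestC"
sample_output = (
    "{{__testcase_start;TestC}}\n"
    "++ MbedOS Fault Handler ++ (illustrative crash output)\n"
    "teardown() finished\n"
)

# Same construction as in the diff: match from the start marker to the host-side
# teardown message, with no finish marker required
crash_re = re.compile(r"\{\{__testcase_start;" + crashing_test_name + r"}}(.+?)teardown\(\) finished",
                      re.DOTALL)

test_case_crash_output = re.search(crash_re, sample_output)
print(test_case_crash_output is not None)  # True -- group(0) holds the crashed case's output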
