refactor(gherkin-parser): return dataclass from run_tests (#464)
ajclyall committed Aug 2, 2024
1 parent 02c734a commit a9fcda0
Showing 3 changed files with 34 additions and 20 deletions.
34 changes: 24 additions & 10 deletions reana_commons/gherkin_parser/parser.py
@@ -12,6 +12,7 @@
 from gherkin.parser import Parser
 from gherkin.pickles.compiler import Compiler
 from parse import parse
+from dataclasses import dataclass
 from reana_commons.gherkin_parser.errors import (
     StepDefinitionNotFound,
     StepSkipped,
@@ -29,6 +30,19 @@ class AnalysisTestStatus(enum.Enum):
     skipped = 2
 
 
+@dataclass
+class TestResult:
+    """Dataclass for storing test results."""
+
+    scenario: str
+    failed_testcase: str
+    result: AnalysisTestStatus
+    error_log: str
+    analysis_run_id: str
+    feature: str
+    checked_at: datetime
+
+
 def _get_step_text(step: Dict) -> str:
     """Return the text of the step, including possible multiline arguments.
@@ -167,7 +181,7 @@ def run_tests(
     feature_name: str,
     feature_file,
     step_mapping: Dict,
-) -> List[Dict]:
+) -> List[TestResult]:
     """Run all the tests in the parsed feature file.
 
     :param feature_name: The name of the feature inside the feature file.
@@ -205,15 +219,15 @@ def run_tests(
                 logging.error(f"Error log: {e}")
                 break
         test_results.append(
-            {
-                "scenario": scenario["name"],
-                "failed_testcase": failed_testcase,
-                "result": result,
-                "error_log": error_log,
-                "analysis_run_id": analysis_run_id,
-                "feature": feature_name,
-                "checked_at": datetime.now(),
-            }
+            TestResult(
+                scenario=scenario["name"],
+                failed_testcase=failed_testcase,
+                result=result,
+                error_log=error_log,
+                analysis_run_id=analysis_run_id,
+                feature=feature_name,
+                checked_at=datetime.now(),
+            )
         )
         if result == AnalysisTestStatus.passed:
             logging.info(f"Scenario `{scenario['name']}` passed!")
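For context, a minimal sketch of how a caller consumes the new return type. The run id, feature file path, and data_fetcher argument are hypothetical stand-ins, modeled on the run_tests calls in the test suite below:

from reana_commons.gherkin_parser.parser import AnalysisTestStatus, run_tests

# Hypothetical arguments mirroring the tests' usage; data_fetcher stands in
# for a real fetcher object, such as the mock_data_fetcher fixture used below.
test_results = run_tests("run-id", "analysis.feature", "test-workflow", data_fetcher)

for tr in test_results:
    # Entries are now TestResult instances, so attribute access (tr.result)
    # replaces the old dict lookups (tr["result"]).
    if tr.result == AnalysisTestStatus.failed:
        print(f"{tr.scenario} failed at {tr.checked_at}: {tr.error_log}")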
8 changes: 4 additions & 4 deletions tests/test_gherkin_functions.py
@@ -57,7 +57,7 @@ def test_workflow_execution_completes(
         mock_data_fetcher,
     )
     for scenario in test_results:
-        assert scenario["result"] == expected_tests_result
+        assert scenario.result == expected_tests_result
 
 
 def test_log_content(mock_data_fetcher):
@@ -70,7 +70,7 @@ def test_log_content(mock_data_fetcher):
         "run-id", feature_file_path, "test-workflow", mock_data_fetcher
     )
     for scenario in test_results:
-        assert scenario["result"] in (
+        assert scenario.result in (
             AnalysisTestStatus.passed,
             AnalysisTestStatus.skipped,
         )
@@ -88,7 +88,7 @@ def test_workflow_duration(mock_data_fetcher):
         mock_data_fetcher,
     )
     for scenario in test_results:
-        assert scenario["result"] in (
+        assert scenario.result in (
             AnalysisTestStatus.passed,
             AnalysisTestStatus.skipped,
         )
@@ -119,7 +119,7 @@ def get_mocked_workflow_disk_usage(workflow, parameters):
     )
 
     for scenario in test_results:
-        assert scenario["result"] in (
+        assert scenario.result in (
             AnalysisTestStatus.passed,
             AnalysisTestStatus.skipped,
         )
12 changes: 6 additions & 6 deletions tests/test_gherkin_parser.py
@@ -88,7 +88,7 @@ def test_run_tests(mock_data_fetcher):
     )
     # Assert that each of the test results has a status of "passed"
     for test_result in test_results:
-        assert test_result["result"] == AnalysisTestStatus.passed
+        assert test_result.result == AnalysisTestStatus.passed
 
 
 def test_run_tests_no_feature_file():
@@ -130,9 +130,9 @@ def test_test_result_fail(mock_data_fetcher):
         "test-workflow",
         mock_data_fetcher,
     )
-    assert test_results[0]["result"] == AnalysisTestStatus.passed
-    assert test_results[1]["result"] == AnalysisTestStatus.failed
-    assert test_results[2]["result"] == AnalysisTestStatus.passed
+    assert test_results[0].result == AnalysisTestStatus.passed
+    assert test_results[1].result == AnalysisTestStatus.failed
+    assert test_results[2].result == AnalysisTestStatus.passed
 
 
 @pytest.mark.parametrize(
@@ -168,5 +168,5 @@ def test_test_expected_workflow_fail_not_skipped(
         mock_data_fetcher,
     )
     for scenario in test_results:
-        assert scenario["result"] == expected_tests_result
-        assert scenario["error_log"] == expected_error_log
+        assert scenario.result == expected_tests_result
+        assert scenario.error_log == expected_error_log
