diff --git a/anta/device.py b/anta/device.py
index 894569e7f..561323f96 100644
--- a/anta/device.py
+++ b/anta/device.py
@@ -255,7 +255,7 @@ class AsyncEOSDevice(AntaDevice):
     """

-    def __init__(
+    def __init__(  # noqa: PLR0913
        self,
        host: str,
        username: str,
diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py
index 81c4659cb..ef0c4dfcb 100644
--- a/anta/result_manager/__init__.py
+++ b/anta/result_manager/__init__.py
@@ -9,6 +9,7 @@
 from collections import defaultdict
 from functools import cached_property
 from itertools import chain
+from typing import Any

 from anta.result_manager.models import AntaTestStatus, TestResult

@@ -89,6 +90,10 @@ def __init__(self) -> None:
         If the status of the added test is error, the status is untouched and the error_status is set to True.
         """
+        self.reset()
+
+    def reset(self) -> None:
+        """Create or reset the attributes of the ResultManager instance."""
         self._result_entries: list[TestResult] = []
         self.status: AntaTestStatus = AntaTestStatus.UNSET
         self.error_status = False
@@ -122,10 +127,15 @@ def results(self, value: list[TestResult]) -> None:
         for result in value:
             self.add(result)

+    @property
+    def dump(self) -> list[dict[str, Any]]:
+        """Get a list of dictionaries of the results."""
+        return [result.model_dump() for result in self._result_entries]
+
     @property
     def json(self) -> str:
         """Get a JSON representation of the results."""
-        return json.dumps([result.model_dump() for result in self._result_entries], indent=4)
+        return json.dumps(self.dump, indent=4)

     @property
     def sorted_category_stats(self) -> dict[str, CategoryStats]:
diff --git a/anta/runner.py b/anta/runner.py
index 7b0eadf75..eda23f3a0 100644
--- a/anta/runner.py
+++ b/anta/runner.py
@@ -211,7 +211,7 @@ def get_coroutines(selected_tests: defaultdict[AntaDevice, set[AntaTestDefinitio


 @cprofile()
-async def main(  # noqa: PLR0913
+async def main(
     manager: ResultManager,
     inventory: AntaInventory,
     catalog: AntaCatalog,
diff --git a/asynceapi/device.py b/asynceapi/device.py
index 7793ce519..c423c366c 100644
--- a/asynceapi/device.py
+++ b/asynceapi/device.py
@@ -121,7 +121,7 @@ async def check_connection(self) -> bool:
         """
         return await port_check_url(self.base_url)

-    async def cli(  # noqa: PLR0913
+    async def cli(
         self,
         command: str | dict[str, Any] | None = None,
         commands: Sequence[str | dict[str, Any]] | None = None,
@@ -195,7 +195,7 @@ async def cli(  # noqa: PLR0913
                 return None
             raise

-    def _jsonrpc_command(  # noqa: PLR0913
+    def _jsonrpc_command(
         self,
         commands: Sequence[str | dict[str, Any]] | None = None,
         ofmt: str | None = None,
diff --git a/pyproject.toml b/pyproject.toml
index ff2357865..c76983db3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -367,6 +367,7 @@ convention = "numpy"
 # we have not removed pylint completely, these settings should be kept in sync with our pylintrc file.
 # https://github.com/astral-sh/ruff/issues/970
 max-branches = 13
+max-args = 10

 [tool.ruff.lint.mccabe]
 # Unlike Flake8, default to a complexity level of 10.
@@ -377,6 +378,7 @@ max-complexity = 10
     "RICH_COLOR_PALETTE"
 ]

+
 [tool.ruff.lint.flake8-type-checking]
 # These classes require that type annotations be available at runtime
 runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.Input"]
@@ -390,7 +392,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
 "tests/units/*" = [
     "ARG002", # Sometimes we need to declare unused arguments when a parameter is not used but declared in @pytest.mark.parametrize
     "FBT001", # Boolean-typed positional argument in function definition
-    "PLR0913", # Too many arguments to function call
     "PLR2004", # Magic value used in comparison, consider replacing {value} with a constant variable
     "S105", # Passwords are indeed hardcoded in tests
     "S106", # Passwords are indeed hardcoded in tests
@@ -412,7 +413,7 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
     "T201", # Allow print statements
 ]
 "anta/cli/*" = [
-    "PLR0913", # Allow more than 5 input arguments in CLI functions
+    "PLR0913", # CLI has many arguments defined in functions
     "ANN401", # TODO: Check if we can update the Any type hints in the CLI
 ]
 "anta/tests/field_notices.py" = [
@@ -429,13 +430,6 @@ runtime-evaluated-base-classes = ["pydantic.BaseModel", "anta.models.AntaTest.In
 ]
 "anta/tools.py" = [
     "ANN401", # Ok to use Any type hint in our custom get functions
-    "PLR0913", # Ok to have more than 5 arguments in our custom get functions
-]
-"anta/device.py" = [
-    "PLR0913", # Ok to have more than 5 arguments in the AntaDevice classes
-]
-"anta/inventory/__init__.py" = [
-    "PLR0913", # Ok to have more than 5 arguments in the AntaInventory class
 ]
 "examples/*.py" = [ # These are example scripts and linked in snippets
     "S105", # Possible hardcoded password
@@ -470,6 +464,7 @@ disable = [ # Any rule listed here can be disabled: https://github.com/astral-sh
   "reimported",
   "wrong-import-order",
   "wrong-import-position",
+  "unnecessary-lambda",
   "abstract-class-instantiated", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-instantiation-of-abstract-classes-abstract
   "unexpected-keyword-arg", # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg and other rules
   "no-value-for-parameter" # Overlap with https://mypy.readthedocs.io/en/stable/error_code_list.html#check-arguments-in-calls-call-arg
diff --git a/tests/benchmark/conftest.py b/tests/benchmark/conftest.py
index 61f2fa11c..04ce54c24 100644
--- a/tests/benchmark/conftest.py
+++ b/tests/benchmark/conftest.py
@@ -4,12 +4,14 @@
 """Fixtures for benchmarking ANTA."""

 import logging
+from collections import defaultdict

 import pytest
 import respx
 from _pytest.terminal import TerminalReporter

 from anta.catalog import AntaCatalog
+from anta.result_manager import ResultManager

 from .utils import AntaMockEnvironment

@@ -17,6 +19,12 @@
 TEST_CASE_COUNT = None

+# Used to globally configure the benchmarks by specifying parameters for inventories
+BENCHMARK_PARAMETERS = [
+    pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
+    pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
+]
+

 @pytest.fixture(name="anta_mock_env", scope="session")  # We want this fixture to have a scope set to session to avoid reparsing all the unit tests data.
 def anta_mock_env_fixture() -> AntaMockEnvironment:
@@ -35,6 +43,22 @@ def catalog(anta_mock_env: AntaMockEnvironment) -> AntaCatalog:
     return anta_mock_env.catalog


+@pytest.fixture(name="session_results", scope="session")  # We want this fixture to be reused across test modules within tests/benchmark
+def session_results_fixture() -> defaultdict[str, ResultManager]:
+    """Return a dictionary of ResultManager objects for the benchmarks.
+
+    The key is the test id as defined in the pytest_generate_tests in this module.
+    Used to pass a populated ResultManager from one benchmark to another.
+    """
+    return defaultdict(lambda: ResultManager())
+
+
+@pytest.fixture
+def results(request: pytest.FixtureRequest, session_results: defaultdict[str, ResultManager]) -> ResultManager:
+    """Return the unique ResultManager object for the current benchmark parameter."""
+    return session_results[request.node.callspec.id]
+
+
 def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
     """Display the total number of ANTA unit test cases used to benchmark."""
     terminalreporter.write_sep("=", f"{TEST_CASE_COUNT} ANTA test cases")
@@ -49,9 +73,12 @@ def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
                 return
         metafunc.parametrize(
             "inventory",
-            [
-                pytest.param({"count": 1, "disable_cache": True, "reachable": True}, id="1-device"),
-                pytest.param({"count": 2, "disable_cache": True, "reachable": True}, id="2-devices"),
-            ],
+            BENCHMARK_PARAMETERS,
+            indirect=True,
+        )
+    elif "results" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "results",
+            BENCHMARK_PARAMETERS,
             indirect=True,
         )
diff --git a/tests/benchmark/test_anta.py b/tests/benchmark/test_anta.py
index e82de645d..7d1f21c60 100644
--- a/tests/benchmark/test_anta.py
+++ b/tests/benchmark/test_anta.py
@@ -5,6 +5,7 @@

 import asyncio
 import logging
+from collections import defaultdict
 from unittest.mock import patch

 import pytest
@@ -22,45 +23,61 @@
 logger = logging.getLogger(__name__)


-def test_anta_dry_run(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None:
+def test_anta_dry_run(
+    benchmark: BenchmarkFixture,
+    event_loop: asyncio.AbstractEventLoop,
+    catalog: AntaCatalog,
+    inventory: AntaInventory,
+    request: pytest.FixtureRequest,
+    session_results: defaultdict[str, ResultManager],
+) -> None:
     """Benchmark ANTA in Dry-Run Mode."""
     # Disable logging during ANTA execution to avoid having these function time in benchmarks
     logging.disable()

-    def _() -> ResultManager:
-        manager = ResultManager()
-        catalog.clear_indexes()
-        event_loop.run_until_complete(main(manager, inventory, catalog, dry_run=True))
-        return manager
+    results = session_results[request.node.callspec.id]

-    manager = benchmark(_)
+    @benchmark
+    def _() -> None:
+        results.reset()
+        catalog.clear_indexes()
+        event_loop.run_until_complete(main(results, inventory, catalog, dry_run=True))

     logging.disable(logging.NOTSET)
-    if len(manager.results) != len(inventory) * len(catalog.tests):
-        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(manager.results)}", pytrace=False)
-    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(manager.results)}\n" "-----------------------------------------------"
+
+    if len(results.results) != len(inventory) * len(catalog.tests):
+        pytest.fail(f"Expected {len(inventory) * len(catalog.tests)} tests but got {len(results.results)}", pytrace=False)
+    bench_info = "\n--- ANTA NRFU Dry-Run Benchmark Information ---\n" f"Test count: {len(results.results)}\n" "-----------------------------------------------"
f"Test count: {len(results.results)}\n" "-----------------------------------------------" logger.info(bench_info) @patch("anta.models.AntaTest.collect", collect) @patch("anta.device.AntaDevice.collect_commands", collect_commands) +@pytest.mark.dependency(name="anta_benchmark", scope="package") @respx.mock # Mock eAPI responses -def test_anta(benchmark: BenchmarkFixture, event_loop: asyncio.AbstractEventLoop, catalog: AntaCatalog, inventory: AntaInventory) -> None: +def test_anta( + benchmark: BenchmarkFixture, + event_loop: asyncio.AbstractEventLoop, + catalog: AntaCatalog, + inventory: AntaInventory, + request: pytest.FixtureRequest, + session_results: defaultdict[str, ResultManager], +) -> None: """Benchmark ANTA.""" # Disable logging during ANTA execution to avoid having these function time in benchmarks logging.disable() - def _() -> ResultManager: - manager = ResultManager() - catalog.clear_indexes() - event_loop.run_until_complete(main(manager, inventory, catalog)) - return manager + results = session_results[request.node.callspec.id] - manager = benchmark(_) + @benchmark + def _() -> None: + results.reset() + catalog.clear_indexes() + event_loop.run_until_complete(main(results, inventory, catalog)) logging.disable(logging.NOTSET) - if len(catalog.tests) * len(inventory) != len(manager.results): + if len(catalog.tests) * len(inventory) != len(results.results): # This could mean duplicates exist. # TODO: consider removing this code and refactor unit test data as a dictionary with tuple keys instead of a list seen = set() @@ -74,17 +91,17 @@ def _() -> ResultManager: for test in dupes: msg = f"Found duplicate in test catalog: {test}" logger.error(msg) - pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(manager.results)}", pytrace=False) + pytest.fail(f"Expected {len(catalog.tests) * len(inventory)} tests but got {len(results.results)}", pytrace=False) bench_info = ( "\n--- ANTA NRFU Benchmark Information ---\n" - f"Test results: {len(manager.results)}\n" - f"Success: {manager.get_total_results({AntaTestStatus.SUCCESS})}\n" - f"Failure: {manager.get_total_results({AntaTestStatus.FAILURE})}\n" - f"Skipped: {manager.get_total_results({AntaTestStatus.SKIPPED})}\n" - f"Error: {manager.get_total_results({AntaTestStatus.ERROR})}\n" - f"Unset: {manager.get_total_results({AntaTestStatus.UNSET})}\n" + f"Test results: {len(results.results)}\n" + f"Success: {results.get_total_results({AntaTestStatus.SUCCESS})}\n" + f"Failure: {results.get_total_results({AntaTestStatus.FAILURE})}\n" + f"Skipped: {results.get_total_results({AntaTestStatus.SKIPPED})}\n" + f"Error: {results.get_total_results({AntaTestStatus.ERROR})}\n" + f"Unset: {results.get_total_results({AntaTestStatus.UNSET})}\n" "---------------------------------------" ) logger.info(bench_info) - assert manager.get_total_results({AntaTestStatus.ERROR}) == 0 - assert manager.get_total_results({AntaTestStatus.UNSET}) == 0 + assert results.get_total_results({AntaTestStatus.ERROR}) == 0 + assert results.get_total_results({AntaTestStatus.UNSET}) == 0 diff --git a/tests/benchmark/test_reporter.py b/tests/benchmark/test_reporter.py new file mode 100644 index 000000000..ea74fb5da --- /dev/null +++ b/tests/benchmark/test_reporter.py @@ -0,0 +1,71 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. 
+"""Benchmark tests for anta.reporter.""" + +import json +import logging +from pathlib import Path + +import pytest + +from anta.reporter import ReportJinja, ReportTable +from anta.reporter.csv_reporter import ReportCsv +from anta.reporter.md_reporter import MDReportGenerator +from anta.result_manager import ResultManager + +logger = logging.getLogger(__name__) + +DATA_DIR: Path = Path(__file__).parents[1].resolve() / "data" + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_table_all(results: ResultManager) -> None: + """Benchmark ReportTable.report_all().""" + reporter = ReportTable() + reporter.report_all(results) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_table_devices(results: ResultManager) -> None: + """Benchmark ReportTable.report_summary_devices().""" + reporter = ReportTable() + reporter.report_summary_devices(results) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_table_tests(results: ResultManager) -> None: + """Benchmark ReportTable.report_summary_tests().""" + reporter = ReportTable() + reporter.report_summary_tests(results) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_json(results: ResultManager) -> None: + """Benchmark JSON report.""" + assert isinstance(results.json, str) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_jinja(results: ResultManager) -> None: + """Benchmark ReportJinja.""" + assert isinstance(ReportJinja(template_path=DATA_DIR / "template.j2").render(json.loads(results.json)), str) + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_csv(results: ResultManager, tmp_path: Path) -> None: + """Benchmark ReportCsv.generate().""" + ReportCsv.generate(results=results, csv_filename=tmp_path / "report.csv") + + +@pytest.mark.benchmark +@pytest.mark.dependency(depends=["anta_benchmark"], scope="package") +def test_markdown(results: ResultManager, tmp_path: Path) -> None: + """Benchmark MDReportGenerator.generate().""" + MDReportGenerator.generate(results=results, md_filename=tmp_path / "report.md") diff --git a/tests/benchmark/test_runner.py b/tests/benchmark/test_runner.py index b020a85d0..a8639af3e 100644 --- a/tests/benchmark/test_runner.py +++ b/tests/benchmark/test_runner.py @@ -5,19 +5,21 @@ from __future__ import annotations -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any from anta.result_manager import ResultManager from anta.runner import get_coroutines, prepare_tests if TYPE_CHECKING: from collections import defaultdict + from collections.abc import Coroutine from pytest_codspeed import BenchmarkFixture from anta.catalog import AntaCatalog, AntaTestDefinition from anta.device import AntaDevice from anta.inventory import AntaInventory + from anta.result_manager.models import TestResult def test_prepare_tests(benchmark: BenchmarkFixture, catalog: AntaCatalog, inventory: AntaInventory) -> None: @@ -40,9 +42,13 @@ def test_get_coroutines(benchmark: BenchmarkFixture, catalog: AntaCatalog, inven assert selected_tests is not None - coroutines = benchmark(lambda: get_coroutines(selected_tests=selected_tests, manager=ResultManager())) - for coros in coroutines: - coros.close() + def bench() -> list[Coroutine[Any, Any, TestResult]]: + coros = get_coroutines(selected_tests=selected_tests, 
+        for c in coros:
+            c.close()
+        return coros
+
+    coroutines = benchmark(bench)

     count = sum(len(tests) for tests in selected_tests.values())
     assert count == len(coroutines)
diff --git a/tests/units/cli/nrfu/test_commands.py b/tests/units/cli/nrfu/test_commands.py
index 817ab7830..0d8046c01 100644
--- a/tests/units/cli/nrfu/test_commands.py
+++ b/tests/units/cli/nrfu/test_commands.py
@@ -17,7 +17,7 @@
 if TYPE_CHECKING:
     from click.testing import CliRunner

-DATA_DIR: Path = Path(__file__).parent.parent.parent.parent.resolve() / "data"
+DATA_DIR: Path = Path(__file__).parents[3].resolve() / "data"


 def test_anta_nrfu_table_help(click_runner: CliRunner) -> None:
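
Note on the new ResultManager API used by the benchmarks above: reset() lets one manager instance be reused across benchmark rounds, and the dump property backs the json property. A minimal sketch of that intended usage, assuming a manager populated the same way the benchmarks do; run_one_round is a hypothetical stand-in for the main(...) call made in test_anta.py.

from anta.result_manager import ResultManager

results = ResultManager()


def run_one_round() -> None:
    """Hypothetical stand-in for one benchmark round; the real tests call anta.runner.main()."""
    results.reset()  # drop previous entries and put the status back to UNSET
    # ... run ANTA here so TestResult objects get added to `results` ...


run_one_round()
payload = results.dump  # list of dicts built from TestResult.model_dump()
report = results.json   # same data serialized with json.dumps(results.dump, indent=4)

Because the session-scoped session_results fixture keeps one such manager per benchmark parameter, the reporter benchmarks in test_reporter.py can consume the populated object once the anta_benchmark dependency has run.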