diff --git a/.gitignore b/.gitignore
index 167e84af0..d3d5267b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,3 +33,6 @@ test/charms/test_smoke/.charmcraft_output_packages.txt
 test/charms/test_smoke/requirements.txt
 test/charms/test_smoke/charmcraft.yaml
 juju-crashdump*.tar.xz
+
+# Benchmark test artifacts
+.benchmarks
diff --git a/test/benchmark/__init__.py b/test/benchmark/__init__.py
new file mode 100644
index 000000000..d3b88643a
--- /dev/null
+++ b/test/benchmark/__init__.py
@@ -0,0 +1,24 @@
+# Copyright 2024 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Benchmark tests for ops.
+
+Optimising performance is not a current goal with ops - any gains are
+unlikely to be significant compared with those from Juju or the charm and
+its workload. However, we do want to ensure that we do not unknowingly
+regress in performance.
+
+This package is for tests that cover core functionality, to be used for
+performance benchmarking.
+"""
diff --git a/test/charms/test_benchmark/charmcraft.yaml b/test/charms/test_benchmark/charmcraft.yaml
new file mode 100644
index 000000000..c58f03a5e
--- /dev/null
+++ b/test/charms/test_benchmark/charmcraft.yaml
@@ -0,0 +1,40 @@
+name: benchmark
+type: charm
+title: ops-benchmark
+summary: A simple charm used for benchmark tests
+description: Read the summary.
+bases:
+  - build-on:
+      - name: ubuntu
+        channel: "22.04"
+    run-on:
+      - name: ubuntu
+        channel: "22.04"
+config:
+  options:
+    log-level:
+      description: Configures the log level.
+      default: "info"
+      type: string
+actions:
+  act:
+    description: Do something to the workload.
+containers:
+  foo:
+resources:
+  baz:
+    type: oci-image
+storage:
+  bar:
+    type: filesystem
+requires:
+  rel:
+    interface: qux
+peers:
+  peer:
+    interface: chat
+extra-bindings:
+  MySpace: null
+parts:
+  charm:
+    charm-entrypoint: src/bcharm.py
diff --git a/test/charms/test_benchmark/requirements.txt b/test/charms/test_benchmark/requirements.txt
new file mode 100644
index 000000000..0356c38b5
--- /dev/null
+++ b/test/charms/test_benchmark/requirements.txt
@@ -0,0 +1 @@
+ops ~= 2.17
diff --git a/test/charms/test_benchmark/src/bcharm.py b/test/charms/test_benchmark/src/bcharm.py
new file mode 100755
index 000000000..5f6e105bd
--- /dev/null
+++ b/test/charms/test_benchmark/src/bcharm.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""Basic benchmarking charm."""
+
+import logging
+
+import ops
+
+logger = logging.getLogger(__name__)
+
+
+class BenchmarkCharm(ops.CharmBase):
+    """Charm the service."""
+
+    _stored = ops.StoredState()
+
+    def __init__(self, framework: ops.Framework):
+        super().__init__(framework)
+        framework.observe(self.on.update_status, self._on_update_status)
+        framework.observe(self.on.stop, self._on_stop)
+        framework.observe(self.on.config_changed, self._on_config_changed)
+
+    def _on_update_status(self, _: ops.UpdateStatusEvent):
+        # Say a bunch of things.
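+        # Four levels x 50 messages = 200 log calls (test_lots_of_logs relies on this volume).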
+        for level in ('debug', 'info', 'warning', 'error'):
+            for i in range(50):
+                getattr(logger, level)('This is message %s', i)
+
+    def _on_stop(self, _: ops.StopEvent):
+        pass
+
+    def _on_config_changed(self, event: ops.ConfigChangedEvent):
+        event.defer()
+
+
+if __name__ == '__main__':  # pragma: nocover
+    ops.main(BenchmarkCharm)
diff --git a/testing/tests/benchmark/__init__.py b/testing/tests/benchmark/__init__.py
new file mode 100644
index 000000000..499d5758d
--- /dev/null
+++ b/testing/tests/benchmark/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2024 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Benchmark tests for ops-scenario.
+
+Optimising performance is not a current goal with ops-scenario. However,
+we do want to ensure that we do not unknowingly regress in performance.
+
+This package contains a small set of tests that cover core functionality,
+to be used for performance benchmarking.
+"""
diff --git a/testing/tests/benchmark/test_testing.py b/testing/tests/benchmark/test_testing.py
new file mode 100644
index 000000000..79b06bd22
--- /dev/null
+++ b/testing/tests/benchmark/test_testing.py
@@ -0,0 +1,144 @@
+# Copyright 2024 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Benchmark tests for ops-scenario."""
+
+import dataclasses
+import pathlib
+import sys
+
+import ops
+from ops import testing
+
+sys.path.append(
+    str(
+        pathlib.Path(__file__).parent.parent.parent.parent
+        / "test"
+        / "charms"
+        / "test_benchmark"
+        / "src"
+    )
+)
+
+from bcharm import BenchmarkCharm
+
+
+# Note: the 'benchmark' argument here is a fixture that pytest-benchmark
+# automatically makes available to all tests.
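+# Calling benchmark(fn, *args) runs fn enough times to gather timing
+# statistics and returns the value from one of those calls.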
+def test_context_explicit_meta(benchmark):
+    ctx = benchmark(testing.Context, ops.CharmBase, meta={"name": "foo"})
+    assert isinstance(ctx, testing.Context)
+
+
+def test_run_no_observer(benchmark):
+    ctx = testing.Context(BenchmarkCharm)
+    benchmark(ctx.run, ctx.on.start(), testing.State())
+    assert len({e.handle.kind for e in ctx.emitted_events}) == 1
+
+
+def test_run_observed(benchmark):
+    ctx = testing.Context(BenchmarkCharm)
+    benchmark(ctx.run, ctx.on.stop(), testing.State())
+    assert len({e.handle.kind for e in ctx.emitted_events}) == 1
+
+
+def test_context_explicit_meta_config_actions(benchmark):
+    ctx = benchmark(
+        testing.Context,
+        ops.CharmBase,
+        meta={"name": "foo"},
+        actions={"act": {"description": "foo"}},
+        config={"options": {"conf": {"type": "int", "description": "bar"}}},
+    )
+    ctx.run(ctx.on.action("act"), testing.State(config={"conf": 10}))
+    assert len({e.handle.kind for e in ctx.emitted_events}) == 1
+
+
+def test_context_autoload_meta(benchmark):
+    ctx = benchmark(testing.Context, BenchmarkCharm)
+    assert isinstance(ctx, testing.Context)
+
+
+def test_many_tests_explicit_meta(benchmark):
+    def mock_pytest():
+        """Simulate running multiple tests against the same charm."""
+        for event in ("install", "start", "stop", "remove"):
+            for _ in range(5):
+                ctx = testing.Context(ops.CharmBase, meta={"name": "foo"})
+                ctx.run(getattr(ctx.on, event)(), testing.State())
+                assert len({e.handle.kind for e in ctx.emitted_events}) == 1
+
+    benchmark(mock_pytest)
+
+
+def test_many_tests_autoload_meta(benchmark):
+    def mock_pytest():
+        """Simulate running multiple tests against the same charm."""
+        for event in ("install", "start", "stop", "remove"):
+            for _ in range(5):
+                ctx = testing.Context(BenchmarkCharm)
+                ctx.run(getattr(ctx.on, event)(), testing.State())
+                assert len({e.handle.kind for e in ctx.emitted_events}) == 1
+
+    benchmark(mock_pytest)
+
+
+def test_lots_of_logs(benchmark):
+    ctx = testing.Context(BenchmarkCharm)
+    benchmark(ctx.run, ctx.on.update_status(), testing.State())
+    assert len(ctx.juju_log) > 200
+
+
+def test_full_state(benchmark):
+    def fill_state():
+        rel = testing.Relation("rel")
+        peer = testing.PeerRelation("peer")
+        network = testing.Network("MySpace")
+        container = testing.Container("foo")
+        storage = testing.Storage("bar")
+        tcp = testing.TCPPort(22)
+        icmp = testing.ICMPPort()
+        udp = testing.UDPPort(8000)
+        secret = testing.Secret({"password": "admin"})
+        resource = testing.Resource(name="baz", path=".")
+        stored_state = testing.StoredState()
+        state = testing.State(
+            relations={rel, peer},
+            networks={network},
+            containers={container},
+            storages={storage},
+            opened_ports={tcp, icmp, udp},
+            secrets={secret},
+            resources={resource},
+            stored_states={stored_state},
+            app_status=testing.ActiveStatus(),
+            unit_status=testing.BlockedStatus("I'm stuck!"),
+        )
+        return state
+
+    ctx = testing.Context(BenchmarkCharm)
+    state_in = benchmark(fill_state)
+    state_out = ctx.run(ctx.on.start(), state_in)
+    assert dataclasses.asdict(state_in) == dataclasses.asdict(state_out)
+
+
+def test_deferred_events(benchmark):
+    ctx = testing.Context(BenchmarkCharm, capture_deferred_events=True)
+    deferred = ctx.on.stop().deferred(BenchmarkCharm._on_stop)
+    state_in = testing.State(deferred=[deferred])
+    state_out = benchmark(ctx.run, ctx.on.config_changed(), state_in)
+    assert len(state_out.deferred) == 1
+    assert len({e.handle.kind for e in ctx.emitted_events}) == 2
diff --git a/tox.ini b/tox.ini
index fecaa4ba7..715746bdd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -23,6 +23,8 @@ envlist = lint, static, unit
 src_path = ops/
 tst_path = test/
 all_path = {[vars]src_path} {[vars]tst_path}
+testing_src_path = testing/src/scenario/
+testing_tst_path = testing/tests/
 
 [testenv]
 basepython = python3
@@ -104,7 +106,9 @@ deps =
     -e .
     -e testing
 commands =
-    pytest -n auto --ignore={[vars]tst_path}smoke -v --tb native \
+    pytest -n auto --ignore={[vars]tst_path}smoke \
+        --ignore={[vars]tst_path}benchmark --ignore={[vars]testing_tst_path}benchmark \
+        -v --tb native \
         -W 'ignore:Harness is deprecated:PendingDeprecationWarning' {posargs}
 
 [testenv:coverage]
@@ -124,11 +128,30 @@ deps =
     -e testing
 commands =
     mkdir -p .report
-    coverage run --source={[vars]src_path},testing/src/scenario \
-        -m pytest --ignore={[vars]tst_path}smoke -v --tb native {posargs}
+    coverage run --source={[vars]src_path},{[vars]testing_src_path} \
+        -m pytest --ignore={[vars]tst_path}smoke \
+        --ignore={[vars]tst_path}benchmark --ignore={[vars]testing_tst_path}benchmark \
+        -v --tb native \
+        -W 'ignore:Harness is deprecated:PendingDeprecationWarning' {posargs}
     coverage xml -o .report/coverage.xml
    coverage report
 
+[testenv:benchmark]
+description = Run benchmark tests
+passenv =
+    RUN_REAL_PEBBLE_TESTS
+    PEBBLE
+deps =
+    PyYAML==6.*
+    websocket-client==1.*
+    pytest~=7.2
+    pytest-benchmark
+    typing_extensions~=4.2
+    -e .
+    -e testing
+commands =
+    pytest -v --tb native {[vars]tst_path}benchmark {[vars]testing_tst_path}benchmark {posargs}
+
 [testenv:pebble]
 description = Run real pebble tests
 allowlist_externals = pebble