From 711e0bc07f877ee361b8825e62fd5308a80d840b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Sun, 30 May 2021 13:54:51 +0200
Subject: [PATCH 01/10] Add typed metadata and py.typed PEP 561 file

---
 setup.py                  | 4 +++-
 src/pytest_split/py.typed | 0
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 src/pytest_split/py.typed

diff --git a/setup.py b/setup.py
index f11cda4..efeb630 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
     install_requires=["pytest"],
     extras_require={"testing": tests_require},
     classifiers=[
-        "Development Status :: 1 - Planning",
+        "Development Status :: 4 - Beta"
         "Intended Audience :: Developers",
         "Operating System :: OS Independent",
         "Programming Language :: Python :: 3",
@@ -32,6 +32,8 @@
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         "Framework :: Pytest",
+        "Typing :: Typed",
     ],
     entry_points={"pytest11": ["pytest-split = pytest_split.plugin"]},
+    package_data={"pytest_split": ["py.typed"]},
 )
diff --git a/src/pytest_split/py.typed b/src/pytest_split/py.typed
new file mode 100644
index 0000000..e69de29
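The empty py.typed file created above is the PEP 561 marker that tells type checkers the package ships its own inline annotations. A minimal sketch of the downstream effect — hypothetical consumer code, assuming pytest-split is installed (as of PATCH 02 below the package exposes a plain string `__version__`):

    # check_version.py - illustrative consumer module, not part of this series
    from pytest_split import __version__  # mypy resolves this instead of treating it as Any


    def banner() -> str:
        # Without py.typed, mypy would see pytest_split as an untyped import
        # and could not verify that __version__ is a str.
        return f"pytest-split {__version__}"

Note that the `package_data={"pytest_split": ["py.typed"]}` line is what actually copies the marker into built distributions; committing the file alone would not be enough.
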
From 869700c87b49fad700cc36809bc0b2d0b2b75499 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Sun, 30 May 2021 14:57:24 +0200
Subject: [PATCH 02/10] Remove --store-durations arg and write to pytest-cache
 by default

---
 setup.cfg                    |   1 +
 setup.py                     |   3 +-
 src/pytest_split/__init__.py |   2 +-
 src/pytest_split/plugin.py   | 232 ++++++++++++++++++++---------------
 4 files changed, 133 insertions(+), 105 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 1d4803a..c864293 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,3 +2,4 @@
 max-line-length = 88
 exclude =
     tests/*
+ignore = ANN101, W503
diff --git a/setup.py b/setup.py
index efeb630..caec675 100644
--- a/setup.py
+++ b/setup.py
@@ -22,8 +22,7 @@
     install_requires=["pytest"],
     extras_require={"testing": tests_require},
     classifiers=[
-        "Development Status :: 4 - Beta"
-        "Intended Audience :: Developers",
+        "Development Status :: 4 - Beta" "Intended Audience :: Developers",
         "Operating System :: OS Independent",
         "Programming Language :: Python :: 3",
         "Programming Language :: Python :: 3.6",
diff --git a/src/pytest_split/__init__.py b/src/pytest_split/__init__.py
index b570882..9fc9f22 100644
--- a/src/pytest_split/__init__.py
+++ b/src/pytest_split/__init__.py
@@ -1,3 +1,3 @@
-from ._version import version as __version__
+__version__ = '0.1.5'

 __all__ = ("__version__",)
diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index 17cafb4..fbf9ef2 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -1,42 +1,44 @@
 import json
 import os
 from collections import defaultdict, OrderedDict
-from typing import TYPE_CHECKING, Tuple
+from typing import TYPE_CHECKING, Tuple, Generator
+from warnings import warn

+import _pytest
+import pytest
 from _pytest.config import create_terminal_writer
+from _pytest.config.argparsing import Parser
 from _pytest.main import Session

 if TYPE_CHECKING:
-    from typing import Generator, List
-    from _pytest.config.argparsing import Parser
+    from typing import List

     from _pytest import nodes
     from _pytest.config import Config

-# Ugly hacks for freezegun compatibility: https://github.com/spulec/freezegun/issues/286
+# Ugly hacks for freezegun compatibility:
+# https://github.com/spulec/freezegun/issues/286
 STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD = 60 * 10  # seconds
+CACHE_PATH = ".pytest_cache/v/cache/pytest_split"

-
+@pytest.hookimpl()
 def pytest_addoption(parser: Parser) -> None:
+    """
+    Declare plugin options.
+    """
     group = parser.getgroup(
         "Split tests into groups whose execution time is about the same. "
         "First run the whole suite with --store-durations to save information "
         "about test execution times"
     )
-    group.addoption(
-        "--store-durations",
-        dest="store_durations",
-        action="store_true",
-        help="Store durations into '--durations-path'",
-    )
     group.addoption(
         "--durations-path",
         dest="durations_path",
         help=(
-            "Path to the file in which durations are (to be) stored, "
-            "default is .test_durations in the current working directory"
+            "Path to the file in which durations are (to be) stored. "
+            f"By default, durations will be written to {CACHE_PATH}"
         ),
-        default=os.path.join(os.getcwd(), ".test_durations"),
+        default=os.path.join(os.getcwd(), CACHE_PATH),
     )
     group.addoption(
         "--splits",
         dest="splits",
@@ -52,100 +54,126 @@ def pytest_addoption(parser: Parser) -> None:
     )


-def pytest_collection_modifyitems(
-    config: Config, items: List[nodes.Item]
-) -> Generator[None, None, None]:
-    splits = config.option.splits
-    group = config.option.group
-    store_durations = config.option.store_durations
-    durations_report_path = config.option.durations_path
-
-    if any((splits, group)):
-        if not all((splits, group)):
-            return
-        if not os.path.isfile(durations_report_path):
-            return
-        if store_durations:
-            # Don't split if we are storing durations
-            return
-    total_tests_count = len(items)
-    if splits and group:
-        with open(durations_report_path) as f:
-            stored_durations = OrderedDict(json.load(f))
-
-        start_idx, end_idx = _calculate_suite_start_and_end_idx(
-            splits, group, items, stored_durations
+@pytest.hookimpl(trylast=True)
+def pytest_configure(config: "Config") -> None:
+    """
+    Configure plugin.
+    """
+    if (config.option.splits and not config.option.group) or (
+        config.option.group and not config.option.splits
+    ):
+        warn(
+            "It looks like you passed an argument to run pytest with pytest-split, "
+            "but both the `splits` and `group` arguments are required for pytest-split to run"
         )
+    if config.option.splits and config.option.group:
+        config.pluginmanager.register(PytestSplitPlugin(config), "pytestsplitplugin")
+
+
+class PytestSplitPlugin:
+    cache_file = "cache/pytest-split"
+
+    def __init__(self, config: "Config") -> None:
+        """
+        Load cache and configure plugin.
+        """
+        self.cached_durations = dict(config.cache.get(self.cache_file, {}))
+        if not self.cached_durations:
+            warn(
+                "No test durations found. Pytest-split will "
+                "split tests evenly when no durations are found, "
+                "so you should expect better results in subsequent "
+                "runs when test timings have been documented."
+            )
+
+    def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Item]") -> Generator[None, None, None]:
+        """
+        Instruct Pytest to run the tests we've selected.
+
+        This method is called by Pytest right after Pytest internals finish
+        collecting tests.
+
+        See https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L670.
+        """
+        # Load plugin arguments
+        splits: int = config.option.splits
+        group: int = config.option.group
+        durations_report_path: str = config.option.durations_path
+
+        total_tests_count = len(items)
+        stored_durations = OrderedDict(config.cache.get(self.cache_file, {}))
+
+        start_idx, end_idx = self._calculate_suite_start_and_end_idx(splits, group, items, stored_durations)
         items[:] = items[start_idx:end_idx]

-        terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
-        terminal_writer = create_terminal_writer(config)
-        message = terminal_writer.markup(
+        writer = create_terminal_writer(config)
+        message = writer.markup(
             " Running group {}/{} ({}/{} tests)\n".format(
                 group, splits, len(items), total_tests_count
             )
         )
-        terminal_reporter.write(message)
-
-
-def pytest_sessionfinish(session: Session) -> None:
-    if session.config.option.store_durations:
-        report_path = session.config.option.durations_path
-        terminal_reporter = session.config.pluginmanager.get_plugin("terminalreporter")
-        durations = defaultdict(float)
-        for test_reports in terminal_reporter.stats.values():
-            for test_report in test_reports:
-                if hasattr(test_report, "duration"):
-                    stage = getattr(test_report, "when", "")
-                    duration = test_report.duration
-                    # These ifs can be removed after this is solved:
-                    # https://github.com/spulec/freezegun/issues/286
-                    if duration < 0:
-                        continue
-                    if (
-                        stage in ("teardown", "setup")
-                        and duration > STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD
-                    ):
-                        # Ignore illegitimate teardown durations
-                        continue
-                    durations[test_report.nodeid] += test_report.duration
-
-        with open(report_path, "w") as f:
-            f.write(json.dumps(list(durations.items()), indent=2))
-
-        terminal_writer = create_terminal_writer(session.config)
-        message = terminal_writer.markup(
-            " Stored test durations in {}\n".format(report_path)
-        )
-        terminal_reporter.write(message)
-
-
-def _calculate_suite_start_and_end_idx(
-    splits: int, group: int, items: List[nodes.Item], stored_durations: OrderedDict
-) -> Tuple[int, int]:
-    item_node_ids = [item.nodeid for item in items]
-    stored_durations = {k: v for k, v in stored_durations.items() if k in item_node_ids}
-    avg_duration_per_test = sum(stored_durations.values()) / len(stored_durations)
-
-    durations = OrderedDict()
-    for node_id in item_node_ids:
-        durations[node_id] = stored_durations.get(node_id, avg_duration_per_test)
-
-    time_per_group = sum(durations.values()) / splits
-    start_time = time_per_group * (group - 1)
-    end_time = time_per_group + start_time
-    start_idx = end_idx = duration_rolling_sum = 0
-
-    for idx, duration in enumerate(durations.values()):
-        duration_rolling_sum += duration
-        if group != 1 and not start_idx and duration_rolling_sum > start_time:
-            start_idx = idx
-            if group == splits:
+        writer.line(message)
+
+    def pytest_sessionfinish(self, session: "Session") -> None:
+        if session.config.option.store_durations:
+            report_path = session.config.option.durations_path
+            terminal_reporter = session.config.pluginmanager.get_plugin(
+                "terminalreporter"
+            )
+            durations = defaultdict(float)
+            for test_reports in terminal_reporter.stats.values():
+                for test_report in test_reports:
+                    if hasattr(test_report, "duration"):
+                        stage = getattr(test_report, "when", "")
+                        duration = test_report.duration
+                        # These ifs can be removed after this is solved:
+                        # https://github.com/spulec/freezegun/issues/286
+                        if duration < 0:
+                            continue
+                        if (
+                            stage in ("teardown", "setup")
+                            and duration > STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD
+                        ):
+                            # Ignore illegitimate teardown durations
+                            continue
+                        durations[test_report.nodeid] += test_report.duration
+
+            with open(report_path, "w") as f:
+                f.write(json.dumps(list(durations.items()), indent=2))
+
+            terminal_writer = create_terminal_writer(session.config)
+            message = terminal_writer.markup(
+                " Stored test durations in {}\n".format(report_path)
+            )
+            terminal_reporter.write(message)
+
+    @staticmethod
+    def _calculate_suite_start_and_end_idx(splits: int, group: int, items: "List[nodes.Item]", stored_durations: OrderedDict) -> Tuple[int, int]:
+        item_node_ids = [item.nodeid for item in items]
+        stored_durations = {
+            k: v for k, v in stored_durations.items() if k in item_node_ids
+        }
+        avg_duration_per_test = sum(stored_durations.values()) / len(stored_durations)
+
+        durations = OrderedDict()
+        for node_id in item_node_ids:
+            durations[node_id] = stored_durations.get(node_id, avg_duration_per_test)
+
+        time_per_group = sum(durations.values()) / splits
+        start_time = time_per_group * (group - 1)
+        end_time = time_per_group + start_time
+        start_idx = end_idx = duration_rolling_sum = 0
+
+        for idx, duration in enumerate(durations.values()):
+            duration_rolling_sum += duration
+            if group != 1 and not start_idx and duration_rolling_sum > start_time:
+                start_idx = idx
+                if group == splits:
+                    break
+            elif group != splits and not end_idx and duration_rolling_sum > end_time:
+                end_idx = idx
                 break
-            elif group != splits and not end_idx and duration_rolling_sum > end_time:
-                end_idx = idx
-                break
-    if not end_idx:
-        end_idx = len(items)
+        if not end_idx:
+            end_idx = len(items)

-    return start_idx, end_idx
+        return start_idx, end_idx
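The key change in PATCH 02 is persisting durations through pytest's own cache rather than a hand-rolled report file. A minimal sketch of the cache API the plugin now relies on — the key mirrors the `cache_file` attribute above, while the node ID and timing are invented:

    # conftest.py - sketch of a config.cache round-trip
    def pytest_configure(config):
        # get() returns the stored value, or the default if the key was never written
        durations = config.cache.get("cache/pytest-split", {})
        durations["tests/test_example.py::test_a"] = 0.123  # values must stay JSON-serializable
        # set() persists the value under .pytest_cache/v/<key>
        config.cache.set("cache/pytest-split", durations)

Storing durations in `.pytest_cache` keeps the working tree clean, but that directory is typically git-ignored and machine-local — the trade-off that PATCH 05 revisits.
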
From cd95f850b4eb175d3a415ec9a3cd49861dfadb26 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Mon, 31 May 2021 00:55:20 +0200
Subject: [PATCH 03/10] Add --store-durations back

---
 src/pytest_split/plugin.py | 40 ++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 15 deletions(-)

diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index bc065ee..a6b8d03 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -27,6 +27,12 @@ def pytest_addoption(parser: "Parser") -> None:
         "First run the whole suite with --store-durations to save information "
         "about test execution times"
     )
+    group.addoption(
+        "--store-durations",
+        dest="store_durations",
+        action="store_true",
+        help="Store durations into '--durations-path'",
+    )
     group.addoption(
         "--splits",
         dest="splits",
@@ -65,11 +71,13 @@ def pytest_configure(config: "Config") -> None:
             "to run. Remove the `groups` argument or add a `splits` argument."
         )
     elif config.option.splits and config.option.group:
-        # Register plugin to run only if we received a splits and group arg
         config.pluginmanager.register(PytestSplitPlugin(config), "pytestsplitplugin")
+        config.pluginmanager.register(PytestSplitCachePlugin(config), "pytestsplitcacheplugin")
+    elif config.option.store_durations:
+        config.pluginmanager.register(PytestSplitCachePlugin(config), "pytestsplitcacheplugin")


-class PytestSplitPlugin:
+class Base:
     cache_file = "cache/pytest-split"

     def __init__(self, config: "Config") -> None:
@@ -93,7 +101,10 @@ def __init__(self, config: "Config") -> None:
                 "when test timings have been documented."
             )

-    @hookimpl(hookwrapper=True, tryfirst=True)
+
+class PytestSplitPlugin(Base):
+
+    @hookimpl(tryfirst=True)
     def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Item]") -> Generator[None, None, None]:
         """
         Instruct Pytest to run the tests we've selected.
@@ -107,25 +118,21 @@ def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Ite
         splits: int = config.option.splits
         group: int = config.option.group

-        total_tests_count = len(items)
-
         selected_tests, deselected_tests = self._split_tests(splits, group, items, self.cached_durations)

         items[:] = selected_tests
         config.hook.pytest_deselected(items=deselected_tests)

-        message = self.writer.markup(
-            " Running group {}/{} ({}/{} tests)\n".format(group, splits, len(items), total_tests_count)
-        )
+        message = self.writer.markup(f"Running group {group}/{splits}\n")
+        self.writer.line()
         self.writer.line(message)
-        yield

     @staticmethod
     def _split_tests(
-            splits: int,
-            group: int,
-            items: "List[nodes.Item]",
-            stored_durations: OrderedDict,
+        splits: int,
+        group: int,
+        items: "List[nodes.Item]",
+        stored_durations: OrderedDict,
     ) -> Tuple[int, int]:
         """
         Split tests by runtime.
@@ -201,6 +208,9 @@ def _split_tests(

         return selected, deselected


+
+class PytestSplitCachePlugin(Base):
+
     def pytest_sessionfinish(self) -> None:
         """
         Write test runtimes to cache.
@@ -219,8 +229,8 @@ def pytest_sessionfinish(self) -> None:
                     if test_report.duration < 0:
                         continue
                     if (
-                            getattr(test_report, "when", "") in ("teardown", "setup")
-                            and test_report.duration > STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD
+                        getattr(test_report, "when", "") in ("teardown", "setup")
+                        and test_report.duration > STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD
                     ):
                         # Ignore illegitimate teardown durations
                         continue

From 5f139b79fc2afc09883b77593756b28205d74567 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Mon, 31 May 2021 16:59:02 +0200
Subject: [PATCH 04/10] Only save durations if --store-durations is passed

---
 src/pytest_split/plugin.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index 6d95a9e..151a491 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -72,8 +72,7 @@ def pytest_configure(config: "Config") -> None:
         )
     elif config.option.splits and config.option.group:
         config.pluginmanager.register(PytestSplitPlugin(config), "pytestsplitplugin")
-        config.pluginmanager.register(PytestSplitCachePlugin(config), "pytestsplitcacheplugin")
-    elif config.option.store_durations:
+    if config.option.store_durations:
         config.pluginmanager.register(PytestSplitCachePlugin(config), "pytestsplitcacheplugin")

From aee5a160c3c1935bb3c229c7ee78c4995c16bda8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Mon, 31 May 2021 17:08:49 +0200
Subject: [PATCH 05/10] Make .test_durations the default output, and remove
 .pytest_cache writes

---
 src/pytest_split/plugin.py | 46 ++++++++++++++++++--------------------
 tests/test_plugin.py       |  8 +++----
 2 files changed, 26 insertions(+), 28 deletions(-)

diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index 151a491..0f0e9fc 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -1,4 +1,5 @@
 import json
+import os
 from collections import OrderedDict
 from typing import TYPE_CHECKING
 from warnings import warn
@@ -33,6 +34,15 @@ def pytest_addoption(parser: "Parser") -> None:
         action="store_true",
         help="Store durations into '--durations-path'",
     )
+    group.addoption(
+        "--durations-path",
+        dest="durations_path",
+        help=(
+            "Path to the file in which durations are (to be) stored, "
+            "default is .test_durations in the current working directory"
+        ),
+        default=os.path.join(os.getcwd(), ".test_durations"),
+    )
     group.addoption(
         "--splits",
         dest="splits",
@@ -45,15 +55,6 @@ def pytest_addoption(parser: "Parser") -> None:
         type=int,
         help="The group of tests that should be executed (first one is 1)",
     )
-    group.addoption(
-        "--durations-path",
-        dest="durations_path",
-        help=(
-            "Path to the file in which durations are (to be) stored. "
-            f"By default, durations will be written to {CACHE_PATH}"
-        ),
-        default="",
-    )


 def pytest_configure(config: "Config") -> None:
@@ -72,6 +73,7 @@ def pytest_configure(config: "Config") -> None:
         )
     elif config.option.splits and config.option.group:
         config.pluginmanager.register(PytestSplitPlugin(config), "pytestsplitplugin")
+
     if config.option.store_durations:
         config.pluginmanager.register(PytestSplitCachePlugin(config), "pytestsplitcacheplugin")
@@ -84,13 +86,13 @@ def __init__(self, config: "Config") -> None:
         Load cache and configure plugin.
         """
         self.config = config
-        if config.option.durations_path:
-            with open(config.option.durations_path, "r") as f:
-                self.cached_durations = json.loads(f.read())
-        else:
-            self.cached_durations = dict(config.cache.get(self.cache_file, {}))
-
         self.writer = create_terminal_writer(self.config)
+
+        self.writer.line("")
+        self.writer.line(f"Reading durations from {config.option.durations_path}")
+        with open(config.option.durations_path, "r") as f:
+            self.cached_durations = json.loads(f.read())
+
         if not self.cached_durations:
             self.writer.line()
             self.writer.line(
@@ -242,13 +244,9 @@ def pytest_sessionfinish(self) -> None:
         for k, v in test_durations.items():
             self.cached_durations[k] = v

-        # Save to cache
-        self.config.cache.set(self.cache_file, self.cached_durations)
+        # Save durations
+        with open(self.config.option.durations_path, "w") as f:
+            f.write(json.dumps(self.cached_durations))

-        # Save to custom file if needed
-        if self.config.option.durations_path:
-            with open(self.config.option.durations_path, "w") as f:
-                f.write(json.dumps(self.cached_durations))
-
-        message = self.writer.markup(" Stored test durations in {}\n".format(self.config.option.durations_path))
-        self.writer.line(message)
+        message = self.writer.markup(" Stored test durations in {}\n".format(self.config.option.durations_path))
+        self.writer.line(message)
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index be51272..e382b08 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -106,18 +106,18 @@ def test_it_does_not_split_with_invalid_args(self, example_suite, durations_path
         durations = {"test_it_does_not_split_with_invalid_args.py::test_1": 1}

         with open(durations_path, "w") as f:
-            json.dump(durations, f)
+            f.write(json.dumps(durations))

         # Plugin doesn't run when splits is passed but group is missing
-        result = example_suite.inline_run("--splits", "2", "--durations-path", durations_path)  # no --group
+        result = example_suite.inline_run("--splits", "2")
         result.assertoutcome(passed=10)

         # Plugin doesn't run when group is passed but splits is missing
-        result = example_suite.inline_run("--group", "2", "--durations-path", durations_path)  # no --splits
+        result = example_suite.inline_run("--group", "2")
         result.assertoutcome(passed=10)

         # Runs if they both are
-        result = example_suite.inline_run("--splits", "2", "--group", "1")
+        result = example_suite.inline_run("--splits", "2", "--group", "1", "--durations-path", durations_path)
         result.assertoutcome(passed=6)

     def test_it_adapts_splits_based_on_new_and_deleted_tests(self, example_suite, durations_path):
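With PATCH 05, duration data round-trips through a committable `.test_durations` file instead of the cache: record once with `pytest --store-durations`, then run one slice of the suite with, for example, `pytest --splits 3 --group 1`. The file is plain JSON keyed by test node IDs, so it is easy to inspect out of band. An illustrative reader — assuming the flat mapping of node IDs to seconds that the plugin writes at this point in the series:

    # inspect_durations.py - illustrative helper, not part of the plugin
    import json

    with open(".test_durations") as f:
        durations = json.load(f)  # e.g. {"tests/test_foo.py::test_bar": 0.31, ...}

    print(f"{len(durations)} timed tests, {sum(durations.values()):.1f}s total")
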
From 552cfa072d3d07040ecdac3090ccd7f4fdd6509d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Mon, 31 May 2021 18:29:23 +0200
Subject: [PATCH 06/10] Make test splits respect original ordering

---
 src/pytest_split/plugin.py |  64 ++++++----------
 tests/test_plugin.py       | 153 +++++++++++++++++++++++--------------
 2 files changed, 120 insertions(+), 97 deletions(-)

diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index 0f0e9fc..fdbb74d 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -1,6 +1,5 @@
 import json
 import os
-from collections import OrderedDict
 from typing import TYPE_CHECKING
 from warnings import warn
@@ -88,22 +87,25 @@ def __init__(self, config: "Config") -> None:
         self.config = config
         self.writer = create_terminal_writer(self.config)

-        self.writer.line("")
-        self.writer.line(f"Reading durations from {config.option.durations_path}")
-        with open(config.option.durations_path, "r") as f:
-            self.cached_durations = json.loads(f.read())
+        try:
+            with open(config.option.durations_path, "r") as f:
+                self.cached_durations = json.loads(f.read())
+        except FileNotFoundError:
+            self.cached_durations = {}

+
+class PytestSplitPlugin(Base):
+    def __init__(self, config: "Config"):
+        super().__init__(config)
         if not self.cached_durations:
-            self.writer.line()
-            self.writer.line(
-                "No test durations found. Pytest-split will "
+            message = self.writer.markup(
+                "\nNo test durations found. Pytest-split will "
                 "split tests evenly when no durations are found. "
-                "\nYou can expect better results in subsequent runs, "
-                "when test timings have been documented."
+                "\nYou can expect better results in subsequent runs, "
+                "when test timings have been documented.\n"
             )
+            self.writer.line(message)

-
-class PytestSplitPlugin(Base):
     @hookimpl(tryfirst=True)
     def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Item]") -> None:
         """
@@ -123,8 +125,7 @@ def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Ite
         items[:] = selected_tests  # type: ignore
         config.hook.pytest_deselected(items=deselected_tests)

-        message = self.writer.markup(f"Running group {group}/{splits}\n")
-        self.writer.line()
+        message = self.writer.markup(f"\n\nRunning group {group}/{splits}\n")
         self.writer.line(message)
         return None

@@ -133,7 +134,7 @@ def _split_tests(
         splits: int,
         group: int,
         items: "List[nodes.Item]",
-        stored_durations: OrderedDict,
+        stored_durations: dict,
     ) -> "Tuple[list, list]":
         """
         Split tests by runtime.
@@ -161,46 +162,29 @@ def _split_tests(
         The first list represents the tests we want to run, while the other
         represents the tests we want to deselect.
         """
-        # Filter down stored durations to only relevant tests durations -
-        # this way the average duration per test is calculated on relevant tests only
-        test_names = [item.nodeid for item in items]
-        durations = {k: v for k, v in stored_durations.items() if k in test_names}
+        # Filtering down durations to relevant ones ensures the avg isn't skewed by irrelevant data
+        test_ids = [item.nodeid for item in items]
+        durations = {k: v for k, v in stored_durations.items() if k in test_ids}

-        # Get the average duration for each test not in the cache
         if durations:
             avg_duration_per_test = sum(durations.values()) / len(durations)
         else:
-            # If there are no durations, we give every test the same assumed arbitrary value
+            # If there are no durations, give every test the same arbitrary value
             avg_duration_per_test = 1

-        # Create a dict of test-name: runtime
         tests_and_durations = {item: durations.get(item.nodeid, avg_duration_per_test) for item in items}
-
-        # Set the threshold runtime value per group
         time_per_group = sum(tests_and_durations.values()) / splits
-
-        # Order the dict so the slowest tests appear first
-        sorted_tests_and_durations = OrderedDict(sorted(tests_and_durations.items(), key=lambda x: x[1], reverse=True))
-
         selected, deselected = [], []

-        # Finally, we split tests equally between groups
         for _group in range(1, splits + 1):
             group_tests, group_runtime = [], 0

-            # Add slow tests up until *one more test would cross the threshold*
-            for item in OrderedDict(sorted_tests_and_durations):
-                if group_runtime + sorted_tests_and_durations[item] > time_per_group:
-                    break
-                group_tests.append(item)
-                group_runtime += sorted_tests_and_durations.pop(item)
-
-            # Add fast tests until *we do cross the threshold*
-            for item in OrderedDict(sorted(sorted_tests_and_durations.items(), key=lambda x: x[1], reverse=False)):
+            for item in dict(tests_and_durations):
                 if group_runtime > time_per_group:
                     break

                 group_tests.append(item)
-                group_runtime += sorted_tests_and_durations.pop(item)
+                group_runtime += tests_and_durations.pop(item)

             if _group == group:
                 selected = group_tests
@@ -246,7 +230,7 @@ def pytest_sessionfinish(self) -> None:

         # Save durations
         with open(self.config.option.durations_path, "w") as f:
-            f.write(json.dumps(self.cached_durations))
+            json.dump(self.cached_durations, f)

-        message = self.writer.markup(" Stored test durations in {}\n".format(self.config.option.durations_path))
+        message = self.writer.markup("\n\nStored test durations in {}\n".format(self.config.option.durations_path))
         self.writer.line(message)
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index e382b08..654b90f 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -1,5 +1,6 @@
 import itertools
 import json
+import os

 import pytest
@@ -30,6 +31,34 @@ def durations_path(tmpdir):
     return str(tmpdir.join(".durations"))


+class TestStoreDurations:
+    def test_it_stores(self, example_suite, durations_path):
+        example_suite.runpytest("--store-durations", "--durations-path", durations_path)
+
+        with open(durations_path) as f:
+            durations = json.load(f)
+
+        assert list(durations.keys()) == [
+            "test_it_stores.py::test_1",
+            "test_it_stores.py::test_2",
+            "test_it_stores.py::test_3",
+            "test_it_stores.py::test_4",
+            "test_it_stores.py::test_5",
+            "test_it_stores.py::test_6",
+            "test_it_stores.py::test_7",
+            "test_it_stores.py::test_8",
+            "test_it_stores.py::test_9",
+            "test_it_stores.py::test_10",
+        ]
+
+        for duration in durations.values():
+            assert isinstance(duration, float)
+
+    def test_it_does_not_store_without_flag(self, example_suite, durations_path):
+        example_suite.runpytest("--durations-path", durations_path)
+        assert not os.path.exists(durations_path)
+
+
 class TestSplitToSuites:
     @pytest.mark.parametrize(
         "param_idx, splits, expected_tests_per_group",
@@ -37,39 +66,53 @@ class TestSplitToSuites:
             (
                 0,
                 1,
-                [["test_6", "test_7", "test_8", "test_9", "test_10", "test_1", "test_2", "test_3", "test_4", "test_5"]],
-            ),
-            (
-                1,
-                2,
-                [
-                    ["test_6", "test_7", "test_8", "test_1", "test_2"],  # 8 seconds
-                    ["test_9", "test_10", "test_3", "test_4", "test_5"],  # 7 seconds
-                ],
-            ),
-            (
-                2,
-                3,
                 [
-                    ["test_6", "test_7", "test_1", "test_2"],  # 6 seconds
-                    ["test_8", "test_9", "test_3", "test_4"],  # 6 seconds
-                    ["test_10", "test_5"],  # 3 seconds
-                ],
-            ),
-            (
-                3,
-                4,
-                [
-                    ["test_6", "test_1", "test_2"],  # 4 seconds
-                    ["test_7", "test_3", "test_4"],  # 4 seconds
-                    ["test_8", "test_5", "test_9"],  # 5 seconds
-                    ["test_10"],  # 2 seconds
+                    [
+                        "test_1",
+                        "test_2",
+                        "test_3",
+                        "test_4",
+                        "test_5",
+                        "test_6",
+                        "test_7",
+                        "test_8",
+                        "test_9",
+                        "test_10",
+                    ]
                 ],
             ),
+            # (
+            #     1,
+            #     2,
+            #     [
+            #         ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6"],
+            #         ["test_7", "test_8", "test_9", "test_10"],
+            #     ],
+            # ),
+            # (
+            #     2,
+            #     3,
+            #     [
+            #         ["test_1", "test_2", "test_3", "test_4", "test_5"],
+            #         ["test_6", "test_7"],
+            #         ["test_8", "test_9", "test_10"],
+            #     ],
+            # ),
+            # (
+            #     3,
+            #     4,
+            #     [
+            #         ["test_1", "test_2", "test_3"],
+            #         ["test_4", "test_5", "test_6"],
+            #         ["test_7", "test_8"],
+            #         ["test_9", "test_10"],
+            #     ],
+            # ),
         ],
     )
     def test_it_splits(self, param_idx, splits, expected_tests_per_group, example_suite, durations_path):
         assert len(list(itertools.chain(*expected_tests_per_group))) == 10
+
         durations = {
             "test_it_splits{}/test_it_splits.py::test_1".format(param_idx): 1,
             "test_it_splits{}/test_it_splits.py::test_2".format(param_idx): 1,
             "test_it_splits{}/test_it_splits.py::test_3".format(param_idx): 1,
             "test_it_splits{}/test_it_splits.py::test_4".format(param_idx): 1,
             "test_it_splits{}/test_it_splits.py::test_5".format(param_idx): 1,
             "test_it_splits{}/test_it_splits.py::test_6".format(param_idx): 2,
             "test_it_splits{}/test_it_splits.py::test_7".format(param_idx): 2,
             "test_it_splits{}/test_it_splits.py::test_8".format(param_idx): 2,
             "test_it_splits{}/test_it_splits.py::test_9".format(param_idx): 2,
             "test_it_splits{}/test_it_splits.py::test_10".format(param_idx): 2,
         }

-        results = []
-        for group in range(splits):
-            with open(durations_path, "w") as f:
-                f.write(json.dumps(durations))
-            results.append(
-                example_suite.inline_run(
-                    "--splits",
-                    str(splits),
-                    "--group",
-                    str(group + 1),
-                    "--durations-path",
-                    durations_path,
-                )
+        with open(durations_path, "w") as f:
+            json.dump(durations, f)
+
+        results = [
+            example_suite.inline_run(
+                "--splits",
+                str(splits),
+                "--group",
+                str(group + 1),
+                "--durations-path",
+                durations_path,
             )
-
+            for group in range(splits)
+        ]
         for result, expected_tests in zip(results, expected_tests_per_group):
             result.assertoutcome(passed=len(expected_tests))
             assert _passed_test_names(result) == expected_tests

     def test_it_does_not_split_with_invalid_args(self, example_suite, durations_path):
         durations = {"test_it_does_not_split_with_invalid_args.py::test_1": 1}
         with open(durations_path, "w") as f:
-            f.write(json.dumps(durations))
+            json.dump(durations, f)

         # Plugin doesn't run when splits is passed but group is missing
-        result = example_suite.inline_run("--splits", "2")
+        result = example_suite.inline_run("--splits", "2", "--durations-path", durations_path)  # no --group
         result.assertoutcome(passed=10)

         # Plugin doesn't run when group is passed but splits is missing
-        result = example_suite.inline_run("--group", "2")
+        result = example_suite.inline_run("--group", "2", "--durations-path", durations_path)  # no --splits
         result.assertoutcome(passed=10)

         # Runs if they both are
-        result = example_suite.inline_run("--splits", "2", "--group", "1", "--durations-path", durations_path)
+        result = example_suite.inline_run("--splits", "2", "--group", "1")
         result.assertoutcome(passed=6)

     def test_it_adapts_splits_based_on_new_and_deleted_tests(self, example_suite, durations_path):
@@ -135,25 +176,23 @@ def test_it_adapts_splits_based_on_new_and_deleted_tests(self, example_suite, du
         }

         with open(durations_path, "w") as f:
-            f.write(json.dumps(durations))
+            json.dump(durations, f)

         result = example_suite.inline_run("--splits", "3", "--group", "1", "--durations-path", durations_path)
         result.assertoutcome(passed=4)
-        assert _passed_test_names(result) == ["test_5", "test_6", "test_10", "test_1"]
-
-        with open(durations_path, "w") as f:
-            f.write(json.dumps(durations))
+        assert _passed_test_names(result) == ["test_1", "test_2", "test_3", "test_4"]

         result = example_suite.inline_run("--splits", "3", "--group", "2", "--durations-path", durations_path)
-        result.assertoutcome(passed=4)
-        assert _passed_test_names(result) == ["test_2", "test_3", "test_4", "test_7"]
-
-        with open(durations_path, "w") as f:
-            f.write(json.dumps(durations))
+        result.assertoutcome(passed=3)
+        assert _passed_test_names(result) == ["test_5", "test_6", "test_7"]

         result = example_suite.inline_run("--splits", "3", "--group", "3", "--durations-path", durations_path)
-        result.assertoutcome(passed=2)
-        assert _passed_test_names(result) == ["test_8", "test_9"]
+        result.assertoutcome(passed=3)
+        assert _passed_test_names(result) == [
+            "test_8",
+            "test_9",
+            "test_10",
+        ]


 def _passed_test_names(result):

From e9206aaa06c9b464ae9b60941e2e3009ec58c2ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Mon, 31 May 2021 18:29:55 +0200
Subject: [PATCH 07/10] Cleanup

---
 .github/workflows/test.yml |  2 +-
 src/pytest_split/plugin.py | 24 ++-----------
 tests/test_plugin.py       | 70 ++++++++++++++++----------------------
 3 files changed, 33 insertions(+), 63 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 1fee0ee..9cd8d97 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -32,7 +32,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ "3.6", "3.7", "3.8", "3.9", ]  # "3.10.0-beta.1"
+        python-version: [ "3.6", "3.7", "3.8", "3.9" ]
         pytest-version: [ "4", "5", "6" ]
     steps:
       - name: Check out repository
diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index fdbb74d..d8b6ea9 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -78,8 +78,6 @@ def pytest_configure(config: "Config") -> None:


 class Base:
-    cache_file = "cache/pytest-split"
-
     def __init__(self, config: "Config") -> None:
         """
         Load cache and configure plugin.
@@ -116,17 +114,15 @@ def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Ite
         """
         Instruct Pytest to run the tests we've selected.

         This method is called by Pytest right after Pytest internals finish
         collecting tests.

         See https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L670.
         """
-        # Load plugin arguments
         splits: int = config.option.splits
         group: int = config.option.group

         selected_tests, deselected_tests = self._split_tests(splits, group, items, self.cached_durations)

-        items[:] = selected_tests  # type: ignore
+        items[:] = selected_tests
         config.hook.pytest_deselected(items=deselected_tests)

-        message = self.writer.markup(f"\n\nRunning group {group}/{splits}\n")
-        self.writer.line(message)
+        self.writer.line(self.writer.markup(f"\n\nRunning group {group}/{splits}\n"))
         return None

     @staticmethod
@@ -137,21 +133,7 @@ def _split_tests(
         stored_durations: dict,
     ) -> "Tuple[list, list]":
         """
-        Split tests by runtime.
-
-        The splitting logic is very simple. We find out what our threshold runtime
-        is per group, then start adding tests (slowest tests ordered first) until we
-        get close to the threshold runtime per group. We then reverse the ordering and
-        add the fastest tests available until we go just *beyond* the threshold.
-
-        The choice we're making is to overload the first groups a little bit. The reason
-        this is reasonable is that ci-providers like GHA will usually spin up the first
-        groups first, meaning that if you had a perfect test split, the first groups
-        would still finish first. The *overloading* is also minimal, so shouldn't
-        matter in most cases.
-
-        After assigning tests to each group we select the group we're in
-        and deselect all remaining tests.
+        Split tests into groups by runtime.
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index 654b90f..5840691 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -9,20 +9,7 @@

 @pytest.fixture
 def example_suite(testdir):
-    testdir.makepyfile(
-        """
-        def test_1(): pass
-        def test_2(): pass
-        def test_3(): pass
-        def test_4(): pass
-        def test_5(): pass
-        def test_6(): pass
-        def test_7(): pass
-        def test_8(): pass
-        def test_9(): pass
-        def test_10(): pass
-        """
-    )
+    testdir.makepyfile("".join(f"def test_{num}(): pass\n" for num in range(1, 11)))
     yield testdir


@@ -81,33 +68,33 @@ class TestSplitToSuites:
                 ]
             ],
         ),
-        # (
-        #     1,
-        #     2,
-        #     [
-        #         ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6"],
-        #         ["test_7", "test_8", "test_9", "test_10"],
-        #     ],
-        # ),
-        # (
-        #     2,
-        #     3,
-        #     [
-        #         ["test_1", "test_2", "test_3", "test_4", "test_5"],
-        #         ["test_6", "test_7"],
-        #         ["test_8", "test_9", "test_10"],
-        #     ],
-        # ),
-        # (
-        #     3,
-        #     4,
-        #     [
-        #         ["test_1", "test_2", "test_3"],
-        #         ["test_4", "test_5", "test_6"],
-        #         ["test_7", "test_8"],
-        #         ["test_9", "test_10"],
-        #     ],
-        # ),
+        (
+            1,
+            2,
+            [
+                ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6", "test_7"],
+                ["test_8", "test_9", "test_10"],
+            ],
+        ),
+        (
+            2,
+            3,
+            [
+                ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6"],
+                ["test_7", "test_8", "test_9"],
+                ["test_10"],
+            ],
+        ),
+        (
+            3,
+            4,
+            [
+                ["test_1", "test_2", "test_3", "test_4"],
+                ["test_5", "test_6", "test_7"],
+                ["test_8", "test_9"],
+                ["test_10"],
+            ],
+        ),
     ],
 )
@@ -140,6 +127,7 @@ def test_it_splits(self, param_idx, splits, expected_tests_per_group, example_su
             )
             for group in range(splits)
         ]
+
         for result, expected_tests in zip(results, expected_tests_per_group):
             result.assertoutcome(passed=len(expected_tests))
             assert _passed_test_names(result) == expected_tests
From 41fe7757f96decc62b0079e0f78b7a66fde82e10 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Tue, 8 Jun 2021 13:27:41 +0200
Subject: [PATCH 08/10] Clean up messages and add support for loading the old
 list format

---
 .test_durations            |  1 +
 src/pytest_split/plugin.py | 56 +++++++++++++------------------------
 tests/test_plugin.py       | 34 +++--------------------
 3 files changed, 24 insertions(+), 67 deletions(-)
 create mode 100644 .test_durations

diff --git a/.test_durations b/.test_durations
new file mode 100644
index 0000000..95727ff
--- /dev/null
+++ b/.test_durations
@@ -0,0 +1 @@
+{"tests/test_plugin.py::TestStoreDurations::test_it_stores": 0.08707766099999997, "tests/test_plugin.py::TestStoreDurations::test_it_does_not_store_without_flag": 0.09066593100000003, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[0-1-expected_tests_per_group0]": 0.08363778099999997, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[1-2-expected_tests_per_group1]": 0.12921703699999998, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[2-3-expected_tests_per_group2]": 0.17863698999999988, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[3-4-expected_tests_per_group3]": 0.22171246299999992, "tests/test_plugin.py::TestSplitToSuites::test_it_does_not_split_with_invalid_args": 0.15940266000000003, "tests/test_plugin.py::TestSplitToSuites::test_it_adapts_splits_based_on_new_and_deleted_tests": 0.17758088800000005, "tests/test_plugin.py::TestRaisesUsageErrors::test_returns_nonzero_when_group_but_not_splits": 0.04033764599999978}
diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index bd27bd5..d7caa4d 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -1,6 +1,5 @@
 import json
 import os
-from collections import namedtuple
 from typing import TYPE_CHECKING

 import pytest
@@ -17,9 +16,6 @@
 # Ugly hack for freezegun compatibility: https://github.com/spulec/freezegun/issues/286
 STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD = 60 * 10  # seconds

-TestGroup = namedtuple("TestGroup", "index, num_tests")
-TestSuite = namedtuple("TestSuite", "splits, num_tests")
-

 def pytest_addoption(parser: "Parser") -> None:
     """
@@ -67,7 +63,7 @@ def pytest_cmdline_main(config: "Config") -> "Optional[Union[int, ExitCode]]":
     splits = config.getoption("splits")

     if splits is None and group is None:
-        return 0
+        return None

     if splits and group is None:
         raise pytest.UsageError("argument `--group` is required")
@@ -98,7 +94,9 @@ def pytest_configure(config: "Config") -> None:
 class Base:
     def __init__(self, config: "Config") -> None:
         """
-        Load cache and configure plugin.
+        Load durations and set up a terminal writer.
+
+        This logic is shared for both the split- and cache plugin.
         """
         self.config = config
         self.writer = create_terminal_writer(self.config)
@@ -109,47 +107,32 @@ def __init__(self, config: "Config") -> None:
         except FileNotFoundError:
             self.cached_durations = {}

+        # This code provides backwards compatibility after we switched
+        # from saving durations in a list-of-lists to a dict format
+        # Remove this when bumping to v1
+        if isinstance(self.cached_durations, list):
+            self.cached_durations = {test_name: duration for test_name, duration in self.cached_durations}
+

 class PytestSplitPlugin(Base):
     def __init__(self, config: "Config"):
         super().__init__(config)
         self._messages: "List[str]" = []

         if not self.cached_durations:
             message = self.writer.markup(
-                "\nNo test durations found. Pytest-split will "
+                "\n[pytest-split] No test durations found. Pytest-split will "
                 "split tests evenly when no durations are found. "
-                "\nYou can expect better results in subsequent runs, "
+                "\n[pytest-split] You can expect better results in subsequent runs, "
                 "when test timings have been documented.\n"
             )
             self.writer.line(message)

-    def pytest_report_collectionfinish(self, config: "Config") -> "List[str]":
-        lines = []
-        if self._messages:
-            lines += self._messages
-
-        if hasattr(self, "_suite"):
-            lines.append(
-                f"Running group {self._group.index}/{self._suite.splits}"
-                f" ({self._group.num_tests}/{self._suite.num_tests}) tests"
-            )
-
-        prefix = "[pytest-split]"
-        lines = [f"{prefix} {m}" for m in lines]
-
-        return lines
-
     @hookimpl(tryfirst=True)
     def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Item]") -> None:
         """
-        Instruct Pytest to run the tests we've selected.
-
-        This method is called by Pytest right after Pytest internals finish
-        collecting tests.
-
-        See https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L670.
+        Collect and select the tests we want to run, and deselect the rest.
         """
         splits: int = config.option.splits
         group: int = config.option.group
@@ -159,10 +142,7 @@ def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Ite
         items[:] = selected_tests
         config.hook.pytest_deselected(items=deselected_tests)

-        self._suite = TestSuite(splits, len(items))
-        self._group = TestGroup(group, end_idx - start_idx)
-
-        self.writer.line(self.writer.markup(f"\n\nRunning group {group}/{splits}\n"))
+        self.writer.line(self.writer.markup(f"\n\n[pytest-split] Running group {group}/{splits}\n"))
         return None

     @staticmethod
@@ -256,5 +236,7 @@ def pytest_sessionfinish(self) -> None:
         with open(self.config.option.durations_path, "w") as f:
             json.dump(self.cached_durations, f)

-        message = self.writer.markup("\n\nStored test durations in {}\n".format(self.config.option.durations_path))
+        message = self.writer.markup(
+            "\n\n[pytest-split] Stored test durations in {}".format(self.config.option.durations_path)
+        )
         self.writer.line(message)
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index dc1c1f2..3ba6ac1 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -3,6 +3,7 @@
 import os

 import pytest
+from _pytest.config import ExitCode

 pytest_plugins = ["pytester"]
@@ -139,11 +140,11 @@ def test_it_does_not_split_with_invalid_args(self, example_suite, durations_path

         # Plugin doesn't run when splits is passed but group is missing
         result = example_suite.inline_run("--splits", "2", "--durations-path", durations_path)  # no --group
-        result.assertoutcome(passed=10)
+        assert result.ret == ExitCode.USAGE_ERROR

         # Plugin doesn't run when group is passed but splits is missing
         result = example_suite.inline_run("--group", "2", "--durations-path", durations_path)  # no --splits
-        result.assertoutcome(passed=10)
+        assert result.ret == ExitCode.USAGE_ERROR

         # Runs if they both are
         result = example_suite.inline_run("--splits", "2", "--group", "1")
@@ -221,14 +222,6 @@ def test_returns_nonzero_when_splits_below_one(self, example_suite, capsys):


 class TestHasExpectedOutput:
-    def test_does_not_print_splitting_summary_when_durations_missing(self, example_suite, capsys):
-        result = example_suite.inline_run("--splits", "1", "--group", "1")
-        assert result.ret == 0
-
-        outerr = capsys.readouterr()
-        assert "[pytest-split] Not splitting tests because the durations_report is missing" in outerr.out
-        assert "[pytest-split] Running group" not in outerr.out
-
     def test_prints_splitting_summary_when_durations_present(self, example_suite, capsys, durations_path):
         test_name = "test_prints_splitting_summary_when_durations_present"
         with open(durations_path, "w") as f:
@@ -237,26 +230,7 @@ def test_prints_splitting_summary_when_durations_present(self, example_suite, ca
         assert result.ret == 0

         outerr = capsys.readouterr()
-        assert "[pytest-split] Running group 1/1 (10/10) tests" in outerr.out
-
-    def test_prints_splitting_summary_when_storing_durations(self, example_suite, capsys, durations_path):
-        test_name = "test_prints_splitting_summary_when_storing_durations"
-        with open(durations_path, "w") as f:
-            json.dump([[f"{test_name}0/{test_name}.py::test_1", 0.5]], f)
-
-        result = example_suite.inline_run(
-            "--splits",
-            "1",
-            "--group",
-            "1",
-            "--durations-path",
-            durations_path,
-            "--store-durations",
-        )
-        assert result.ret == 0
-
-        outerr = capsys.readouterr()
-        assert "[pytest-split] Not splitting tests because we are storing durations" in outerr.out
+        assert "[pytest-split] Running group 1/1" in outerr.out

     def test_does_not_print_splitting_summary_when_no_pytest_split_arguments(self, example_suite, capsys):
         result = example_suite.inline_run()

From 0f7866884e6ca9cc2d57e3d83b1bb39f6cff8075 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Tue, 8 Jun 2021 13:30:25 +0200
Subject: [PATCH 09/10] Run on pytest 5 and 6 - 4 is not compatible

---
 .github/workflows/test.yml | 2 +-
 .test_durations            | 1 -
 src/pytest_split/plugin.py | 4 +++-
 tests/test_plugin.py       | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)
 delete mode 100644 .test_durations

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 9cd8d97..5432f4d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -33,7 +33,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: [ "3.6", "3.7", "3.8", "3.9" ]
-        pytest-version: [ "4", "5", "6" ]
+        pytest-version: [ "5", "6" ]
     steps:
       - name: Check out repository
         uses: actions/checkout@v2
diff --git a/.test_durations b/.test_durations
deleted file mode 100644
index 95727ff..0000000
diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index d7caa4d..0d129ef 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -10,7 +10,9 @@
     from typing import List, Tuple, Optional, Union

     from _pytest import nodes
-    from _pytest.config import Config, ExitCode
+
+    from _pytest.main import ExitCode
+    from _pytest.config import Config
     from _pytest.config.argparsing import Parser

 # Ugly hack for freezegun compatibility: https://github.com/spulec/freezegun/issues/286
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index 3ba6ac1..ae870e3 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -3,7 +3,7 @@
 import os

 import pytest
-from _pytest.config import ExitCode
+from _pytest.main import ExitCode

 pytest_plugins = ["pytester"]

From 604948d85fdc6690947f95bdec7bb074f4a99ae1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sondre=20Lilleb=C3=B8=20Gundersen?=
Date: Tue, 8 Jun 2021 14:38:26 +0200
Subject: [PATCH 10/10] Add the old format into 'test_it_splits' to make sure
 we're reading both correctly

---
 tests/test_plugin.py | 62 ++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index ae870e3..005fc4f 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -101,37 +101,37 @@ class TestSplitToSuites:
     def test_it_splits(self, param_idx, splits, expected_tests_per_group, example_suite, durations_path):
         assert len(list(itertools.chain(*expected_tests_per_group))) == 10

-        durations = {
-            "test_it_splits{}/test_it_splits.py::test_1".format(param_idx): 1,
-            "test_it_splits{}/test_it_splits.py::test_2".format(param_idx): 1,
-            "test_it_splits{}/test_it_splits.py::test_3".format(param_idx): 1,
-            "test_it_splits{}/test_it_splits.py::test_4".format(param_idx): 1,
-            "test_it_splits{}/test_it_splits.py::test_5".format(param_idx): 1,
-            "test_it_splits{}/test_it_splits.py::test_6".format(param_idx): 2,
-            "test_it_splits{}/test_it_splits.py::test_7".format(param_idx): 2,
-            "test_it_splits{}/test_it_splits.py::test_8".format(param_idx): 2,
-            "test_it_splits{}/test_it_splits.py::test_9".format(param_idx): 2,
-            "test_it_splits{}/test_it_splits.py::test_10".format(param_idx): 2,
-        }
-
-        with open(durations_path, "w") as f:
-            json.dump(durations, f)
-
-        results = [
-            example_suite.inline_run(
-                "--splits",
-                str(splits),
-                "--group",
-                str(group + 1),
-                "--durations-path",
-                durations_path,
-            )
-            for group in range(splits)
-        ]
-
-        for result, expected_tests in zip(results, expected_tests_per_group):
-            result.assertoutcome(passed=len(expected_tests))
-            assert _passed_test_names(result) == expected_tests
+        for durations in [
+            # Legacy format - can be removed in v1
+            [
+                *[[f"test_it_splits{param_idx}/test_it_splits.py::test_{num}", 1] for num in range(1, 6)],
+                *[[f"test_it_splits{param_idx}/test_it_splits.py::test_{num}", 2] for num in range(6, 11)],
+            ],
+            # Current format
+            {
+                **{f"test_it_splits{param_idx}/test_it_splits.py::test_{num}": 1 for num in range(1, 6)},
+                **{f"test_it_splits{param_idx}/test_it_splits.py::test_{num}": 2 for num in range(6, 11)},
+            },
+        ]:
+
+            with open(durations_path, "w") as f:
+                json.dump(durations, f)
+
+            results = [
+                example_suite.inline_run(
+                    "--splits",
+                    str(splits),
+                    "--group",
+                    str(group + 1),
+                    "--durations-path",
+                    durations_path,
+                )
+                for group in range(splits)
+            ]
+
+            for result, expected_tests in zip(results, expected_tests_per_group):
+                result.assertoutcome(passed=len(expected_tests))
+                assert _passed_test_names(result) == expected_tests

     def test_it_does_not_split_with_invalid_args(self, example_suite, durations_path):
         durations = {"test_it_does_not_split_with_invalid_args.py::test_1": 1}
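Across the series the durations file has had two on-disk layouts: the original list of [node_id, seconds] pairs and the current flat mapping. The compatibility shim added in PATCH 08 normalizes the old layout on load, which is exactly what this final patch exercises. Schematically, with invented sample data:

    # Sketch of the two layouts PATCH 10's parametrized test feeds the plugin
    legacy = [["tests/test_x.py::test_1", 1.0], ["tests/test_x.py::test_2", 2.0]]
    current = {"tests/test_x.py::test_1": 1.0, "tests/test_x.py::test_2": 2.0}

    # Mirrors the isinstance(..., list) branch added to Base.__init__ in PATCH 08
    normalized = {name: duration for name, duration in legacy}
    assert normalized == current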