diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 1fee0ee..9cd8d97 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -32,7 +32,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: [ "3.6", "3.7", "3.8", "3.9", ]  # "3.10.0-beta.1"
+        python-version: [ "3.6", "3.7", "3.8", "3.9" ]
         pytest-version: [ "4", "5", "6" ]
     steps:
       - name: Check out repository
diff --git a/src/pytest_split/plugin.py b/src/pytest_split/plugin.py
index fdbb74d..d8b6ea9 100644
--- a/src/pytest_split/plugin.py
+++ b/src/pytest_split/plugin.py
@@ -78,8 +78,6 @@ def pytest_configure(config: "Config") -> None:
 
 
 class Base:
-    cache_file = "cache/pytest-split"
-
     def __init__(self, config: "Config") -> None:
         """
         Load cache and configure plugin.
@@ -116,17 +114,15 @@ def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Ite
 
         See https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L670.
         """
-        # Load plugin arguments
        splits: int = config.option.splits
         group: int = config.option.group
 
         selected_tests, deselected_tests = self._split_tests(splits, group, items, self.cached_durations)
 
-        items[:] = selected_tests  # type: ignore
+        items[:] = selected_tests
         config.hook.pytest_deselected(items=deselected_tests)
 
-        message = self.writer.markup(f"\n\nRunning group {group}/{splits}\n")
-        self.writer.line(message)
+        self.writer.line(self.writer.markup(f"\n\nRunning group {group}/{splits}\n"))
         return None
 
     @staticmethod
@@ -137,21 +133,7 @@ def _split_tests(
         stored_durations: dict,
     ) -> "Tuple[list, list]":
         """
-        Split tests by runtime.
-
-        The splitting logic is very simple. We find out what our threshold runtime
-        is per group, then start adding tests (slowest tests ordered first) until we
-        get close to the threshold runtime per group. We then reverse the ordering and
-        add the fastest tests available until we go just *beyond* the threshold.
-
-        The choice we're making is to overload the first groups a little bit. The reason
-        this reasonable is that ci-providers like GHA will usually spin up the first
-        groups first, meaning that if you had a perfect test split, the first groups
-        would still finish first. The *overloading* is also minimal, so shouldn't
-        matter in most cases.
-
-        After assigning tests to each group we select the group we're in
-        and deselect all remaining tests.
+        Split tests into groups by runtime.
 
         :param splits: How many groups we're splitting in.
         :param group: Which group this run represents.
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index 654b90f..5840691 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -9,20 +9,7 @@
 
 @pytest.fixture
 def example_suite(testdir):
-    testdir.makepyfile(
-        """
-        def test_1(): pass
-        def test_2(): pass
-        def test_3(): pass
-        def test_4(): pass
-        def test_5(): pass
-        def test_6(): pass
-        def test_7(): pass
-        def test_8(): pass
-        def test_9(): pass
-        def test_10(): pass
-        """
-    )
+    testdir.makepyfile("".join(f"def test_{num}(): pass\n" for num in range(1, 11)))
     yield testdir
 
 
@@ -81,33 +68,33 @@ class TestSplitToSuites:
                     ]
                 ],
             ),
-            # (
-            #     1,
-            #     2,
-            #     [
-            #         ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6"],
-            #         ["test_7", "test_8", "test_9", "test_10"],
-            #     ],
-            # ),
-            # (
-            #     2,
-            #     3,
-            #     [
-            #         ["test_1", "test_2", "test_3", "test_4", "test_5"],
-            #         ["test_6", "test_7"],
-            #         ["test_8", "test_9", "test_10"],
-            #     ],
-            # ),
-            # (
-            #     3,
-            #     4,
-            #     [
-            #         ["test_1", "test_2", "test_3"],
-            #         ["test_4", "test_5", "test_6"],
-            #         ["test_7", "test_8"],
-            #         ["test_9", "test_10"],
-            #     ],
-            # ),
+            (
+                1,
+                2,
+                [
+                    ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6", "test_7"],
+                    ["test_8", "test_9", "test_10"],
+                ],
+            ),
+            (
+                2,
+                3,
+                [
+                    ["test_1", "test_2", "test_3", "test_4", "test_5", "test_6"],
+                    ["test_7", "test_8", "test_9"],
+                    ["test_10"],
+                ],
+            ),
+            (
+                3,
+                4,
+                [
+                    ["test_1", "test_2", "test_3", "test_4"],
+                    ["test_5", "test_6", "test_7"],
+                    ["test_8", "test_9"],
+                    ["test_10"],
+                ],
+            ),
         ],
     )
     def test_it_splits(self, param_idx, splits, expected_tests_per_group, example_suite, durations_path):
@@ -140,6 +127,7 @@ def test_it_splits(self, param_idx, splits, expected_tests_per_group, example_su
             )
             for group in range(splits)
         ]
+
         for result, expected_tests in zip(results, expected_tests_per_group):
             result.assertoutcome(passed=len(expected_tests))
             assert _passed_test_names(result) == expected_tests
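
The docstring trimmed from _split_tests in src/pytest_split/plugin.py above described the grouping heuristic: compute a per-group runtime threshold, fill each group with the slowest remaining tests until it approaches that threshold, then top it up with the fastest remaining tests until it goes just beyond it, slightly overloading the earlier groups. Below is a minimal sketch of that idea, not part of the patch and not the plugin's actual implementation; the helper name greedy_split, its signature, and the exact threshold handling are illustrative assumptions.

from typing import Dict, List


def greedy_split(test_names: List[str], durations: Dict[str, float], splits: int) -> List[List[str]]:
    """Sketch of the heuristic described in the removed docstring (assumed, simplified)."""
    # Per-group runtime threshold: total recorded runtime divided evenly across groups.
    threshold = sum(durations.get(name, 0.0) for name in test_names) / splits
    # Order tests slowest-first; unknown tests count as zero runtime here.
    remaining = sorted(test_names, key=lambda name: durations.get(name, 0.0), reverse=True)
    groups: List[List[str]] = []
    for index in range(splits):
        if index == splits - 1:
            # The last group simply takes whatever is left over.
            groups.append(remaining)
            break
        group: List[str] = []
        runtime = 0.0
        # Add slowest tests while the next one still fits under the threshold.
        while remaining and runtime + durations.get(remaining[0], 0.0) <= threshold:
            group.append(remaining.pop(0))
            runtime += durations.get(group[-1], 0.0)
        # Then add the fastest remaining tests until the group goes just beyond the
        # threshold, which is what overloads the earlier groups a little.
        while remaining and runtime <= threshold:
            group.append(remaining.pop())
            runtime += durations.get(group[-1], 0.0)
        groups.append(group)
    return groups

With ten equally fast tests and splits=2, this sketch yields a 6/4 split; the 7/3, 6/3/1 and 4/3/2/1 groupings asserted in the updated test expectations come from the plugin's own threshold handling, which this sketch does not reproduce exactly.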