Clean up messages and add support for loading the old list format
sondrelg committed Jun 8, 2021
1 parent c506046 commit 41fe775
Showing 3 changed files with 24 additions and 67 deletions.
1 change: 1 addition & 0 deletions .test_durations
@@ -0,0 +1 @@
{"tests/test_plugin.py::TestStoreDurations::test_it_stores": 0.08707766099999997, "tests/test_plugin.py::TestStoreDurations::test_it_does_not_store_without_flag": 0.09066593100000003, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[0-1-expected_tests_per_group0]": 0.08363778099999997, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[1-2-expected_tests_per_group1]": 0.12921703699999998, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[2-3-expected_tests_per_group2]": 0.17863698999999988, "tests/test_plugin.py::TestSplitToSuites::test_it_splits[3-4-expected_tests_per_group3]": 0.22171246299999992, "tests/test_plugin.py::TestSplitToSuites::test_it_does_not_split_with_invalid_args": 0.15940266000000003, "tests/test_plugin.py::TestSplitToSuites::test_it_adapts_splits_based_on_new_and_deleted_tests": 0.17758088800000005, "tests/test_plugin.py::TestRaisesUsageErrors::test_returns_nonzero_when_group_but_not_splits": 0.04033764599999978}
56 changes: 19 additions & 37 deletions src/pytest_split/plugin.py
@@ -1,6 +1,5 @@
import json
import os
from collections import namedtuple
from typing import TYPE_CHECKING

import pytest
@@ -17,9 +16,6 @@
# Ugly hack for freezegun compatibility: https://github.com/spulec/freezegun/issues/286
STORE_DURATIONS_SETUP_AND_TEARDOWN_THRESHOLD = 60 * 10 # seconds

TestGroup = namedtuple("TestGroup", "index, num_tests")
TestSuite = namedtuple("TestSuite", "splits, num_tests")


def pytest_addoption(parser: "Parser") -> None:
"""
@@ -67,7 +63,7 @@ def pytest_cmdline_main(config: "Config") -> "Optional[Union[int, ExitCode]]":
splits = config.getoption("splits")

if splits is None and group is None:
return 0
return None

if splits and group is None:
raise pytest.UsageError("argument `--group` is required")
@@ -98,7 +94,9 @@ def pytest_configure(config: "Config") -> None:
class Base:
def __init__(self, config: "Config") -> None:
"""
Load cache and configure plugin.
Load durations and set up a terminal writer.
This logic is shared for both the split- and cache plugin.
"""
self.config = config
self.writer = create_terminal_writer(self.config)
@@ -109,47 +107,32 @@ def __init__(self, config: "Config") -> None:
except FileNotFoundError:
self.cached_durations = {}

# This code provides backwards compatibility after we switched
# from saving durations in a list-of-lists to a dict format
# Remove this when bumping to v1
if isinstance(self.cached_durations, list):
self.cached_durations = {test_name: duration for test_name, duration in self.cached_durations}


class PytestSplitPlugin(Base):
def __init__(self, config: "Config"):
super().__init__(config)
self._suite: TestSuite
self._group: TestGroup

self._messages: "List[str]" = []

if not self.cached_durations:
message = self.writer.markup(
"\nNo test durations found. Pytest-split will "
"\n[pytest-split] No test durations found. Pytest-split will "
"split tests evenly when no durations are found. "
"\nYou can expect better results in consequent runs, "
"\n[pytest-split] You can expect better results in consequent runs, "
"when test timings have been documented.\n"
)
self.writer.line(message)

def pytest_report_collectionfinish(self, config: "Config") -> "List[str]":
lines = []
if self._messages:
lines += self._messages

if hasattr(self, "_suite"):
lines.append(
f"Running group {self._group.index}/{self._suite.splits}"
f" ({self._group.num_tests}/{self._suite.num_tests}) tests"
)

prefix = "[pytest-split]"
lines = [f"{prefix} {m}" for m in lines]

return lines

@hookimpl(tryfirst=True)
def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Item]") -> None:
"""
Instruct Pytest to run the tests we've selected.
This method is called by Pytest right after Pytest internals finishes
collecting tests.
See https://github.com/pytest-dev/pytest/blob/main/src/_pytest/main.py#L670.
Collect and select the tests we want to run, and deselect the rest.
"""
splits: int = config.option.splits
group: int = config.option.group
@@ -159,10 +142,7 @@ def pytest_collection_modifyitems(self, config: "Config", items: "List[nodes.Item]") -> None:
items[:] = selected_tests
config.hook.pytest_deselected(items=deselected_tests)

self._suite = TestSuite(splits, len(items))
self._group = TestGroup(group, end_idx - start_idx)

self.writer.line(self.writer.markup(f"\n\nRunning group {group}/{splits}\n"))
self.writer.line(self.writer.markup(f"\n\n[pytest-split] Running group {group}/{splits}\n"))
return None

@staticmethod
@@ -256,5 +236,7 @@ def pytest_sessionfinish(self) -> None:
with open(self.config.option.durations_path, "w") as f:
json.dump(self.cached_durations, f)

message = self.writer.markup("\n\nStored test durations in {}\n".format(self.config.option.durations_path))
message = self.writer.markup(
"\n\n[pytest-split] Stored test durations in {}".format(self.config.option.durations_path)
)
self.writer.line(message)
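Taken together, the duration-loading behaviour in Base.__init__ after this commit boils down to the sketch below. load_durations is a hypothetical standalone helper written for illustration only, assuming config.option.durations_path points at the JSON durations file; it is not a function defined in the plugin.

import json

def load_durations(durations_path: str) -> dict:
    """Load cached test durations, tolerating both storage formats."""
    try:
        with open(durations_path) as f:
            durations = json.load(f)
    except FileNotFoundError:
        # No stored durations yet: the plugin will split tests evenly instead
        return {}
    # Backwards compatibility for the old list-of-lists format (slated for removal in v1)
    if isinstance(durations, list):
        durations = {test_name: duration for test_name, duration in durations}
    return durations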
34 changes: 4 additions & 30 deletions tests/test_plugin.py
@@ -3,6 +3,7 @@
import os

import pytest
from _pytest.config import ExitCode

pytest_plugins = ["pytester"]

@@ -139,11 +140,11 @@ def test_it_does_not_split_with_invalid_args(self, example_suite, durations_path

# Plugin doesn't run when splits is passed but group is missing
result = example_suite.inline_run("--splits", "2", "--durations-path", durations_path) # no --group
result.assertoutcome(passed=10)
assert result.ret == ExitCode.USAGE_ERROR

# Plugin doesn't run when group is passed but splits is missing
result = example_suite.inline_run("--group", "2", "--durations-path", durations_path) # no --splits
result.assertoutcome(passed=10)
assert result.ret == ExitCode.USAGE_ERROR

# Runs if they both are
result = example_suite.inline_run("--splits", "2", "--group", "1")
@@ -221,14 +222,6 @@ def test_returns_nonzero_when_splits_below_one(self, example_suite, capsys):


class TestHasExpectedOutput:
def test_does_not_print_splitting_summary_when_durations_missing(self, example_suite, capsys):
result = example_suite.inline_run("--splits", "1", "--group", "1")
assert result.ret == 0

outerr = capsys.readouterr()
assert "[pytest-split] Not splitting tests because the durations_report is missing" in outerr.out
assert "[pytest-split] Running group" not in outerr.out

def test_prints_splitting_summary_when_durations_present(self, example_suite, capsys, durations_path):
test_name = "test_prints_splitting_summary_when_durations_present"
with open(durations_path, "w") as f:
@@ -237,26 +230,7 @@ def test_prints_splitting_summary_when_durations_present(self, example_suite, capsys, durations_path):
assert result.ret == 0

outerr = capsys.readouterr()
assert "[pytest-split] Running group 1/1 (10/10) tests" in outerr.out

def test_prints_splitting_summary_when_storing_durations(self, example_suite, capsys, durations_path):
test_name = "test_prints_splitting_summary_when_storing_durations"
with open(durations_path, "w") as f:
json.dump([[f"{test_name}0/{test_name}.py::test_1", 0.5]], f)

result = example_suite.inline_run(
"--splits",
"1",
"--group",
"1",
"--durations-path",
durations_path,
"--store-durations",
)
assert result.ret == 0

outerr = capsys.readouterr()
assert "[pytest-split] Not splitting tests because we are storing durations" in outerr.out
assert "[pytest-split] Running group 1/1" in outerr.out

def test_does_not_print_splitting_summary_when_no_pytest_split_arguments(self, example_suite, capsys):
result = example_suite.inline_run()
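The updated assertions above reflect that passing only one of --splits/--group is now reported as a usage error rather than quietly running the full suite. A minimal sketch of that expectation, assuming the suite's example_suite pytester fixture and an illustrative test name:

from _pytest.config import ExitCode

def test_group_is_required_when_splits_is_given(example_suite):
    # pytest_cmdline_main raises pytest.UsageError, so the run exits with USAGE_ERROR
    result = example_suite.inline_run("--splits", "2")
    assert result.ret == ExitCode.USAGE_ERROR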
