forked from mlflow/mlflow
-
Notifications
You must be signed in to change notification settings - Fork 0
/
conftest.py
148 lines (126 loc) · 5.33 KB
/
conftest.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import os
import posixpath
import pytest
import shutil
import json
import subprocess
from mlflow.environment_variables import _MLFLOW_TESTING
def pytest_addoption(parser):
    """Register the test suite's custom command-line flags with pytest."""
    option_specs = [
        (
            "--requires-ssh",
            dict(
                action="store_true",
                dest="requires_ssh",
                default=False,
                help="Run tests decorated with 'requires_ssh' annotation. "
                "These tests require keys to be configured locally "
                "for SSH authentication.",
            ),
        ),
        (
            "--ignore-flavors",
            dict(
                action="store_true",
                dest="ignore_flavors",
                default=False,
                help="Ignore tests for model flavors.",
            ),
        ),
    ]
    for flag, kwargs in option_specs:
        parser.addoption(flag, **kwargs)
def pytest_configure(config):
    """Register custom markers to suppress `PytestUnknownMarkWarning`."""
    for marker in (
        "requires_ssh",
        "notrackingurimock",
        "allow_infer_pip_requirements_fallback",
    ):
        config.addinivalue_line("markers", marker)
def pytest_runtest_setup(item):
    """Skip tests marked `requires_ssh` unless `--requires-ssh` was passed."""
    needs_ssh = any(mark.name == "requires_ssh" for mark in item.iter_markers())
    if needs_ssh and not item.config.getoption("--requires-ssh"):
        pytest.skip("use `--requires-ssh` to run this test")
@pytest.hookimpl(hookwrapper=True)
def pytest_ignore_collect(path, config):
    """Ignore model-flavor test paths when `--ignore-flavors` is given.

    Runs as a wrapper around the default `pytest_ignore_collect` hook: only
    paths the default hook did NOT already ignore are considered here.
    """
    outcome = yield
    if not outcome.get_result() and config.getoption("ignore_flavors"):
        # If not ignored by the default hook and `--ignore-flavors` specified
        # Ignored files and directories must be included in dev/run-python-flavor-tests.sh
        # A set literal gives O(1) membership tests instead of scanning a list.
        model_flavors = {
            "tests/h2o",
            "tests/keras",
            "tests/pytorch",
            "tests/pyfunc",
            "tests/sagemaker",
            "tests/sklearn",
            "tests/spark",
            "tests/mleap",
            "tests/tensorflow",
            "tests/azureml",
            "tests/onnx",
            "tests/gluon",
            "tests/xgboost",
            "tests/lightgbm",
            "tests/catboost",
            "tests/statsmodels",
            "tests/spacy",
            "tests/fastai",
            "tests/models",
            "tests/shap",
            "tests/paddle",
            "tests/prophet",
            "tests/pmdarima",
            "tests/diviner",
            "tests/transformers",
            "tests/sentence_transformers",
            "tests/openai",
            "tests/langchain",
            "tests/johnsnowlabs",
            "tests/test_mlflow_lazily_imports_ml_packages.py",
            "tests/utils/test_model_utils.py",
            # this test is included here because it imports many big libraries like tf, keras, etc
            "tests/tracking/fluent/test_fluent_autolog.py",
            # cross flavor autologging related tests.
            "tests/autologging/test_autologging_safety_unit.py",
            "tests/autologging/test_autologging_behaviors_unit.py",
            "tests/autologging/test_autologging_behaviors_integration.py",
            "tests/autologging/test_autologging_utils.py",
            "tests/autologging/test_training_session.py",
            # opt in authentication feature
            "tests/server/auth",
            "tests/gateway",
        }
        relpath = os.path.relpath(str(path))
        relpath = relpath.replace(os.sep, posixpath.sep)  # for Windows
        if relpath in model_flavors:
            outcome.force_result(True)
def pytest_collection_modifyitems(session, config, items):  # pylint: disable=unused-argument
    """Run `tests.server.test_prometheus_exporter` before all other test items.

    Executing `tests.server.test_prometheus_exporter` after `tests.server.test_handlers`
    results in an error because Flask >= 2.2.0 doesn't allow calling setup method such as
    `before_request` on the application after the first request. To avoid this issue,
    execute `tests.server.test_prometheus_exporter` first by reordering the test items.
    """

    def _sort_key(item):
        # False sorts before True, so the prometheus exporter module comes first;
        # sort() is stable, so all other items keep their relative order.
        return item.module.__name__ != "tests.server.test_prometheus_exporter"

    items.sort(key=_sort_key)
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(
    terminalreporter, exitstatus, config
):  # pylint: disable=unused-argument
    """After the terminal summary, print a ready-to-run pytest command
    that reproduces the failures (individual cases, or whole suites when
    there are too many failed cases).
    """
    yield
    failures = terminalreporter.stats.get("failed", [])
    if not failures:
        return
    if len(failures) > 30:
        terminalreporter.section("command to run failed test suites")
        # Use dict.fromkeys to preserve the order
        targets = list(dict.fromkeys(report.fspath for report in failures))
    else:
        terminalreporter.section("command to run failed test cases")
        targets = [repr(report.nodeid) for report in failures]
    terminalreporter.write(" ".join(["pytest"] + targets))
    terminalreporter.write("\n" * 2)
@pytest.fixture(scope="module", autouse=True)
def clean_up_envs():
    """On CI, delete virtualenv and conda environments created while the
    module's tests ran, to keep disk usage on the runner bounded.
    """
    yield
    if "GITHUB_ACTIONS" not in os.environ:
        return
    from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root

    shutil.rmtree(_get_mlflow_virtualenv_root(), ignore_errors=True)
    if os.name != "nt":
        # Remove every conda environment except the base installation.
        conda_info = json.loads(subprocess.check_output(["conda", "info", "--json"], text=True))
        base_prefix = conda_info["root_prefix"]
        for env_path in conda_info["envs"]:
            if env_path != base_prefix:
                shutil.rmtree(env_path, ignore_errors=True)
@pytest.fixture(scope="session", autouse=True)
def enable_mlflow_testing():
    """Keep the `_MLFLOW_TESTING` environment variable set to "TRUE" for the
    whole test session, undoing the patch at session teardown.
    """
    mp = pytest.MonkeyPatch()
    try:
        mp.setenv(_MLFLOW_TESTING.name, "TRUE")
        yield
    finally:
        mp.undo()