Commit

modify pytest_collection_modifyitems to use the executor flag, only iterate through test items once
Priya2698 committed Nov 5, 2024
1 parent 65e2f60 commit 6c23c44
Showing 2 changed files with 27 additions and 36 deletions.
61 changes: 26 additions & 35 deletions benchmarks/python/conftest.py
@@ -96,45 +96,36 @@ def pytest_configure(config):
 
 def pytest_collection_modifyitems(session, config, items):
     """
-    The baseline benchmarks use `compile` parameter:
-    compile = false: Eager mode benchmark
-    compile = true: torch.compile benchmark
+    The baseline benchmarks use `executor` parameter with
+    values ["eager", "torchcompile", "thunder"] that are optionally
+    run using `--benchmark-{executor}` flag. They are skipped by
+    default.
     """
-    run_eager = config.getoption("--benchmark-eager")
-    run_thunder = config.getoption("--benchmark-thunder")
-    run_torchcompile = config.getoption("--benchmark-torchcompile")
 
     from nvfuser.pytorch_utils import retry_on_oom_or_skip_test
 
+    def get_test_executor(item) -> str | None:
+        if (
+            hasattr(item, "callspec")
+            and "executor" in item.callspec.params
+        ):
+            return item.callspec.params["executor"]
+        return None
+
+    executors_to_skip = []
+
+    for executor in ["eager", "torchcompile", "thunder"]:
+        if not config.getoption(f"--benchmark-{executor}"):
+            executors_to_skip.append(executor)
+
     for item in items:
         item.obj = retry_on_oom_or_skip_test(item.obj)
 
+        test_executor = get_test_executor(item)
+
+        if test_executor is not None and test_executor in executors_to_skip:
+            item.add_marker(
+                pytest.mark.skip(reason=f"need --benchmark-{test_executor} option to run.")
+            )
+
+
-    if not run_eager:
-        skip_eager = pytest.mark.skip(reason="need --benchmark-eager option to run")
-        for item in items:
-            # If the benchmark has compile=False parameter (eager mode), skip it.
-            if (
-                hasattr(item, "callspec")
-                and "compile" in item.callspec.params
-                and not item.callspec.params["compile"]
-            ):
-                item.add_marker(skip_eager)
-
-    if not run_torchcompile:
-        skip_torchcompile = pytest.mark.skip(
-            reason="need --benchmark-torchcompile option to run"
-        )
-        for item in items:
-            # If the benchmark has compile=True parameter (torch.compile mode), skip it.
-            if (
-                hasattr(item, "callspec")
-                and "compile" in item.callspec.params
-                and item.callspec.params["compile"]
-            ):
-                item.add_marker(skip_torchcompile)
-
-    if not run_thunder:
-        skip_thunder = pytest.mark.skip(reason="need --benchmark-thunder option to run")
-        for item in items:
-            if "thunder" in item.nodeid:
-                item.add_marker(skip_thunder)
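
For context, the new hook keys off a test's `executor` parametrization: it reads item.callspec.params["executor"] and skips the item unless the matching --benchmark-<executor> flag was passed. A minimal, hypothetical sketch of a baseline benchmark this hook would act on (the test name and body are illustrative, not part of this commit):

import pytest


# Parametrizing over "executor" is what makes item.callspec.params["executor"]
# visible to pytest_collection_modifyitems above.
@pytest.mark.parametrize("executor", ["eager", "torchcompile", "thunder"])
def test_example_baseline_benchmark(executor):
    # Each parametrization is collected but skipped unless its flag is given, e.g.:
    #   pytest benchmarks/python --benchmark-eager     # runs only executor="eager"
    #   pytest benchmarks/python --benchmark-thunder   # runs only executor="thunder"
    pass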
2 changes: 1 addition & 1 deletion benchmarks/python/test_groupnorm_fwd.py
@@ -148,7 +148,7 @@ def test_groupnorm_fwd_baseline_benchmark(
     benchmark_fn = {
         "eager": groupnorm_fwd,
         "torchcompile": torch.compile(groupnorm_fwd),
-        "thunder": thunder.jit(groupnorm_fwd, , nv_enable_bookend=False, executors=[nvfuserex])
+        "thunder": thunder.jit(groupnorm_fwd, nv_enable_bookend=False, executors=[nvfuserex])
     }
     run_benchmark(
         benchmark,
