Skip to content

Commit

Permalink
Run mock milabench on a per bench basis to identify issues more easily
Browse files Browse the repository at this point in the history
  • Loading branch information
pierre.delaunay committed Jul 9, 2024
1 parent 575ba9f commit ec0def2
Show file tree
Hide file tree
Showing 5 changed files with 67 additions and 36 deletions.
20 changes: 20 additions & 0 deletions milabench/testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,26 @@
from milabench.utils import multilogger, validation_layers


here = Path(__file__).parent


def official_config(name):
    """Return the resolved path of the official config called *name*.

    Looks for ``config/<name>.yaml`` one level above this package.

    Raises:
        FileNotFoundError: if no such config file exists.
    """
    candidate = here / ".." / "config" / f"{name}.yaml"

    if not candidate.exists():
        raise FileNotFoundError(f"{candidate} does not exist")

    return candidate.resolve()


def resolved_config(name):
    """Locate the official config *name* and return it fully built.

    Thin convenience wrapper: path lookup is delegated to
    ``official_config`` and parsing to ``build_config``.
    """
    from .config import build_config

    return build_config(official_config(name))


class ReplayPackage(BasePackage):
"""Disable some folder creation for replay purposes"""

Expand Down
15 changes: 4 additions & 11 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
import stat
from pathlib import Path


from milabench.testing import official_config
import pytest
import voir.instruments.gpu as voirgpu

Expand All @@ -21,24 +23,15 @@ def runs_folder():
def config():
    """Fixture factory: map a config name to the test-local YAML path."""
    return lambda name: here / "config" / f"{name}.yaml"



@pytest.fixture
def official_config():
def get_config(name):
return here / ".." / "config" / f"{name}.yaml"
return get_config


@pytest.fixture(scope="session")
def standard_config():
    """Path to the official ``standard.yaml`` config.

    Session-scoped: the value is a pure function of the repository
    layout, and wider-scoped fixtures (e.g. a module-scoped ``args``)
    can only request fixtures of equal or wider scope.
    """
    return official_config("standard")


# Fix: the block carried two stacked @pytest.fixture decorators (an
# artifact of the old/new diff lines); a fixture must have exactly one.
@pytest.fixture(scope='module')
def replayfolder():
    """Directory holding recorded runs used by replay-based tests."""
    return here / "replays"

Expand Down
2 changes: 1 addition & 1 deletion tests/test_command_reg/test_command_reg_one_node.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ export MILABENCH_DIR_DATA=$BASE/data
export MILABENCH_DIR_RUNS=$BASE/runs
export MILABENCH_DIR_EXTRA=$BASE/extra/llm
export MILABENCH_DIR_CACHE=$BASE/cache
export MILABENCH_CONFIG='{"system": {"arch": "cuda", "sshkey": null, "nodes": [{"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}], "self": {"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}}, "dirs": {"base": "$BASE", "venv": "$BASE/venv/torch", "data": "$BASE/data", "runs": "$BASE/runs", "extra": "$BASE/extra/llm", "cache": "$BASE/cache"}, "group": "llm", "install_group": "torch", "install_variant": "cuda", "run_name": "dev", "enabled": true, "capabilities": {"nodes": 1}, "max_duration": 800, "voir": {"options": {"stop": 30, "interval": "1s"}}, "validation": {"usage": {"gpu_load_threshold": 0.5, "gpu_mem_threshold": 0.5}}, "config_base": "$SRC/milabench/tests/../config", "config_file": "$SRC/milabench/tests/../config/standard.yaml", "definition": "$SRC/milabench/benchmarks/llama", "tags": ["inference", "llm", "nlp"], "plan": {"method": "per_gpu"}, "weight": 1.0, "name": "llama", "tag": ["llama"]}'
export MILABENCH_CONFIG='{"system": {"arch": "cuda", "sshkey": null, "nodes": [{"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}], "self": {"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}}, "dirs": {"base": "$BASE", "venv": "$BASE/venv/torch", "data": "$BASE/data", "runs": "$BASE/runs", "extra": "$BASE/extra/llm", "cache": "$BASE/cache"}, "group": "llm", "install_group": "torch", "install_variant": "cuda", "run_name": "dev", "enabled": true, "capabilities": {"nodes": 1}, "max_duration": 800, "voir": {"options": {"stop": 30, "interval": "1s"}}, "validation": {"usage": {"gpu_load_threshold": 0.5, "gpu_mem_threshold": 0.5}}, "config_base": "$SRC/milabench/config", "config_file": "$SRC/milabench/config/standard.yaml", "definition": "$SRC/milabench/benchmarks/llama", "tags": ["inference", "llm", "nlp"], "plan": {"method": "per_gpu"}, "weight": 1.0, "name": "llama", "tag": ["llama"]}'
export OMP_NUM_THREADS=8

echo "---"
Expand Down
2 changes: 1 addition & 1 deletion tests/test_command_reg/test_command_reg_two_nodes.txt
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ export MILABENCH_DIR_DATA=$BASE/data
export MILABENCH_DIR_RUNS=$BASE/runs
export MILABENCH_DIR_EXTRA=$BASE/extra/llm
export MILABENCH_DIR_CACHE=$BASE/cache
export MILABENCH_CONFIG='{"system": {"arch": "cuda", "sshkey": null, "nodes": [{"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}, {"ip": "192.168.0.11", "main": false, "name": "1", "port": 22, "user": "username", "hostname": "192.168.0.11", "aliaslist": [], "ipaddrlist": ["192.168.0.11"], "local": false}], "self": {"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}}, "dirs": {"base": "$BASE", "venv": "$BASE/venv/torch", "data": "$BASE/data", "runs": "$BASE/runs", "extra": "$BASE/extra/llm", "cache": "$BASE/cache"}, "group": "llm", "install_group": "torch", "install_variant": "cuda", "run_name": "dev", "enabled": true, "capabilities": {"nodes": 1}, "max_duration": 800, "voir": {"options": {"stop": 30, "interval": "1s"}}, "validation": {"usage": {"gpu_load_threshold": 0.5, "gpu_mem_threshold": 0.5}}, "config_base": "$SRC/milabench/tests/../config", "config_file": "$SRC/milabench/tests/../config/standard.yaml", "definition": "$SRC/milabench/benchmarks/llama", "tags": ["inference", "llm", "nlp"], "plan": {"method": "per_gpu"}, "weight": 1.0, "name": "llama", "tag": ["llama"]}'
export MILABENCH_CONFIG='{"system": {"arch": "cuda", "sshkey": null, "nodes": [{"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}, {"ip": "192.168.0.11", "main": false, "name": "1", "port": 22, "user": "username", "hostname": "192.168.0.11", "aliaslist": [], "ipaddrlist": ["192.168.0.11"], "local": false}], "self": {"ip": "127.0.0.1", "main": true, "name": "0", "port": 22, "user": "username", "hostname": "127.0.0.1", "aliaslist": [], "ipaddrlist": ["127.0.0.1"], "local": true}}, "dirs": {"base": "$BASE", "venv": "$BASE/venv/torch", "data": "$BASE/data", "runs": "$BASE/runs", "extra": "$BASE/extra/llm", "cache": "$BASE/cache"}, "group": "llm", "install_group": "torch", "install_variant": "cuda", "run_name": "dev", "enabled": true, "capabilities": {"nodes": 1}, "max_duration": 800, "voir": {"options": {"stop": 30, "interval": "1s"}}, "validation": {"usage": {"gpu_load_threshold": 0.5, "gpu_mem_threshold": 0.5}}, "config_base": "$SRC/milabench/config", "config_file": "$SRC/milabench/config/standard.yaml", "definition": "$SRC/milabench/benchmarks/llama", "tags": ["inference", "llm", "nlp"], "plan": {"method": "per_gpu"}, "weight": 1.0, "name": "llama", "tag": ["llama"]}'
export OMP_NUM_THREADS=8

echo "---"
Expand Down
64 changes: 41 additions & 23 deletions tests/test_mock.py
Original file line number Diff line number Diff line change
@@ -1,20 +1,9 @@

import milabench.alt_async
import milabench.commands.executors
from milabench.testing import resolved_config

import traceback
from pytest import fixture


@fixture
def args(standard_config, tmp_path):
return [
"--base", str(tmp_path),
"--config", str(standard_config)
]


async def mock_exec(command, phase="run", timeout=False, timeout_delay=600, **kwargs):
    """Stand-in executor: ignore *command* and report one clean exit."""
    ok = 0
    return [ok]
import pytest


def run_cli(*args):
Expand All @@ -27,18 +16,47 @@ def run_cli(*args):
assert not exc.code


def test_milabench(monkeypatch, args):
monkeypatch.setenv("MILABENCH_GPU_ARCH", "cuda")
monkeypatch.setattr(milabench.commands, "execute_command", mock_exec)
def benchlist(enabled=True):
    """Yield benchmark names from the standard config.

    Args:
        enabled: when True (default), yield benchmarks whose config is
            enabled; when False, yield the disabled ones.  Entries whose
            name starts with "_" are always skipped.

    Fix: the ``enabled`` parameter was previously accepted but ignored;
    it now selects which side of the enabled flag to yield (default
    behavior unchanged).
    """
    standard = resolved_config("standard")

    for key, value in standard.items():
        if bool(value.get("enabled", False)) is enabled and not key.startswith("_"):
            yield key

run_cli("prepare", *args)

# We want to reuse this fixture for each bench
# so we do not run some steps multiple times
@pytest.fixture(scope='module')
def args(standard_config, tmp_path_factory):
    """CLI argument list shared by all benches in the module.

    Fix: ``tmp_path`` is function-scoped and cannot be requested from a
    module-scoped fixture (pytest raises ScopeMismatch); use the
    session-scoped ``tmp_path_factory`` instead.
    NOTE(review): ``standard_config`` must also be module scope or wider
    for this fixture to resolve — confirm its declaration.
    """
    base = tmp_path_factory.mktemp("milabench")
    return [
        "--base", str(base),
        "--config", str(standard_config)
    ]


def mock_voir_run(argv, info, timeout=None, constructor=None, env=None, **options):
    """Replacement for ``voir_run``: multiplex a short ``sleep 1``
    instead of launching the real *argv*, so runs finish quickly."""
    from voir.proc import Multiplexer

    plex = Multiplexer(timeout=timeout, constructor=constructor)
    plex.start(["sleep", "1"], info=info, env=env, **options)
    return plex


@pytest.mark.parametrize("bench", benchlist())
def test_milabench(monkeypatch, args, bench):
    """Run install/prepare/run for a single bench, with the run phase
    executed against a mocked GPU stack.

    One test per bench (via parametrize) so failures are attributable
    to an individual benchmark.
    """
    from milabench.cli.dry import assume_gpu

    # install/prepare run with the real CUDA arch so the correct
    # dependencies get selected.
    monkeypatch.setenv("MILABENCH_GPU_ARCH", "cuda")

    run_cli("install", *args, "--select", bench)

    run_cli("prepare", *args, "--select", bench)

    #
    # use Mock GPU-SMI
    #
    # Scoped context: the mock arch and the fake process runner apply
    # only to the "run" phase, then are undone automatically.
    with monkeypatch.context() as ctx:
        # Replace the real voir runner with the cheap `sleep 1` multiplexer.
        ctx.setattr(milabench.alt_async, "voir_run", mock_voir_run)
        ctx.setenv("MILABENCH_GPU_ARCH", "mock")
        with assume_gpu(8):
            run_cli("run", *args, "--no-report", "--select", bench)

0 comments on commit ec0def2

Please sign in to comment.