
Commit bb90478

fix

IlyasMoutawwakil committed Dec 9, 2024
1 parent 5d5c88d
Showing 5 changed files with 11 additions and 25 deletions.
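In short, the example scripts now gate pushing on a PUSH_REPO_ID environment variable, which the CI workflows below set, instead of deriving a username via whoami(). A minimal sketch of the new pattern, condensed from the diffs that follow; the maybe_push helper is illustrative and not part of the repository:

import os

# CI sets PUSH_REPO_ID (see the workflow diffs below); local runs may leave it
# unset, in which case nothing is pushed to the Hub.
PUSH_REPO_ID = os.environ.get("PUSH_REPO_ID", None)


def maybe_push(benchmark, benchmark_name):
    # Illustrative helper; the example scripts inline this check instead.
    if PUSH_REPO_ID is not None:
        benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=benchmark_name)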
1 change: 1 addition & 0 deletions .github/workflows/test_api_cpu.yaml
@@ -58,3 +58,4 @@ jobs:
           pytest tests/test_examples.py -s -k "api and cpu"
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          PUSH_REPO_ID: optimum-benchmark/cpu
1 change: 1 addition & 0 deletions .github/workflows/test_api_cuda.yaml
@@ -56,3 +56,4 @@ jobs:
           pytest tests/test_examples.py -x -s -k "api and cuda"
         env:
           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+          PUSH_REPO_ID: optimum-benchmark/cuda
17 changes: 5 additions & 12 deletions examples/cuda_pytorch_bert.py
@@ -1,18 +1,11 @@
 import os
 
-from huggingface_hub import whoami
-
 from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig
 from optimum_benchmark.logging_utils import setup_logging
 
-try:
-    USERNAME = whoami()["name"]
-except Exception as e:
-    print(f"Failed to get username from Hugging Face Hub: {e}")
-    USERNAME = None
-
 BENCHMARK_NAME = "cuda_pytorch_bert"
 MODEL = "google-bert/bert-base-uncased"
+PUSH_REPO_ID = os.environ.get("PUSH_REPO_ID", None)
 
 
 def run_benchmark():
@@ -40,7 +33,7 @@ def run_benchmark():
     benchmark_config, benchmark_report = run_benchmark()
     benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
 
-    if USERNAME is not None:
-        benchmark_config.push_to_hub(repo_id=f"{USERNAME}/benchmarks", subfolder=BENCHMARK_NAME)
-        benchmark_report.push_to_hub(repo_id=f"{USERNAME}/benchmarks", subfolder=BENCHMARK_NAME)
-        benchmark.push_to_hub(repo_id=f"{USERNAME}/benchmarks", subfolder=BENCHMARK_NAME)
+    if PUSH_REPO_ID is not None:
+        benchmark_config.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=BENCHMARK_NAME)
+        benchmark_report.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=BENCHMARK_NAME)
+        benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=BENCHMARK_NAME)
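To run this example with pushing enabled outside of CI, set PUSH_REPO_ID yourself, mirroring the env: blocks in the workflows above; a hypothetical local invocation (the repo id is a placeholder):

import os
import subprocess

# Placeholder repo id; the CI workflows use optimum-benchmark/cpu and optimum-benchmark/cuda.
env = dict(os.environ, PUSH_REPO_ID="my-username/benchmarks")
subprocess.run(["python", "examples/cuda_pytorch_bert.py"], env=env, check=True)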
15 changes: 3 additions & 12 deletions examples/cuda_pytorch_llama.py
@@ -1,18 +1,11 @@
 import os
 
-from huggingface_hub import whoami
-
 from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig
 from optimum_benchmark.logging_utils import setup_logging
 
-try:
-    USERNAME = whoami()["name"]
-except Exception as e:
-    print(f"Failed to get username from Hugging Face Hub: {e}")
-    USERNAME = None
-
 BENCHMARK_NAME = "cuda_pytorch_llama"
 MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+PUSH_REPO_ID = os.environ.get("PUSH_REPO_ID", None)
 
 WEIGHTS_CONFIGS = {
     "float16": {
@@ -78,7 +71,5 @@ def run_benchmark(weight_config: str):
     benchmark_config, benchmark_report = run_benchmark(weight_config)
     benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
 
-    if USERNAME is not None:
-        benchmark.push_to_hub(
-            repo_id=f"{USERNAME}/benchmarks", filename=f"{weight_config}.json", subfolder=BENCHMARK_NAME
-        )
+    if PUSH_REPO_ID is not None:
+        benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=BENCHMARK_NAME, filename=f"{weight_config}.json")
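For the llama example, each weight configuration is benchmarked and pushed as its own JSON file under the same subfolder. A condensed sketch of that loop, assuming the script iterates over WEIGHTS_CONFIGS (only the "float16" key is visible in this diff):

for weight_config in WEIGHTS_CONFIGS:
    benchmark_config, benchmark_report = run_benchmark(weight_config)
    benchmark = Benchmark(config=benchmark_config, report=benchmark_report)

    if PUSH_REPO_ID is not None:
        # One artifact per weight config, e.g. cuda_pytorch_llama/float16.json
        benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=BENCHMARK_NAME, filename=f"{weight_config}.json")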
2 changes: 1 addition & 1 deletion tests/test_examples.py
@@ -36,7 +36,7 @@ def test_cli_configs(config_name):
     assert popen.returncode == 0, f"Failed to run {config_name}"
 
 
-@pytest.mark.parametrize("config_name", TEST_SCRIPT_PATHS)
+@pytest.mark.parametrize("script_path", TEST_SCRIPT_PATHS)
 def test_api_scripts(script_path):
     args = ["python", script_path]
 
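The rename fixes a name mismatch: the string passed to pytest.mark.parametrize must name an argument of the test function, otherwise pytest rejects the test at collection time. A self-contained illustration (the listed paths are placeholders, not the repository's actual TEST_SCRIPT_PATHS):

import pytest

TEST_SCRIPT_PATHS = ["examples/cuda_pytorch_bert.py", "examples/cuda_pytorch_llama.py"]  # placeholders


# With the old name ("config_name"), collection fails because test_api_scripts
# has no argument of that name; matching the names fixes it.
@pytest.mark.parametrize("script_path", TEST_SCRIPT_PATHS)
def test_api_scripts(script_path):
    assert script_path.endswith(".py")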
