
Commit 0168063
add intel pytorch ort and openvino to leaderboard
baptistecolle committed Aug 21, 2024
1 parent 92a5cea commit 0168063
Showing 3 changed files with 3 additions and 7 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/update_llm_perf_cuda_pytorch.yaml
@@ -53,4 +53,4 @@ jobs:
pip install packaging && pip install flash-attn einops scipy auto-gptq optimum bitsandbytes autoawq codecarbon
pip install -U transformers huggingface_hub[hf_transfer]
pip install -e .
-python llm_perf/update_llm_perf_cuda_pytorch.py
+python llm_perf/hardware/cuda/update_llm_perf_cuda_pytorch.py
4 changes: 1 addition & 3 deletions llm_perf/hardware/intel/update_llm_perf_intel_openvino.py
@@ -12,7 +12,7 @@
is_benchmark_conducted,
is_benchmark_supported,
)
-from optimum_benchmark import Benchmark, BenchmarkConfig, BenchmarkReport, InferenceConfig, ProcessConfig, OVConfig
+from optimum_benchmark import Benchmark, BenchmarkConfig, BenchmarkReport, InferenceConfig, OVConfig, ProcessConfig
from optimum_benchmark.logging_utils import setup_logging

SUBSET = os.getenv("SUBSET", None)
@@ -101,8 +101,6 @@ def benchmark_intel_openvino(model, attn_implementation, weights_config):
benchmark_name = f"{weights_config}-{attn_implementation}"
subfolder = f"{benchmark_name}/{model.replace('/', '--')}"

-torch_dtype = WEIGHTS_CONFIGS[weights_config]["torch_dtype"]
-quant_scheme = WEIGHTS_CONFIGS[weights_config]["quant_scheme"]
quant_config = WEIGHTS_CONFIGS[weights_config]["quant_config"]

if not is_benchmark_supported(weights_config, attn_implementation, HARDWARE):
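
For context, the classes imported above are typically composed as in this minimal sketch, assuming the usual optimum_benchmark pattern; the model name, backend kwargs, and output path are illustrative, not taken from the leaderboard script:

# Minimal sketch, not the leaderboard script itself: how the imported
# optimum_benchmark classes are typically wired together for an OpenVINO run.
# The model name, backend kwargs, and output path are illustrative assumptions.
from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, OVConfig, ProcessConfig
from optimum_benchmark.logging_utils import setup_logging

setup_logging(level="INFO")

if __name__ == "__main__":
    launcher_config = ProcessConfig()  # run the benchmark in a separate process
    scenario_config = InferenceConfig(memory=True, latency=True)  # track memory and latency
    backend_config = OVConfig(model="gpt2", device="cpu")  # OpenVINO backend on CPU
    benchmark_config = BenchmarkConfig(
        name="openvino_gpt2",
        launcher=launcher_config,
        scenario=scenario_config,
        backend=backend_config,
    )
    benchmark_report = Benchmark.launch(benchmark_config)  # returns a BenchmarkReport
    benchmark_report.save_json("benchmark_report.json")  # persist the results locally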
4 changes: 1 addition & 3 deletions llm_perf/hardware/intel/update_llm_perf_intel_ort.py
@@ -1,5 +1,4 @@
import os
-import traceback
from itertools import product
from logging import getLogger

@@ -12,7 +11,7 @@
is_benchmark_conducted,
is_benchmark_supported,
)
-from optimum_benchmark import Benchmark, BenchmarkConfig, BenchmarkReport, InferenceConfig, ProcessConfig, ORTConfig
+from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ORTConfig, ProcessConfig
from optimum_benchmark.logging_utils import setup_logging

SUBSET = os.getenv("SUBSET", None)
@@ -102,7 +101,6 @@ def benchmark_intel_ort(model, attn_implementation, weights_config):
subfolder = f"{benchmark_name}/{model.replace('/', '--')}"

torch_dtype = WEIGHTS_CONFIGS[weights_config]["torch_dtype"]
-quant_scheme = WEIGHTS_CONFIGS[weights_config]["quant_scheme"]
quant_config = WEIGHTS_CONFIGS[weights_config]["quant_config"]

if not is_benchmark_supported(weights_config, attn_implementation, HARDWARE):
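
The quant_scheme lookups removed in both Intel scripts were unused reads from the shared WEIGHTS_CONFIGS mapping, whose entries carry a "torch_dtype", a "quant_scheme", and a "quant_config" per weights configuration. A minimal sketch of such a mapping follows; the concrete entries and values are illustrative assumptions, not copied from the repository:

# Illustrative sketch of a WEIGHTS_CONFIGS mapping with the keys referenced in
# the diffs above ("torch_dtype", "quant_scheme", "quant_config"). The concrete
# entries and values here are assumptions, not copied from llm_perf.
WEIGHTS_CONFIGS = {
    "float16": {
        "torch_dtype": "float16",  # plain half-precision weights, no quantization
        "quant_scheme": None,
        "quant_config": {},
    },
    "4bit-awq-gemm": {
        "torch_dtype": "float16",  # compute dtype used alongside 4-bit AWQ weights
        "quant_scheme": "awq",
        "quant_config": {"bits": 4, "version": "gemm"},
    },
}

# A benchmark function then reads only the fields its backend needs, for example:
quant_config = WEIGHTS_CONFIGS["4bit-awq-gemm"]["quant_config"]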
