Merge branch 'fix-quantization-lib' of https://github.com/huggingface/optimum-benchmark into fix-quantization-lib

baptistecolle committed Aug 19, 2024
2 parents fd4992e + 38e4cc9 commit 60b2bf2
Showing 6 changed files with 17 additions and 15 deletions.
@@ -7,7 +7,7 @@ on:
- main
paths:
- docker/**
- .github/workflows/build_and_publish_docker_images.yaml
- .github/workflows/images.yaml
schedule:
- cron: "0 0 * * *"

@@ -29,24 +29,27 @@ jobs:
runs-on: ubuntu-latest

permissions:
contents: read
contents: write
packages: write
id-token: write

steps:
- name: Checkout
- name: Free Disk Space
uses: jlumbroso/free-disk-space@main

- name: Checkout code
uses: actions/checkout@v4

- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GH_TOKEN }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Extract metadata (tags, labels) for Docker images
id: meta
uses: docker/metadata-action@v4
uses: docker/metadata-action@v5
with:
flavor: |
latest=false
3 changes: 1 addition & 2 deletions .github/workflows/quality.yaml
@@ -23,8 +23,7 @@ jobs:
- name: Install quality requirements
run: |
pip install --upgrade pip
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
pip install -e .[quality]
pip install ruff
- name: Check style
run: make quality
4 changes: 1 addition & 3 deletions .gitignore
@@ -174,7 +174,5 @@ amdsmi/
amd-*

external_repos/
outputs/

# Mac specific
.DS_Store
outputs/
4 changes: 2 additions & 2 deletions Makefile
@@ -123,7 +123,7 @@ install_cli_cuda_onnxruntime:
# Run tests

test_api_misc:
pytest -s -k "api and not (cpu or cuda)
pytest -s -k "api and not (cpu or cuda or mps)"

test_api_cpu:
pytest -s -k "api and cpu"
@@ -135,7 +135,7 @@ test_api_rocm:
pytest -s -k "api and cuda"

test_cli_misc:
pytest -s -k "cli and not (cpu or cuda)"
pytest -s -k "cli and not (cpu or cuda or mps)"

test_cli_cpu_pytorch:
pytest -s -k "cli and cpu and pytorch"
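
The Makefile fixes above add a missing closing quote and exclude MPS-marked tests from the misc targets. As a rough illustration of how pytest's -k keyword expression filters collected tests by name, consider the hypothetical module below (the test names are invented for illustration and do not come from the repository): running pytest -s -k "api and not (cpu or cuda or mps)" against it would collect only test_api_misc_numactl.

# illustrative_tests.py -- hypothetical module; names invented to demonstrate -k filtering
# `pytest -s -k "api and not (cpu or cuda or mps)"` selects tests whose names contain
# "api" but none of the excluded keywords.

def test_api_misc_numactl():
    assert True  # selected: contains "api", no excluded keyword

def test_api_cpu_inference():
    assert True  # skipped: name contains "cpu"

def test_api_cuda_inference():
    assert True  # skipped: name contains "cuda"

def test_api_mps_inference():
    assert True  # skipped: name contains "mps"
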
1 change: 0 additions & 1 deletion optimum_benchmark/backends/neural_compressor/config.py
@@ -37,7 +37,6 @@
"quant_level": "auto",
"accuracy_criterion": ACCURACY_CRITERION_CONFIG,
"tuning_criterion": TUNING_CRITERION_CONFIG,
"diagnosis": False,
}


5 changes: 4 additions & 1 deletion optimum_benchmark/system_utils.py
@@ -95,7 +95,10 @@ def get_gpus():
pynvml.nvmlInit()
for i in range(pynvml.nvmlDeviceGetCount()):
handle = pynvml.nvmlDeviceGetHandleByIndex(i)
gpus.append(pynvml.nvmlDeviceGetName(handle))
gpu = pynvml.nvmlDeviceGetName(handle)
# Older pynvml versions may return bytes
gpu = gpu.decode("utf-8") if isinstance(gpu, bytes) else gpu
gpus.append(gpu)
pynvml.nvmlShutdown()

elif is_rocm_system():
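
The system_utils.py hunk guards against older pynvml releases that return the device name as bytes instead of str. A minimal standalone sketch of the same pattern, assuming an NVIDIA machine with the pynvml (nvidia-ml-py) package installed:

import pynvml

def list_nvidia_gpu_names():
    """Return the product names of all visible NVIDIA GPUs."""
    names = []
    pynvml.nvmlInit()
    try:
        for index in range(pynvml.nvmlDeviceGetCount()):
            handle = pynvml.nvmlDeviceGetHandleByIndex(index)
            name = pynvml.nvmlDeviceGetName(handle)
            # Older pynvml versions return bytes, newer ones return str
            names.append(name.decode("utf-8") if isinstance(name, bytes) else name)
    finally:
        pynvml.nvmlShutdown()
    return names
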
