From 6de86775cce378bc0c32dce0c60b8784748ec3e3 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 3 Oct 2024 13:56:37 +0530 Subject: [PATCH 1/4] Added HF token to prevent user interaction if the model is absent --- .github/workflows/test-mlperf-inference-llama2.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml index b73f3fd875..2ca2fec29d 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ b/.github/workflows/test-mlperf-inference-llama2.yml @@ -26,6 +26,8 @@ jobs: export CM_REPOS=$HOME/GH_CM python3 -m pip install cm4mlops cm pull repo + pip install -U "huggingface_hub[cli]" + huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential - name: Test MLPerf Inference LLAMA 2 70B reference implementation run: | cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --adr.inference-src.env.CM_GIT_URL=https://github.com/anandhu-eng/inference.git --clean From 30751f00441b97147b69cc11e134c6f253618ef2 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 3 Oct 2024 14:00:30 +0530 Subject: [PATCH 2/4] Installation of HF CLI library limited to the local virtual env --- .github/workflows/test-mlperf-inference-llama2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-mlperf-inference-llama2.yml 
b/.github/workflows/test-mlperf-inference-llama2.yml index 2ca2fec29d..b26990608b 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ b/.github/workflows/test-mlperf-inference-llama2.yml @@ -26,7 +26,7 @@ jobs: export CM_REPOS=$HOME/GH_CM python3 -m pip install cm4mlops cm pull repo - pip install -U "huggingface_hub[cli]" + pip install "huggingface_hub[cli]" huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential - name: Test MLPerf Inference LLAMA 2 70B reference implementation run: | From 1d177dd5fd33d312699116ae0df70b1666df7169 Mon Sep 17 00:00:00 2001 From: ANANDHU S <71482562+anandhu-eng@users.noreply.github.com> Date: Thu, 3 Oct 2024 14:02:46 +0530 Subject: [PATCH 3/4] Run pip through the Python interpreter --- .github/workflows/test-mlperf-inference-llama2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml index b26990608b..d974ee5b4e 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ b/.github/workflows/test-mlperf-inference-llama2.yml @@ -26,7 +26,7 @@ jobs: export CM_REPOS=$HOME/GH_CM python3 -m pip install cm4mlops cm pull repo - pip install "huggingface_hub[cli]" + python3 -m pip install "huggingface_hub[cli]" huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential - name: Test MLPerf Inference LLAMA 2 70B reference implementation run: | From 3e5855c2224f79f1d767f51f4db156e6bacca719 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Thu, 3 Oct 2024 10:02:51 +0100 Subject: [PATCH 4/4] Update test-mlperf-inference-llama2.yml --- .github/workflows/test-mlperf-inference-llama2.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-mlperf-inference-llama2.yml b/.github/workflows/test-mlperf-inference-llama2.yml index d974ee5b4e..97bd1bc6fc 100644 --- a/.github/workflows/test-mlperf-inference-llama2.yml +++ 
b/.github/workflows/test-mlperf-inference-llama2.yml @@ -30,4 +30,4 @@ jobs: huggingface-cli login --token ${{ secrets.HF_TOKEN }} --add-to-git-credential - name: Test MLPerf Inference LLAMA 2 70B reference implementation run: | - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --adr.inference-src.env.CM_GIT_URL=https://github.com/anandhu-eng/inference.git --clean + cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --model=llama2-70b-99 --implementation=reference --backend=${{ matrix.backend }} --category=datacenter --scenario=Offline --execution_mode=test --device=${{ matrix.device }} --docker --quiet --test_query_count=1 --target_qps=1 --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --hw_name=gh_action --docker_dt=yes --results_dir=$HOME/gh_action_results --submission_dir=$HOME/gh_action_submissions --env.CM_MLPERF_MODEL_LLAMA2_70B_DOWNLOAD_TO_HOST=yes --adr.inference-src.tags=_repo.https://github.com/anandhu-eng/inference.git --clean