diff --git a/cm-mlops/script/get-sys-utils-cm/do_pip_installs.sh b/cm-mlops/script/get-sys-utils-cm/do_pip_installs.sh
new file mode 100644
index 0000000000..55a1492492
--- /dev/null
+++ b/cm-mlops/script/get-sys-utils-cm/do_pip_installs.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+PIP_EXTRA=`python3 -c "import pkg_resources; print(' --break-system-packages ' if int(pkg_resources.get_distribution('pip').version.split('.')[0]) >= 23 else '')"`
+cmd="python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA} ${PIP_EXTRA}"
+echo $cmd
+eval $cmd
diff --git a/cm-mlops/script/get-sys-utils-cm/run-arch.sh b/cm-mlops/script/get-sys-utils-cm/run-arch.sh
index 1f786fd8cd..1a5338c58a 100644
--- a/cm-mlops/script/get-sys-utils-cm/run-arch.sh
+++ b/cm-mlops/script/get-sys-utils-cm/run-arch.sh
@@ -31,4 +31,5 @@ ${CM_SUDO} ${CM_PACKAGE_TOOL} -Syu && \
     xz \
     zip

-python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER}
+. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh
+test $? -eq 0 || exit $?
diff --git a/cm-mlops/script/get-sys-utils-cm/run-debian.sh b/cm-mlops/script/get-sys-utils-cm/run-debian.sh
index 4c35bd52ba..c7de244641 100644
--- a/cm-mlops/script/get-sys-utils-cm/run-debian.sh
+++ b/cm-mlops/script/get-sys-utils-cm/run-debian.sh
@@ -52,4 +52,5 @@ ${CM_SUDO} ${CM_APT_TOOL} update && \
     libgl1 \
     libncurses5

-python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER}
+. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh
+test $? -eq 0 || exit $?
diff --git a/cm-mlops/script/get-sys-utils-cm/run-macos.sh b/cm-mlops/script/get-sys-utils-cm/run-macos.sh
index 39e256cecf..1e5eab4ad5 100644
--- a/cm-mlops/script/get-sys-utils-cm/run-macos.sh
+++ b/cm-mlops/script/get-sys-utils-cm/run-macos.sh
@@ -35,4 +35,5 @@ brew update && \
     zlib \
     python3

-python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER}
+. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh
+test $? -eq 0 || exit $?
diff --git a/cm-mlops/script/get-sys-utils-cm/run-rhel.sh b/cm-mlops/script/get-sys-utils-cm/run-rhel.sh
index 0fcfe252e0..9e3959d36d 100644
--- a/cm-mlops/script/get-sys-utils-cm/run-rhel.sh
+++ b/cm-mlops/script/get-sys-utils-cm/run-rhel.sh
@@ -34,4 +34,5 @@ ${CM_SUDO} ${CM_PACKAGE_TOOL} update && \
     xz \
     zip

-python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER}
+. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh
+test $? -eq 0 || exit $?
diff --git a/cm-mlops/script/get-sys-utils-cm/run-sles.sh b/cm-mlops/script/get-sys-utils-cm/run-sles.sh
index 1ea118f0e0..32cfdbabc9 100644
--- a/cm-mlops/script/get-sys-utils-cm/run-sles.sh
+++ b/cm-mlops/script/get-sys-utils-cm/run-sles.sh
@@ -34,4 +34,5 @@ ${CM_SUDO} ${CM_PACKAGE_TOOL} update && \
     xz \
     zip

-python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER}
+. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh
+test $? -eq 0 || exit $?
diff --git a/cm-mlops/script/get-sys-utils-cm/run-ubuntu.sh b/cm-mlops/script/get-sys-utils-cm/run-ubuntu.sh
index a5f7648de6..c87dbeb728 100644
--- a/cm-mlops/script/get-sys-utils-cm/run-ubuntu.sh
+++ b/cm-mlops/script/get-sys-utils-cm/run-ubuntu.sh
@@ -56,4 +56,5 @@ ${CM_SUDO} ${CM_APT_TOOL} update && \
     libgl1-mesa-glx \
     zlib1g-dev

-python3 -m pip install -r ${CM_TMP_CURRENT_SCRIPT_PATH}/requirements.txt ${CM_PYTHON_PIP_USER} ${CM_PYTHON_PIP_COMMON_EXTRA}
+. ${CM_TMP_CURRENT_SCRIPT_PATH}/do_pip_installs.sh
+test $? -eq 0 || exit $?
diff --git a/docs/mlperf/inference/README_a100.md b/docs/mlperf/inference/README_a100.md
index c60e0fdb9d..92035ce01a 100644
--- a/docs/mlperf/inference/README_a100.md
+++ b/docs/mlperf/inference/README_a100.md
@@ -8,7 +8,7 @@ cmr "generate-run-cmds inference _performance-only" \
 --model=bert-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --scenario=Offline --execution_mode=fast \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=3560 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -20,7 +20,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=bert-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --execution_mode=valid \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=3560 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -38,7 +38,7 @@ cmr "generate-run-cmds inference _performance-only" \
 --model=resnet50 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --scenario=Offline --execution_mode=fast \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=43000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -50,7 +50,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=resnet50 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --execution_mode=valid \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=43000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -62,7 +62,7 @@ cmr "generate-run-cmds inference _performance-only" \
 --model=retinanet --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --scenario=Offline --execution_mode=fast \
---target_qps=100 --rerun --gpu_name=a100 \
+--target_qps=715 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -74,7 +74,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=retinanet --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --execution_mode=valid \
---target_qps=100 --rerun --gpu_name=a100 \
+--target_qps=715 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -87,7 +87,7 @@ cmr "generate-run-cmds inference _performance-only" \
 --model=rnnt --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --scenario=Offline --execution_mode=fast \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=14000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -99,7 +99,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=rnnt --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --execution_mode=valid \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=14000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -112,7 +112,7 @@ cmr "generate-run-cmds inference _performance-only" \
 --model=3d-unet-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --scenario=Offline --execution_mode=fast \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=3.7 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```

@@ -124,7 +124,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=3d-unet-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
 --division=open --quiet --execution_mode=valid \
---target_qps=1000 --rerun --gpu_name=a100 \
+--target_qps=3.7 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm
 ```