
Commit

Update README_a100.md (#902)
gfursin authored Aug 8, 2023
2 parents 243cdd7 + 9bed93c commit c1aa3fc
Showing 1 changed file with 10 additions and 10 deletions: docs/mlperf/inference/README_a100.md
@@ -7,7 +7,7 @@
 cmr "generate-run-cmds inference _performance-only" \
 --model=bert-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --scenario=Offline --execution_mode=fast \
+--division=open --quiet --scenario=Offline --execution-mode=fast \
 --target_qps=3560 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -20,7 +20,7 @@ This will do performance+accuracy+compliance for singlestream+offline scenarios.
 cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=bert-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --execution_mode=valid \
+--division=open --quiet --execution-mode=valid \
 --target_qps=3560 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -39,7 +39,7 @@ cmr "get dataset original imagenet _full" --input="<Path to imagenet dir contain
 cmr "generate-run-cmds inference _performance-only" \
 --model=resnet50 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --scenario=Offline --execution_mode=fast \
+--division=open --quiet --scenario=Offline --execution-mode=fast \
 --target_qps=43000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -52,7 +52,7 @@ This will do performance+accuracy+compliance for singlestream+offline scenarios.
 cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=resnet50 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --execution_mode=valid \
+--division=open --quiet --execution-mode=valid \
 --target_qps=43000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -65,7 +65,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 cmr "generate-run-cmds inference _performance-only" \
 --model=retinanet --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --scenario=Offline --execution_mode=fast \
+--division=open --quiet --scenario=Offline --execution-mode=fast \
 --target_qps=715 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -78,7 +78,7 @@ This will do performance+accuracy+compliance for singlestream+offline scenarios.
 cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=retinanet --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --execution_mode=valid \
+--division=open --quiet --execution-mode=valid \
 --target_qps=715 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -92,7 +92,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 cmr "generate-run-cmds inference _performance-only" \
 --model=rnnt --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --scenario=Offline --execution_mode=fast \
+--division=open --quiet --scenario=Offline --execution-mode=fast \
 --target_qps=14000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -105,7 +105,7 @@ This will do performance+accuracy+compliance for singlestream+offline scenarios.
 cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=rnnt --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --execution_mode=valid \
+--division=open --quiet --execution-mode=valid \
 --target_qps=14000 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -119,7 +119,7 @@ cmr "generate-run-cmds inference _submission _all-scenarios" \
 cmr "generate-run-cmds inference _performance-only" \
 --model=3d-unet-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --scenario=Offline --execution_mode=fast \
+--division=open --quiet --scenario=Offline --execution-mode=fast \
 --target_qps=3.7 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
@@ -132,7 +132,7 @@ This will do performance+accuracy+compliance for singlestream+offline scenarios.
 cmr "generate-run-cmds inference _submission _all-scenarios" \
 --model=3d-unet-99 --implementation=nvidia-original \
 --device=cuda --backend=tensorrt --category=edge \
---division=open --quiet --execution_mode=valid \
+--division=open --quiet --execution-mode=valid \
 --target_qps=3.7 --rerun --gpu_name=a100 \
 --adr.nvidia-harness.tags=_sxm \
 --results_dir=$HOME/results_dir
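
For reference, after this change each block in the README reads as a single runnable command. The bert-99 performance-only example from the first hunk, reassembled verbatim from the diff above, looks like this (it assumes a working CM/cmind setup with the MLCommons automation recipes and the Nvidia harness already installed, which this commit does not cover):

cmr "generate-run-cmds inference _performance-only" \
--model=bert-99 --implementation=nvidia-original \
--device=cuda --backend=tensorrt --category=edge \
--division=open --quiet --scenario=Offline --execution-mode=fast \
--target_qps=3560 --rerun --gpu_name=a100 \
--adr.nvidia-harness.tags=_sxm \
--results_dir=$HOME/results_dir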

