Skip to content

Commit

Permalink
Fixes for Intel MLPerf inference RetinaNet
Browse files Browse the repository at this point in the history
  • Loading branch information
arjunsuresh committed Jun 29, 2024
1 parent af02a2c commit 47ed8c5
Show file tree
Hide file tree
Showing 6 changed files with 8 additions and 43 deletions.
2 changes: 1 addition & 1 deletion script/app-mlperf-inference-intel/_cm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -410,7 +410,7 @@ variations:
tags: _name.retinanet-pt
deps:
- tags: get,conda,_name.retinanet-pt
- tags: get,python,_conda.resnet50-pt
- tags: get,python,_conda.retinanet-pt
adr:
conda-python:
version: "3.9"
Expand Down
2 changes: 2 additions & 0 deletions script/app-mlperf-inference-intel/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,8 @@ def preprocess(i):
elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "compilation":
if master_model == "resnet50":
i['run_script_input']['script_name'] = "compile_resnet50"
elif master_model == "retinanet":
i['run_script_input']['script_name'] = "compile_retinanet"

elif env['CM_LOCAL_MLPERF_INFERENCE_INTEL_RUN_MODE'] == "build_harness":
print(f"Harness Root: {harness_root}")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def load_image(image_path, target_size, data_type='uint8', convert_to_bgr=False,

def quantize_to_uint8(image, scale, offset):
    """Affinely quantize a floating-point image to uint8.

    Each pixel is mapped as ``value / scale + offset``, rounded to the
    nearest integer (NumPy's round-half-to-even), clipped to the uint8
    range [0, 255], and cast to uint8.

    Args:
        image: numeric ``np.ndarray`` of pixel values (any shape).
        scale: divisor applied to each pixel before the offset.
        offset: zero-point added after scaling.

    Returns:
        ``np.ndarray`` of dtype ``uint8`` with the same shape as ``image``.
    """
    # Compute in float64 for precision; the original code cast the result
    # to float64 a second time, which only produced a redundant copy.
    # np.round replaces np.round_, which was removed in NumPy 2.0.
    quantized_image = image.astype(np.float64) / scale + offset
    output = np.clip(np.round(quantized_image), 0, 255)
    return output.astype(np.uint8)

Expand Down
4 changes: 2 additions & 2 deletions script/install-pytorch-from-src/_cm.json
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@
"env": {
"CM_CONDA_ENV": "yes",
"CM_MLPERF_INFERENCE_INTEL": "yes",
"CM_MLPERF_INFERENCE_INTEL_RESNET50_MODEL": "yes",
"CM_MLPERF_INFERENCE_INTEL_MODEL": "resnet50",
"USE_CUDA": "0"
},
"deps": [
Expand Down Expand Up @@ -196,7 +196,7 @@
"env": {
"CM_CONDA_ENV": "yes",
"CM_MLPERF_INFERENCE_INTEL": "yes",
"CM_MLPERF_INFERENCE_INTEL_RETINANET_MODEL": "yes",
"CM_MLPERF_INFERENCE_INTEL_MODEL": "retinanet",
"USE_CUDA": "0"
},
"deps": [
Expand Down
4 changes: 2 additions & 2 deletions script/install-pytorch-from-src/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ def preprocess(i):
run_cmd="CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . "

env['CM_RUN_CMD'] = run_cmd
elif env.get('CM_MLPERF_INFERENCE_INTEL_RESNET50_MODEL', '') == "yes":
i['run_script_input']['script_name'] = "run-intel-mlperf-inference-resnet50"
elif env.get('CM_MLPERF_INFERENCE_INTEL_MODEL', '') in [ "resnet50", "retinanet" ]:
i['run_script_input']['script_name'] = "run-intel-mlperf-inference-vision"
run_cmd="CC=clang CXX=clang++ USE_CUDA=OFF python -m pip install -e . "
run_cmd=f"CC={env['CM_C_COMPILER_WITH_PATH']} CXX={env['CM_CXX_COMPILER_WITH_PATH']} USE_CUDA=OFF python -m pip install -e . "

Expand Down

This file was deleted.

0 comments on commit 47ed8c5

Please sign in to comment.