Skip to content

Commit 5b468c0

Browse files
gfursin authored and pgmpablo157321 committed
improved CM-MLPerf GUI for Intel implementation
1 parent 30b4a0f commit 5b468c0

File tree

2 files changed

+29
-7
lines changed

2 files changed

+29
-7
lines changed

cm-mlops/script/gui/script.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -349,6 +349,9 @@ def page(i):
349349
if len(meta.get('docker',{}))>0:
350350
run_via_docker = st.toggle('Use Docker', key='run_via_docker', value=False)
351351

352+
if run_via_docker:
353+
st.markdown("*WARNING: CM automatically generates containers for a give script - it's a beta functionality - feel free to [test and provide feedback](https://discord.gg/JjWNWXKxwT)!*")
354+
352355
action = 'docker' if run_via_docker else 'run'
353356
cli = 'cm {} script {} {}\n'.format(action, tags, flags)
354357

cm-mlops/script/run-mlperf-inference-app/customize.py

Lines changed: 26 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -338,6 +338,7 @@ def gui(i):
338338
script_tags = i.get('script_tags', '')
339339

340340
compute_meta = i.get('compute_meta',{})
341+
compute_tags = compute_meta.get('tags', [])
341342
bench_meta = i.get('bench_meta',{})
342343

343344
compute_uid = compute_meta.get('uid','')
@@ -371,9 +372,12 @@ def gui(i):
371372

372373
if device == 'cpu':
373374
inp['implementation']['choices']=['mlcommons-python', 'mlcommons-cpp', 'intel', 'ctuning-cpp-tflite']
374-
inp['implementation']['default']='mlcommons-python'
375-
inp['backend']['choices']=['onnxruntime','deepsparse','pytorch','tf','tvm-onnx']
376-
inp['backend']['default']='onnxruntime'
375+
if 'intel' in compute_tags:
376+
inp['implementation']['default']='intel'
377+
else:
378+
inp['implementation']['default']='mlcommons-python'
379+
inp['backend']['choices']=['onnxruntime','deepsparse','pytorch','tf','tvm-onnx']
380+
inp['backend']['default']='onnxruntime'
377381
elif device == 'rocm':
378382
inp['implementation']['force']='mlcommons-python'
379383
inp['precision']['choices']=['']
@@ -452,14 +456,14 @@ def gui(i):
452456
inp['precision']['force']='float32'
453457
inp['model']['force']='resnet50'
454458
st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-ctuning-cpp-tflite)]*')
455-
456459
elif implementation == 'nvidia':
457460
inp['backend']['force'] = 'tensorrt'
458461
st.markdown('*:red[[CM automation recipe for this implementation](https://github.com/mlcommons/ck/tree/master/cm-mlops/script/app-mlperf-inference-nvidia)]*')
459462
elif implementation == 'intel':
460-
inp['model']['choices'] = ['bert-99', 'bert-99.9', 'gptj-99']
463+
inp['model']['choices'] = ['bert-99', 'gptj-99']
461464
inp['model']['default'] = 'bert-99'
462-
inp['precision']['force'] = 'uint8'
465+
inp['precision']['choices'] = ['int8', 'int4']
466+
inp['precision']['default'] = 'int8'
463467
inp['category']['force'] = 'datacenter'
464468
inp['backend']['force'] = 'pytorch'
465469
# st.markdown('*:red[Note: Intel implementation require extra CM command to build and run Docker container - you will run CM commands to run MLPerf benchmarks there!]*')
@@ -491,6 +495,8 @@ def gui(i):
491495

492496

493497

498+
#############################################################################
499+
# Model
494500
r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_model', 'desc':inp['model']})
495501
model = r.get('value2')
496502
inp['model']['force'] = model
@@ -526,7 +532,20 @@ def gui(i):
526532

527533
if github_doc_model == '': github_doc_model = model
528534

529-
extra_notes_online = '[Extra notes online](https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{})\n'.format(github_doc_model)
535+
model_cm_url='https://github.com/mlcommons/ck/tree/master/docs/mlperf/inference/{}'.format(github_doc_model)
536+
extra_notes_online = '[Extra notes online]({})\n'.format(model_cm_url)
537+
538+
st.markdown('*[CM GitHub docs for this model]({})*'.format(model_cm_url))
539+
540+
#############################################################################
541+
# Precision
542+
if implementation == 'intel':
543+
if model == 'bert-99':
544+
inp['precision']['force'] = 'int8'
545+
elif model == 'gptj-99':
546+
inp['precision']['force'] = 'int4'
547+
548+
530549

531550
r = misc.make_selector({'st':st, 'st_inputs':st_inputs_custom, 'params':params, 'key': 'mlperf_inference_precision', 'desc':inp['precision']})
532551
precision = r.get('value2')

0 commit comments

Comments
 (0)