
Commit 9e25025
Authored: Jan 10, 2022

AC: remove usage MO (openvinotoolkit#3046)

* AC: remove usage MO
* fix tests
* fix tests
* minor fixes

1 parent a1bf5f7 · commit 9e25025

17 files changed: +51 -1486 lines
 

tools/accuracy_checker/openvino/tools/accuracy_checker/argparser.py  (-46)

@@ -236,45 +236,6 @@ def add_openvino_specific_args(parser):
         choices=['avx512', 'avx2', 'sse4'],
         required=False
     )
-    openvino_specific_args.add_argument(
-        '-M', '--model_optimizer',
-        help='path to model optimizer directory',
-        type=partial(get_path, is_directory=True),
-        # there is no default value because if user did not specify it we use specific locations
-        # defined in model_conversion.py
-        required=False
-    )
-    openvino_specific_args.add_argument(
-        '--tf_custom_op_config_dir',
-        help='path to directory with tensorflow custom operation configuration files for model optimizer',
-        type=partial(get_path, is_directory=True),
-        # there is no default value because if user did not specify it we use specific location
-        # defined in model_conversion.py
-        required=False
-    )
-    openvino_specific_args.add_argument(
-        '--transformations_config_dir',
-        help='path to directory with Model Optimizer transformations configuration files',
-        type=partial(get_path, is_directory=True),
-        # there is no default value because if user did not specify it we use specific location
-        # defined in model_conversion.py
-        required=False
-    )
-    openvino_specific_args.add_argument(
-        '--tf_obj_detection_api_pipeline_config_path',
-        help='path to directory with tensorflow object detection api pipeline configuration files for model optimizer',
-        type=partial(get_path, is_directory=True),
-        # there is no default value because if user did not specify it we use specific location
-        # defined in model_conversion.py
-        required=False
-    )
-    openvino_specific_args.add_argument(
-        '--deprecated_ir_v7',
-        help='allow generation IR v7 via Model Optimizer',
-        type=cast_to_bool,
-        default=False,
-        required=False
-    )
     openvino_specific_args.add_argument(
         '-dc', '--device_config',
         help='Inference Engine device specific config file',
@@ -299,13 +260,6 @@ def add_openvino_specific_args(parser):
         help='model format for automatic search (e.g. blob, xml, onnx)',
         required=False
     )
-    openvino_specific_args.add_argument(
-        '-C', '--converted_models',
-        help='directory to store Model Optimizer converted models. Used for DLSDK launcher only',
-        type=partial(get_path, is_directory=True),
-        default=Path.cwd(),
-        required=False
-    )
     openvino_specific_args.add_argument(
         '-am', '--affinity_map',
        help='prefix path to the affinity maps',

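For options that take a path, the pattern above uses `functools.partial` to bake `is_directory=True` into the callable passed as the argparse `type`. Below is a minimal, self-contained sketch of that pattern; the `get_path` helper here is a simplified stand-in for the accuracy_checker utility, not its real implementation:

```python
from argparse import ArgumentParser
from functools import partial
from pathlib import Path


def get_path(entry, is_directory=False):
    # simplified stand-in: resolve the argument and fail fast
    # if it does not point at an existing file or directory
    path = Path(entry).expanduser().resolve()
    if not path.exists():
        raise FileNotFoundError(path)
    if is_directory and not path.is_dir():
        raise NotADirectoryError(path)
    return path


parser = ArgumentParser()
group = parser.add_argument_group('openvino-specific arguments')
# same shape as the surviving options above: partial() fixes the
# is_directory flag, argparse calls the result on the raw string
group.add_argument(
    '-am', '--affinity_map',
    help='prefix path to the affinity maps',
    type=partial(get_path, is_directory=True),
    required=False
)

args = parser.parse_args(['--affinity_map', '.'])
print(args.affinity_map)  # resolved absolute directory path
```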
tools/accuracy_checker/openvino/tools/accuracy_checker/config/config_reader.py  (-41)

@@ -68,13 +68,6 @@
     'model': 'models',
     'weights': 'models',
     'color_coeff': ['model_attributes', 'models'],
-    'caffe_model': 'models',
-    'caffe_weights': 'models',
-    'tf_model': 'models',
-    'tf_meta': 'models',
-    'mxnet_weights': 'models',
-    'onnx_model': 'models',
-    'kaldi_model': 'models',
     'saved_model_dir': 'models',
     'params': 'models'
 }
@@ -90,11 +83,6 @@
 DEFINITION_ENV_VAR = 'DEFINITIONS_FILE'
 CONFIG_SHARED_PARAMETERS = []
 ACCEPTABLE_MODEL = [
-    'caffe_model', 'caffe_weights',
-    'tf_model', 'tf_meta',
-    'mxnet_weights',
-    'onnx_model',
-    'kaldi_model',
     'model',
     'saved_model_dir',
     'params'
@@ -393,9 +381,6 @@ def merge_modules(config, arguments, update_launcher_entry):
     }

     additional_keys = [
-        'model_optimizer', 'tf_custom_op_config_dir',
-        'tf_obj_detection_api_pipeline_config_path',
-        'transformations_config_dir',
         'cpu_extensions_mode', 'vpu_log_level'
     ]
     arguments_dict = arguments if isinstance(arguments, dict) else vars(arguments)
@@ -843,32 +828,7 @@ def get_mode(config):
     return next(iter(evaluation_keys))


-def merge_converted_model_path(converted_models_dir, mo_output_dir):
-    if mo_output_dir:
-        mo_output_dir = Path(mo_output_dir)
-        if mo_output_dir.is_absolute():
-            return mo_output_dir
-        return converted_models_dir / mo_output_dir
-    return converted_models_dir
-
-
 def merge_dlsdk_launcher_args(arguments, launcher_entry, update_launcher_entry):
-    def _convert_models_args(launcher_entry):
-        if 'deprecated_ir_v7' in arguments and arguments.deprecated_ir_v7:
-            mo_flags = launcher_entry.get('mo_flags', [])
-            mo_flags.append('generate_deprecated_IR_V7')
-            launcher_entry['mo_flags'] = mo_flags
-        if 'converted_models' in arguments and arguments.converted_models:
-            mo_params = launcher_entry.get('mo_params', {})
-            mo_params.update({
-                'output_dir': merge_converted_model_path(arguments.converted_models,
-                                                         mo_params.get('output_dir'))
-            })
-
-            launcher_entry['mo_params'] = mo_params
-
-        return launcher_entry
-
     def _async_evaluation_args(launcher_entry):
         if 'async_mode' in arguments:
             launcher_entry['async_mode'] = arguments.async_mode
@@ -888,7 +848,6 @@ def _async_evaluation_args(launcher_entry):
         return launcher_entry

     launcher_entry.update(update_launcher_entry)
-    _convert_models_args(launcher_entry)
     _async_evaluation_args(launcher_entry)

     if 'device_config' in arguments and arguments.device_config:

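With the `_convert_models_args` helper gone, `merge_dlsdk_launcher_args` only pushes command-line overrides and async settings into the launcher entry. A minimal sketch of that surviving flow, using hypothetical toy values rather than a real accuracy_checker config (the function name below is illustrative, not the original identifier):

```python
from types import SimpleNamespace


def merge_launcher_args_sketch(arguments, launcher_entry, update_launcher_entry):
    # illustrative restatement of the remaining merge steps: apply explicit
    # overrides first, then copy CLI-level async settings if they were given
    launcher_entry.update(update_launcher_entry)
    arguments_dict = arguments if isinstance(arguments, dict) else vars(arguments)
    if 'async_mode' in arguments_dict:
        launcher_entry['async_mode'] = arguments_dict['async_mode']
    return launcher_entry


entry = merge_launcher_args_sketch(
    SimpleNamespace(async_mode=True),            # parsed CLI arguments
    {'framework': 'dlsdk', 'device': 'CPU'},     # launcher entry from the config file
    {'model': 'face_detector.xml'},              # values injected from the command line
)
print(entry)
# {'framework': 'dlsdk', 'device': 'CPU', 'model': 'face_detector.xml', 'async_mode': True}
```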
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/base_models.py  (+2 -10)

@@ -215,11 +215,7 @@ def set_input_and_output(self):
         self.adapter.output_blob = output_blob

     def load_model(self, network_info, launcher, log=False):
-        if 'onnx_model' in network_info:
-            network_info.update(launcher.config)
-            model, weights = launcher.convert_model(network_info)
-        else:
-            model, weights = self.automatic_model_search(network_info)
+        model, weights = self.automatic_model_search(network_info)
         if weights is None and model.suffix != '.onnx':
             self.exec_network = launcher.ie_core.import_network(str(model))
         else:
@@ -302,11 +298,7 @@ def load_network(self, network, launcher):
         self.exec_network = launcher.ie_core.compile_model(self.network, launcher.device)

     def load_model(self, network_info, launcher, log=False):
-        if 'onnx_model' in network_info:
-            network_info.update(launcher.config)
-            model, weights = launcher.convert_model(network_info)
-        else:
-            model, weights = self.automatic_model_search(network_info)
+        model, weights = self.automatic_model_search(network_info)
         if weights is None and model.suffix != '.onnx':
             self.exec_network = launcher.ie_core.import_network(str(model))
         else:

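The context lines show the dispatch both `load_model` variants keep: a model found without a separate weights file and without an `.onnx` suffix is treated as a precompiled blob and imported, anything else is read as a regular network. A tiny standalone sketch of that rule (the helper name and return strings are illustrative only):

```python
from pathlib import Path


def describe_load_path(model, weights=None):
    # standalone restatement of the kept branch: no weights file and a
    # non-.onnx suffix means "precompiled blob", everything else is read
    # and compiled as a regular network
    model = Path(model)
    if weights is None and model.suffix != '.onnx':
        return f'import_network({model})'
    return f'read_network({model}, weights={weights})'


print(describe_load_path('face_detector.blob'))                      # import path
print(describe_load_path('face_detector.onnx'))                      # read path
print(describe_load_path('face_detector.xml', 'face_detector.bin'))  # read path
```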
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_models.py  (+10 -68)

@@ -353,37 +353,8 @@ def fit_to_input(self, data, layer_name, layout, precision, template=None):
             data = data.astype(precision)
         return data

-    def prepare_model(self, launcher):
-        launcher_specific_entries = [
-            'model', 'weights', 'caffe_model', 'caffe_weights', 'tf_model', 'inputs', 'outputs', '_model_optimizer'
-        ]
-
-        def update_mo_params(launcher_config, model_config):
-            for entry in launcher_specific_entries:
-                if entry not in launcher_config:
-                    continue
-                if entry in model_config:
-                    continue
-                model_config[entry] = launcher_config[entry]
-            model_mo_flags, model_mo_params = model_config.get('mo_flags', []), model_config.get('mo_params', {})
-            launcher_mo_flags, launcher_mo_params = launcher_config.get('mo_flags', []), launcher_config.get(
-                'mo_params', {})
-            for launcher_flag in launcher_mo_flags:
-                if launcher_flag not in model_mo_flags:
-                    model_mo_flags.append(launcher_flag)
-
-            for launcher_mo_key, launcher_mo_value in launcher_mo_params.items():
-                if launcher_mo_key not in model_mo_params:
-                    model_mo_params[launcher_mo_key] = launcher_mo_value
-
-            model_config['mo_flags'] = model_mo_flags
-            model_config['mo_params'] = model_mo_params
-
-        update_mo_params(launcher.config, self.model_info)
-        if 'caffe_model' in self.model_info:
-            model, weights = launcher.convert_model(self.model_info)
-        else:
-            model, weights = self.auto_model_search(self.model_info)
+    def prepare_model(self):
+        model, weights = self.auto_model_search(self.model_info)
         return model, weights

     def auto_model_search(self, network_info):
@@ -547,37 +518,8 @@ def fit_to_input(self, data, layer_name, layout, precision, template=None):
             data = data.astype(precision)
         return data

-    def prepare_model(self, launcher):
-        launcher_specific_entries = [
-            'model', 'weights', 'caffe_model', 'caffe_weights', 'tf_model', 'inputs', 'outputs', '_model_optimizer'
-        ]
-
-        def update_mo_params(launcher_config, model_config):
-            for entry in launcher_specific_entries:
-                if entry not in launcher_config:
-                    continue
-                if entry in model_config:
-                    continue
-                model_config[entry] = launcher_config[entry]
-            model_mo_flags, model_mo_params = model_config.get('mo_flags', []), model_config.get('mo_params', {})
-            launcher_mo_flags, launcher_mo_params = launcher_config.get('mo_flags', []), launcher_config.get(
-                'mo_params', {})
-            for launcher_flag in launcher_mo_flags:
-                if launcher_flag not in model_mo_flags:
-                    model_mo_flags.append(launcher_flag)
-
-            for launcher_mo_key, launcher_mo_value in launcher_mo_params.items():
-                if launcher_mo_key not in model_mo_params:
-                    model_mo_params[launcher_mo_key] = launcher_mo_value
-
-            model_config['mo_flags'] = model_mo_flags
-            model_config['mo_params'] = model_mo_params
-
-        update_mo_params(launcher.config, self.model_info)
-        if 'caffe_model' in self.model_info:
-            model, weights = launcher.convert_model(self.model_info)
-        else:
-            model, weights = self.auto_model_search(self.model_info)
+    def prepare_model(self):
+        model, weights = self.auto_model_search(self.model_info)
         return model, weights

     def auto_model_search(self, network_info):
@@ -707,7 +649,7 @@ def __init__(
         self.adapter = None
         self.is_dynamic = False
         if not delayed_model_loading:
-            model_xml, model_bin = self.prepare_model(launcher)
+            model_xml, model_bin = self.prepare_model()
             self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'pnet_', log=True)
             pnet_outs = model_info['outputs']
             pnet_adapter_config = launcher.config.get('adapter', {'type': 'mtcnn_p', **pnet_outs})
@@ -758,7 +700,7 @@ def __init__(
         self.adapter = None
         self.is_dynamic = False
         if not delayed_model_loading:
-            model_xml, model_bin = self.prepare_model(launcher)
+            model_xml, model_bin = self.prepare_model()
             self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'pnet_', log=True)
             pnet_outs = model_info['outputs']
             pnet_adapter_config = launcher.config.get('adapter', {'type': 'mtcnn_p', **pnet_outs})
@@ -810,7 +752,7 @@ def __init__(
         self.is_dynamic = False
         self.launcher = launcher
         if not delayed_model_loading:
-            model_xml, model_bin = self.prepare_model(launcher)
+            model_xml, model_bin = self.prepare_model()
             self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'rnet_', log=True)

     def predict(self, input_blobs, batch_meta, output_callback=None):
@@ -829,7 +771,7 @@ def __init__(
         self.is_dynamic = False
         self.launcher = launcher
         if not delayed_model_loading:
-            model_xml, model_bin = self.prepare_model(launcher)
+            model_xml, model_bin = self.prepare_model()
             self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'rnet_', log=True)

     def predict(self, input_blobs, batch_meta, output_callback=None):
@@ -848,7 +790,7 @@ def __init__(
         self.is_dynamic = False
         self.launcher = launcher
         if not delayed_model_loading:
-            model_xml, model_bin = self.prepare_model(launcher)
+            model_xml, model_bin = self.prepare_model()
             self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'onet_', log=True)

     def predict(self, input_blobs, batch_meta, output_callback=None):
@@ -865,7 +807,7 @@ def __init__(
         self.is_dynamic = False
         self.launcher = launcher
         if not delayed_model_loading:
-            model_xml, model_bin = self.prepare_model(launcher)
+            model_xml, model_bin = self.prepare_model()
             self.load_model({'model': model_xml, 'weights': model_bin}, launcher, 'onet_', log=True)

     def predict(self, input_blobs, batch_meta, output_callback=None):

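The deleted `update_mo_params` helper implemented a "launcher config fills gaps in the per-model config" rule before deciding whether to convert. For reference, a self-contained sketch of that merge rule (function and variable names here are illustrative, not the original identifiers):

```python
def fill_missing_entries(launcher_config, model_config, entries):
    # same rule as the removed helper's first loop: a launcher-level value is
    # copied only when the per-model config does not already define the key,
    # so model-specific settings always win
    for entry in entries:
        if entry in launcher_config and entry not in model_config:
            model_config[entry] = launcher_config[entry]
    return model_config


merged = fill_missing_entries(
    {'model': 'pnet.xml', 'inputs': ['data']},   # launcher-level config
    {'model': 'custom_pnet.xml'},                # per-model config
    ['model', 'weights', 'inputs', 'outputs'],
)
print(merged)  # {'model': 'custom_pnet.xml', 'inputs': ['data']}
```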
tools/accuracy_checker/openvino/tools/accuracy_checker/launcher/dlsdk_launcher.py  (+6 -11)

@@ -24,7 +24,7 @@

 from .dlsdk_launcher_config import (
     HETERO_KEYWORD, MULTI_DEVICE_KEYWORD, NIREQ_REGEX, VPU_PLUGINS,
-    get_cpu_extension, mo_convert_model,
+    get_cpu_extension,
     DLSDK_LAUNCHER_PARAMETERS,
     DLSDKLauncherConfigValidator,
     automatic_model_search
@@ -110,16 +110,11 @@ def __init__(self, config_entry, model_name='', delayed_model_loading=False,
         self.preprocessor = preprocessor

         if not delayed_model_loading:
-            if dlsdk_launcher_config.need_conversion:
-                self._model, self._weights = mo_convert_model(
-                    self.config, self.parameters(), dlsdk_launcher_config.framework
-                )
-            else:
-                self._model, self._weights = automatic_model_search(
-                    self._model_name, self.get_value_from_config('model'),
-                    self.get_value_from_config('weights'),
-                    self.get_value_from_config('_model_type')
-                )
+            self._model, self._weights = automatic_model_search(
+                self._model_name, self.get_value_from_config('model'),
+                self.get_value_from_config('weights'),
+                self.get_value_from_config('_model_type')
+            )
             self.load_network(log=True, preprocessing=preprocessor)
             self.allow_reshape_input = self.get_value_from_config('allow_reshape_input') and self.network is not None
         else:

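With the `need_conversion` branch removed, the launcher always resolves an already-converted artifact through `automatic_model_search` (imported from `dlsdk_launcher_config`). The helper below is a hypothetical sketch of what such a search-only lookup can look like, not the actual accuracy_checker implementation:

```python
from pathlib import Path


def find_model_sketch(model_name, model_dir, model_type=None):
    # hypothetical lookup: pick an existing IR .xml/.bin pair, .onnx file or
    # precompiled .blob by extension instead of converting a source framework
    # model with Model Optimizer
    model_dir = Path(model_dir)
    suffixes = [model_type] if model_type else ['xml', 'onnx', 'blob']
    for suffix in suffixes:
        candidate = model_dir / f'{model_name}.{suffix}'
        if not candidate.exists():
            continue
        if suffix == 'xml':
            weights = candidate.with_suffix('.bin')
            return candidate, weights if weights.exists() else None
        return candidate, None
    raise FileNotFoundError(f'no model found for {model_name} in {model_dir}')
```

A model returned without weights then follows the `import_network` branch shown in base_models.py above.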