|
20 | 20 | from pathlib import Path
|
21 | 21 | from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
|
22 | 22 |
|
| 23 | +import onnx |
23 | 24 | from transformers.utils import is_tf_available, is_torch_available
|
24 | 25 |
|
25 |
| -from openvino.runtime import PartialShape, save_model |
| 26 | +from openvino.runtime import Model, PartialShape, save_model |
26 | 27 | from openvino.runtime.exceptions import OVTypeError
|
27 | 28 | from openvino.runtime.utils.types import get_element_type
|
28 | 29 | from openvino.tools.ovc import convert_model
|
|
32 | 33 | from optimum.exporters.onnx.convert import export_pytorch as export_pytorch_to_onnx
|
33 | 34 | from optimum.exporters.onnx.convert import export_tensorflow as export_tensorflow_onnx
|
34 | 35 | from optimum.exporters.utils import _get_submodels_and_export_configs
|
| 36 | +from optimum.intel.utils.import_utils import ( |
| 37 | + _nncf_version, |
| 38 | + _optimum_intel_version, |
| 39 | + _optimum_version, |
| 40 | + _timm_version, |
| 41 | + _torch_version, |
| 42 | + _transformers_version, |
| 43 | +) |
35 | 44 | from optimum.utils import DEFAULT_DUMMY_SHAPES, is_diffusers_available
|
36 | 45 | from optimum.utils.save_utils import maybe_save_preprocessors
|
37 | 46 |
|
@@ -81,6 +90,8 @@ def _save_model(model, path: str, ov_config: Optional["OVConfig"] = None):
|
81 | 90 |
|
82 | 91 | compress_to_fp16 = ov_config.dtype == "fp16"
|
83 | 92 |
|
| 93 | + library_name = TasksManager.infer_library_from_model(Path(path).parent) |
| 94 | + model = _add_version_info_to_model(model, library_name) |
84 | 95 | save_model(model, path, compress_to_fp16)
|
85 | 96 |
|
86 | 97 |
|
@@ -689,3 +700,34 @@ def export_tokenizer(
|
689 | 700 |
|
690 | 701 | for model, file_name in zip(converted, (OV_TOKENIZER_NAME, OV_DETOKENIZER_NAME)):
|
691 | 702 | save_model(model, output / file_name.format(suffix))
|
| 703 | + |
| 704 | + |
def _add_version_info_to_model(model: Model, library_name: Optional[str] = None):
    """
    Annotate an OpenVINO model with the versions of the libraries used to produce it.

    All entries are written under the ``"optimum"`` runtime-info namespace via
    ``Model.set_rt_info`` so they can be inspected later (reproducibility /
    debugging). This is strictly best-effort: any failure while collecting or
    writing version info is swallowed and the model is returned as-is.

    Args:
        model (`Model`): OpenVINO model to annotate (mutated in place).
        library_name (`str`, *optional*): library the model originates from
            (e.g. ``"diffusers"``, ``"timm"``); controls extra version entries.

    Returns:
        `Model`: the same model instance.
    """
    try:
        model.set_rt_info(_transformers_version, ["optimum", "transformers_version"])
        model.set_rt_info(_torch_version, ["optimum", "pytorch_version"])
        model.set_rt_info(_optimum_intel_version, ["optimum", "optimum_intel_version"])
        model.set_rt_info(_optimum_version, ["optimum", "optimum_version"])

        # A "token_embeddings" output indicates a sentence-transformers model.
        if any("token_embeddings" in output.get_names() for output in model.outputs):
            import sentence_transformers

            model.set_rt_info(sentence_transformers.__version__, ["optimum", "sentence_transformers_version"])
        if library_name == "diffusers":
            # Fix: record the actual diffusers version (the original stored
            # `_optimum_version` under the "diffusers_version" key).
            import diffusers

            model.set_rt_info(diffusers.__version__, ["optimum", "diffusers_version"])
        elif library_name == "timm":
            model.set_rt_info(_timm_version, ["optimum", "timm_version"])
        rt_info = model.get_rt_info()
        if "nncf" in rt_info:
            model.set_rt_info(_nncf_version, ["optimum", "nncf_version"])
        # Guard instead of relying on KeyError: a missing "conversion_parameters"
        # entry must not abort the remaining best-effort annotations above.
        if "conversion_parameters" in rt_info:
            input_model = rt_info["conversion_parameters"].get("input_model", None)
            if input_model is not None and "onnx" in input_model.value:
                model.set_rt_info(onnx.__version__, ["optimum", "onnx_version"])

    except Exception:
        # Version metadata is optional; never fail model export because of it.
        pass

    return model
0 commit comments