@@ -73,6 +73,7 @@
 from optimum.intel.openvino import OV_DECODER_NAME, OV_DECODER_WITH_PAST_NAME, OV_ENCODER_NAME, OV_XML_FILE_NAME
 from optimum.intel.openvino.modeling_seq2seq import OVDecoder, OVEncoder
 from optimum.intel.openvino.modeling_timm import TimmImageProcessor
+from optimum.intel.utils.import_utils import is_openvino_version
 from optimum.utils import (
     DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
     DIFFUSION_MODEL_UNET_SUBFOLDER,
@@ -89,6 +90,8 @@
 
 SEED = 42
 
+F32_CONFIG = {"CACHE_DIR": "", "INFERENCE_PRECISION_HINT": "f32"}
+
 
 class Timer(object):
     def __enter__(self):
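Note: F32_CONFIG pins the compiled model's inference precision so that OpenVINO outputs stay numerically comparable to the fp32 PyTorch reference (recent OpenVINO releases may otherwise pick a lower device-default precision such as bf16). A minimal sketch of how the tests below consume it; the checkpoint name is illustrative, not one of the test checkpoints:

from optimum.intel.openvino import OVModelForSequenceClassification

F32_CONFIG = {"CACHE_DIR": "", "INFERENCE_PRECISION_HINT": "f32"}

# ov_config is forwarded to the OpenVINO compile step, so the plugin keeps
# f32 execution instead of a device-default lower precision.
ov_model = OVModelForSequenceClassification.from_pretrained(
    "hf-internal-testing/tiny-random-bert",  # illustrative tiny checkpoint
    export=True,
    ov_config=F32_CONFIG,
)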
@@ -125,7 +128,10 @@ def test_load_from_hub_and_save_model(self):
         loaded_model = OVModelForSequenceClassification.from_pretrained(self.OV_MODEL_ID, ov_config=ov_config)
         self.assertTrue(manual_openvino_cache_dir.is_dir())
         self.assertGreaterEqual(len(list(manual_openvino_cache_dir.glob("*.blob"))), 1)
-        self.assertEqual(loaded_model.request.get_property("PERFORMANCE_HINT").name, "THROUGHPUT")
+        if is_openvino_version("<", "2023.3"):
+            self.assertEqual(loaded_model.request.get_property("PERFORMANCE_HINT").name, "THROUGHPUT")
+        else:
+            self.assertEqual(loaded_model.request.get_property("PERFORMANCE_HINT"), "THROUGHPUT")
 
         with tempfile.TemporaryDirectory() as tmpdirname:
             loaded_model.save_pretrained(tmpdirname)
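Note: the guard is needed because the return type of get_property("PERFORMANCE_HINT") changed: before OpenVINO 2023.3 it is an enum member (hence the .name comparison), from 2023.3 on it is the plain string. The same check in isolation, a sketch assuming an already compiled loaded_model:

from optimum.intel.utils.import_utils import is_openvino_version

hint = loaded_model.request.get_property("PERFORMANCE_HINT")
if is_openvino_version("<", "2023.3"):
    assert hint.name == "THROUGHPUT"  # enum member before 2023.3
else:
    assert hint == "THROUGHPUT"  # plain string from 2023.3 on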
@@ -247,7 +253,7 @@ class OVModelForSequenceClassificationIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForSequenceClassification.from_pretrained(model_id, export=True)
+        ov_model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForSequenceClassification.from_pretrained(model_id)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -313,7 +319,7 @@ class OVModelForQuestionAnsweringIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True)
+        ov_model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForQuestionAnswering.from_pretrained(model_id)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -386,7 +392,7 @@ class OVModelForTokenClassificationIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForTokenClassification.from_pretrained(model_id, export=True)
+        ov_model = OVModelForTokenClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForTokenClassification.from_pretrained(model_id)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -430,7 +436,7 @@ class OVModelForFeatureExtractionIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForFeatureExtraction.from_pretrained(model_id, export=True)
+        ov_model = OVModelForFeatureExtraction.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModel.from_pretrained(model_id)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -492,7 +498,7 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True)
+        ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForCausalLM.from_pretrained(model_id)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -637,7 +643,7 @@ class OVModelForMaskedLMIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForMaskedLM.from_pretrained(model_id, export=True)
+        ov_model = OVModelForMaskedLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForMaskedLM.from_pretrained(model_id)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -693,7 +699,7 @@ class OVModelForImageClassificationIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True)
+        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForImageClassification.from_pretrained(model_id)
         preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
@@ -729,7 +735,7 @@ def test_pipeline(self, model_arch):
 
     @parameterized.expand(TIMM_MODELS)
     def test_compare_to_timm(self, model_id):
-        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True)
+        ov_model = OVModelForImageClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         timm_model = timm.create_model(model_id, pretrained=True)
         preprocessor = TimmImageProcessor.from_pretrained(model_id)
@@ -781,7 +787,7 @@ class OVModelForSeq2SeqLMIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True)
+        ov_model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
 
         self.assertIsInstance(ov_model.encoder, OVEncoder)
         self.assertIsInstance(ov_model.decoder, OVDecoder)
@@ -920,7 +926,7 @@ def _generate_random_audio_data(self):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForAudioClassification.from_pretrained(model_id, export=True)
+        ov_model = OVModelForAudioClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForAudioClassification.from_pretrained(model_id)
         preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
@@ -985,7 +991,7 @@ def test_load_vanilla_transformers_which_is_not_supported(self):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForCTC.from_pretrained(model_id, export=True)
+        ov_model = OVModelForCTC.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
 
         set_seed(SEED)
@@ -1037,7 +1043,7 @@ def test_load_vanilla_transformers_which_is_not_supported(self):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForAudioXVector.from_pretrained(model_id, export=True)
+        ov_model = OVModelForAudioXVector.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
 
         set_seed(SEED)
@@ -1091,7 +1097,7 @@ def test_load_vanilla_transformers_which_is_not_supported(self):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForAudioFrameClassification.from_pretrained(model_id, export=True)
+        ov_model = OVModelForAudioFrameClassification.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
 
         set_seed(SEED)
@@ -1134,7 +1140,7 @@ class OVModelForPix2StructIntegrationTest(unittest.TestCase):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForPix2Struct.from_pretrained(model_id, export=True)
+        ov_model = OVModelForPix2Struct.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
 
         self.assertIsInstance(ov_model.encoder, OVEncoder)
         self.assertIsInstance(ov_model.decoder, OVDecoder)
@@ -1223,7 +1229,7 @@ def _generate_random_audio_data(self):
     def test_compare_to_transformers(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         set_seed(SEED)
-        ov_model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True)
+        ov_model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
         self.assertIsInstance(ov_model.config, PretrainedConfig)
         transformers_model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id)
         processor = get_preprocessor(model_id)