@@ -272,6 +272,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, compile=False)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
         text = "This restaurant is awesome"
@@ -345,6 +346,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
         question = "What's my name?"
@@ -411,6 +413,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForTokenClassification.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("token-classification", model=model, tokenizer=tokenizer)
         outputs = pipe("My Name is Arthur and I live in Lyon.")
@@ -460,6 +463,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForFeatureExtraction.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("feature-extraction", model=model, tokenizer=tokenizer)
         outputs = pipe("My Name is Arthur and I live in Lyon.")
@@ -568,6 +572,7 @@ def test_pipeline(self, model_arch):
         model = OVModelForCausalLM.from_pretrained(
             model_id, export=True, use_cache=False, compile=False, **model_kwargs
         )
+        model.eval()
         model.config.encoder_no_repeat_ngram_size = 0
         model.to("cpu")
         model.half()
@@ -758,6 +763,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForMaskedLM.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("fill-mask", model=model, tokenizer=tokenizer)
         outputs = pipe(f"This is a {tokenizer.mask_token}.")
@@ -815,6 +821,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForImageClassification.from_pretrained(model_id, export=True)
+        model.eval()
         preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
         pipe = pipeline("image-classification", model=model, feature_extractor=preprocessor)
         outputs = pipe("http://images.cocodataset.org/val2017/000000039769.jpg")
@@ -911,6 +918,7 @@ def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, compile=False)
+        model.eval()
         model.half()
         model.to("cpu")
         model.compile()
@@ -1044,6 +1052,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForAudioClassification.from_pretrained(model_id, export=True)
+        model.eval()
         preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
         pipe = pipeline("audio-classification", model=model, feature_extractor=preprocessor)
         outputs = pipe([np.random.random(16000)])
@@ -1354,6 +1363,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True)
+        model.eval()
         processor = get_preprocessor(model_id)
         GenerationConfig.from_pretrained(model_id)
         pipe = pipeline(
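Note: every hunk above applies the same change, calling model.eval() on the exported OVModel before wrapping it in a transformers pipeline. A minimal standalone sketch of that pattern follows; the model id is an illustrative assumption, not one of the MODEL_NAMES fixtures the tests actually use.

    # Sketch of the tested usage pattern, assuming optimum-intel and transformers are installed.
    from optimum.intel import OVModelForSequenceClassification
    from transformers import AutoTokenizer, pipeline

    model_id = "distilbert-base-uncased-finetuned-sst-2-english"  # illustrative checkpoint, not a test fixture
    model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, compile=False)
    model.eval()  # put the model in inference mode before handing it to a pipeline, as the tests now do
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
    print(pipe("This restaurant is awesome"))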