@@ -272,6 +272,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, compile=False)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
         text = "This restaurant is awesome"
@@ -345,6 +346,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForQuestionAnswering.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
         question = "What's my name?"
@@ -411,6 +413,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForTokenClassification.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("token-classification", model=model, tokenizer=tokenizer)
         outputs = pipe("My Name is Arthur and I live in Lyon.")
@@ -460,6 +463,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForFeatureExtraction.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("feature-extraction", model=model, tokenizer=tokenizer)
         outputs = pipe("My Name is Arthur and I live in Lyon.")
@@ -540,6 +544,7 @@ def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         model = OVModelForCausalLM.from_pretrained(model_id, export=True, use_cache=False, compile=False)
+        model.eval()
         model.config.encoder_no_repeat_ngram_size = 0
         model.to("cpu")
         model.half()
@@ -722,6 +727,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForMaskedLM.from_pretrained(model_id, export=True)
+        model.eval()
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         pipe = pipeline("fill-mask", model=model, tokenizer=tokenizer)
         outputs = pipe(f"This is a {tokenizer.mask_token}.")
@@ -779,6 +785,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForImageClassification.from_pretrained(model_id, export=True)
+        model.eval()
         preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
         pipe = pipeline("image-classification", model=model, feature_extractor=preprocessor)
         outputs = pipe("http://images.cocodataset.org/val2017/000000039769.jpg")
@@ -875,6 +882,7 @@ def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         model = OVModelForSeq2SeqLM.from_pretrained(model_id, export=True, compile=False)
+        model.eval()
         model.half()
         model.to("cpu")
         model.compile()
@@ -1008,6 +1016,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForAudioClassification.from_pretrained(model_id, export=True)
+        model.eval()
         preprocessor = AutoFeatureExtractor.from_pretrained(model_id)
         pipe = pipeline("audio-classification", model=model, feature_extractor=preprocessor)
         outputs = pipe([np.random.random(16000)])
@@ -1318,6 +1327,7 @@ def test_compare_to_transformers(self, model_arch):
     def test_pipeline(self, model_arch):
         model_id = MODEL_NAMES[model_arch]
         model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True)
+        model.eval()
         processor = get_preprocessor(model_id)
         GenerationConfig.from_pretrained(model_id)
         pipe = pipeline(
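Every hunk applies the same change: call model.eval() on the freshly exported OVModel before handing it to a transformers pipeline. A minimal standalone sketch of that pattern is below; the model id is an assumed tiny test checkpoint, not one taken from this diff, and compile=False mirrors the sequence-classification test above.

# Sketch of the pattern added in this PR: export an OVModel, put it in eval
# mode, then wrap it in a transformers pipeline.
from transformers import AutoTokenizer, pipeline
from optimum.intel import OVModelForSequenceClassification

model_id = "hf-internal-testing/tiny-random-bert"  # assumed small checkpoint for illustration
model = OVModelForSequenceClassification.from_pretrained(model_id, export=True, compile=False)
model.eval()  # the call introduced by each hunk in this diff
tokenizer = AutoTokenizer.from_pretrained(model_id)

pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
print(pipe("This restaurant is awesome"))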