File tree: 2 files changed, +8 −2 lines changed.

Original file line number | Diff line number | Diff line change @@ -482,8 +482,9 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
482
482
"gpt_neo" ,
483
483
"gpt_neox" ,
484
484
"llama" ,
485
- # "marian", # TODO : enable it back with openvino 2023.3.0
486
- # "mistral",
485
+ "llama_gptq" ,
486
+ "marian" ,
487
+ "mistral" ,
487
488
"mpt" ,
488
489
"opt" ,
489
490
"pegasus" ,
@@ -494,6 +495,10 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
494
495
@parameterized .expand (SUPPORTED_ARCHITECTURES )
495
496
def test_compare_to_transformers (self , model_arch ):
496
497
model_id = MODEL_NAMES [model_arch ]
498
+
499
+ if "gptq" in model_arch :
500
+ self .skipTest ("Unsupported GPTQ model" )
501
+
497
502
set_seed (SEED )
498
503
ov_model = OVModelForCausalLM .from_pretrained (model_id , export = True , ov_config = F32_CONFIG )
499
504
self .assertIsInstance (ov_model .config , PretrainedConfig )
Original file line number Diff line number Diff line change 49
49
"levit" : "hf-internal-testing/tiny-random-LevitModel" ,
50
50
"longt5" : "hf-internal-testing/tiny-random-longt5" ,
51
51
"llama" : "fxmarty/tiny-llama-fast-tokenizer" ,
52
+ "llama_gptq" : "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" ,
52
53
"m2m_100" : "hf-internal-testing/tiny-random-m2m_100" ,
53
54
"opt" : "hf-internal-testing/tiny-random-OPTModel" ,
54
55
"opt125m" : "facebook/opt-125m" ,
You can’t perform that action at this time.
0 commit comments