Skip to content

Commit 338e661

Browse files
committed
add tests
1 parent 471bee4 commit 338e661

File tree

2 files changed

+8
-2
lines changed

2 files changed

+8
-2
lines changed

tests/openvino/test_modeling.py

+7-2
Original file line numberDiff line numberDiff line change
@@ -482,8 +482,9 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
482482
"gpt_neo",
483483
"gpt_neox",
484484
"llama",
485-
# "marian", # TODO : enable it back with openvino 2023.3.0
486-
# "mistral",
485+
"llama_gptq",
486+
"marian",
487+
"mistral",
487488
"mpt",
488489
"opt",
489490
"pegasus",
@@ -494,6 +495,10 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase):
494495
@parameterized.expand(SUPPORTED_ARCHITECTURES)
495496
def test_compare_to_transformers(self, model_arch):
496497
model_id = MODEL_NAMES[model_arch]
498+
499+
if "gptq" in model_arch:
500+
self.skipTest("Unsupported GPTQ model")
501+
497502
set_seed(SEED)
498503
ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG)
499504
self.assertIsInstance(ov_model.config, PretrainedConfig)

tests/openvino/utils_tests.py

+1
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@
4949
"levit": "hf-internal-testing/tiny-random-LevitModel",
5050
"longt5": "hf-internal-testing/tiny-random-longt5",
5151
"llama": "fxmarty/tiny-llama-fast-tokenizer",
52+
"llama_gptq": "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ",
5253
"m2m_100": "hf-internal-testing/tiny-random-m2m_100",
5354
"opt": "hf-internal-testing/tiny-random-OPTModel",
5455
"opt125m": "facebook/opt-125m",

0 commit comments

Comments (0)