|
20 | 20 | from parameterized import parameterized
|
21 | 21 | from transformers import (
|
22 | 22 | AutoModelForCausalLM,
|
| 23 | + AutoModelForQuestionAnswering, |
23 | 24 | AutoModelForSequenceClassification,
|
24 | 25 | AutoTokenizer,
|
25 | 26 | PretrainedConfig,
|
|
28 | 29 | )
|
29 | 30 |
|
30 | 31 | from optimum.exporters.onnx import MODEL_TYPES_REQUIRING_POSITION_IDS
|
31 |
| -from optimum.intel import IPEXModelForCausalLM, IPEXModelForSequenceClassification |
| 32 | +from optimum.intel import IPEXModelForCausalLM, IPEXModelForQuestionAnswering, IPEXModelForSequenceClassification |
32 | 33 |
|
33 | 34 |
|
34 | 35 | SEED = 42
|
@@ -118,6 +119,46 @@ def test_pipeline(self, model_arch):
|
118 | 119 | self.assertIsInstance(outputs[0]["label"], str)
|
119 | 120 |
|
120 | 121 |
|
class IPEXModelForQuestionAnsweringTest(unittest.TestCase):
    """Tests for ``IPEXModelForQuestionAnswering``.

    Verifies that the IPEX-exported model (1) matches the eager
    ``transformers`` reference implementation numerically and (2) plugs
    into the ``question-answering`` pipeline end to end.
    """

    SUPPORTED_ARCHITECTURES = (
        "bert",
        "distilbert",
        "roberta",
    )

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_compare_to_transformers(self, model_arch):
        """Start/end logits of the IPEX model must match the reference model."""
        model_id = MODEL_NAMES[model_arch]
        set_seed(SEED)
        ipex_model = IPEXModelForQuestionAnswering.from_pretrained(model_id, export=True)
        self.assertIsInstance(ipex_model.config, PretrainedConfig)
        reference_model = AutoModelForQuestionAnswering.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        encoded = tokenizer("This is a sample input", return_tensors="pt")
        with torch.no_grad():
            reference_outputs = reference_model(**encoded)
        ipex_outputs = ipex_model(**encoded)
        # Both QA heads must be present in the output mapping.
        self.assertIn("start_logits", ipex_outputs)
        self.assertIn("end_logits", ipex_outputs)
        # Compare tensor outputs against the eager reference within tolerance.
        self.assertTrue(torch.allclose(ipex_outputs.start_logits, reference_outputs.start_logits, atol=1e-4))
        self.assertTrue(torch.allclose(ipex_outputs.end_logits, reference_outputs.end_logits, atol=1e-4))

    @parameterized.expand(SUPPORTED_ARCHITECTURES)
    def test_pipeline(self, model_arch):
        """The IPEX model must work inside the question-answering pipeline."""
        model_id = MODEL_NAMES[model_arch]
        qa_model = IPEXModelForQuestionAnswering.from_pretrained(model_id, export=True)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        qa_pipe = pipeline("question-answering", model=qa_model, tokenizer=tokenizer)
        result = qa_pipe("What's my name?", "My Name is Sasha and I live in Lyon.")
        # Pipeline must run on the same device as the wrapped model.
        self.assertEqual(qa_pipe.device, qa_model.device)
        self.assertGreaterEqual(result["score"], 0.0)
        self.assertIsInstance(result["answer"], str)
121 | 162 | class IPEXModelForCausalLMTest(unittest.TestCase):
|
122 | 163 | SUPPORTED_ARCHITECTURES = (
|
123 | 164 | "bart",
|
|
0 commit comments