Skip to content

Commit f8d3f74

Browse files
committed
fix tests
1 parent 8c60c7a commit f8d3f74

File tree

1 file changed

+4
-3
lines changed

1 file changed

+4
-3
lines changed

tests/ipex/test_modeling.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -284,19 +284,20 @@ def test_ipex_patching_beam_search(self, test_name, model_arch, use_cache):
284284
texts = ["This is a sample", ["This is the first input", "This is the second input"]]
285285
generation_configs = (
286286
GenerationConfig(max_new_tokens=4, num_beams=2, do_sample=True),
287-
GenerationConfig(max_new_tokens=4, num_beams=4, do_sample=True),
288-
GenerationConfig(max_new_tokens=4, num_beams=8, do_sample=True),
287+
GenerationConfig(max_new_tokens=4, num_beams=4, do_sample=True),
288+
GenerationConfig(max_new_tokens=4, num_beams=8, do_sample=True),
289289
GenerationConfig(max_new_tokens=4, num_beams=32, do_sample=True),
290290
GenerationConfig(max_new_tokens=4, do_sample=not use_cache, top_p=1.0, top_k=5, penalty_alpha=0.6),
291291
GenerationConfig(max_new_tokens=4, do_sample=True, top_p=0.9, top_k=0),
292-
)
292+
)
293293
for text in texts:
294294
tokens = tokenizer(text, padding=True, return_tensors="pt")
295295
for generation_config in generation_configs:
296296
outputs = model.generate(**tokens, generation_config=generation_config)
297297
transformers_outputs = transformers_model.generate(**tokens, generation_config=generation_config)
298298
self.assertIsInstance(outputs, torch.Tensor)
299299
self.assertEqual(outputs, transformers_outputs)
300+
300301
def test_compare_with_and_without_past_key_values(self):
301302
model_id = "echarlaix/tiny-random-gpt2-torchscript"
302303
tokenizer = AutoTokenizer.from_pretrained(model_id)

0 commit comments

Comments
 (0)