Commit 892a4f7

fix test
1 parent 7cbb6d3 commit 892a4f7

2 files changed: +23 -50 lines


src/cpp/src/visual_language/inputs_embedder.cpp (-1 line)

@@ -21,7 +21,6 @@ namespace ov::genai {
 
 // Base InputsEmbedder class
 
-
 std::pair<ov::Tensor, std::optional<int64_t>> InputsEmbedder::IInputsEmbedder::get_position_ids(const size_t inputs_embeds_size, const size_t history_size) {
     ov::Tensor position_ids = ov::Tensor{ov::element::i64, { 1, inputs_embeds_size }};
     std::iota(position_ids.data<int64_t>(), position_ids.data<int64_t>() + position_ids.get_size(), history_size);
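For orientation, the get_position_ids helper touched by this hunk simply fills a 1 x N tensor with consecutive position ids starting at the chat history length. A minimal NumPy sketch of the same logic (illustrative only; the function name and shape mirror the C++ above and are not a Python API of this library):

import numpy as np

def get_position_ids(inputs_embeds_size: int, history_size: int) -> np.ndarray:
    # Equivalent of the std::iota call above: positions continue where
    # the chat history left off, e.g. history_size=10 and
    # inputs_embeds_size=4 yield [[10, 11, 12, 13]].
    return np.arange(history_size, history_size + inputs_embeds_size,
                     dtype=np.int64).reshape(1, inputs_embeds_size)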

tests/python_tests/test_vlm_pipeline.py (+23 -49)

@@ -222,7 +222,10 @@ def test_vlm_with_scheduler_vs_default(config, cache):
 @pytest.mark.nightly
 @pytest.mark.parametrize("model_id", model_ids)
 @pytest.mark.parametrize("system_message", ["", "You are a helpful assistant."])
-def test_vlm_pipeline_chat(model_id, system_message, cache):
+@pytest.mark.parametrize("iteration_images", [[image_links_for_testing[0], image_links_for_testing[0]], [image_links_for_testing[0], image_links_for_testing[2], image_links_for_testing[0]],
+                                              [image_links_for_testing[1], image_links_for_testing[1]], [image_links_for_testing[1], image_links_for_testing[1], image_links_for_testing[1]],
+                                              [image_links_for_testing[2], image_links_for_testing[1]], [image_links_for_testing[2], image_links_for_testing[0], image_links_for_testing[1]]])
+def test_vlm_pipeline_chat(model_id, system_message, iteration_images, cache):
     def streamer(word: str) -> bool:
         nonlocal result_from_streamer
         result_from_streamer.append(word)
@@ -234,23 +237,26 @@ def streamer(word: str) -> bool:
     generation_config.max_new_tokens = 30
     generation_config.set_eos_token_id(ov_pipe.get_tokenizer().get_eos_token_id())
 
-    for links in image_links_for_testing:
+    ov_pipe.start_chat(system_message)
+
+    images = []
+    for link in iteration_images[0]:
+        images.append(get_image_by_link(link))
+
+    result_from_streamer = []
+    res = ov_pipe.generate(prompts[0], images=images, generation_config=generation_config, streamer=streamer)
+    assert res.texts[0] == ''.join(result_from_streamer)
+
+    for image_set in iteration_images[1:]:
         images = []
-        for link in links:
+        for link in image_set:
             images.append(get_image_by_link(link))
 
-        ov_pipe.start_chat(system_message)
-
         result_from_streamer = []
-        res = ov_pipe.generate(prompts[0], images=images, generation_config=generation_config, streamer=streamer)
+        res = ov_pipe.generate(prompts[1], images=images, generation_config=generation_config, streamer=streamer)
         assert res.texts[0] == ''.join(result_from_streamer)
 
-        for prompt in prompts[1:]:
-            result_from_streamer = []
-            res = ov_pipe.generate(prompt, generation_config=generation_config, streamer=streamer)
-            assert res.texts[0] == ''.join(result_from_streamer)
-
-        ov_pipe.finish_chat()
+    ov_pipe.finish_chat()
 
 
 @pytest.mark.precommit
@@ -354,6 +360,7 @@ def streamer(subword):
     generation_config = ov_pipe.get_generation_config()
     generation_config.max_new_tokens = 30
     generation_config.set_eos_token_id(ov_pipe.get_tokenizer().get_eos_token_id())
+    generation_config.ignore_eos = True
 
     images = []
     for link in iteration_images:
@@ -362,21 +369,18 @@ def streamer(subword):
     results_with_cancel = ""
     ov_pipe.start_chat()
     results_with_cancel += ov_pipe.generate(callback_questions[0], images=images, generation_config=generation_config).texts[0]
-
-    generation_config.ignore_eos = True
+    # doesn't add to results_with_cancel as it should be completely removed from the history
     ov_pipe.generate(callback_questions[1], images=images, generation_config=generation_config, streamer=streamer)
     results_with_cancel += ov_pipe.generate(callback_questions[2], images=images, generation_config=generation_config).texts[0]
     ov_pipe.finish_chat()
-
+
     results = ""
     ov_pipe.start_chat()
     results += ov_pipe.generate(callback_questions[0], images=images, generation_config=generation_config).texts[0]
-
-    generation_config.ignore_eos = True
     results += ov_pipe.generate(callback_questions[2], images=images, generation_config=generation_config).texts[0]
     ov_pipe.finish_chat()
 
-    assert(results_with_cancel == results)
+    assert results_with_cancel == results
 
 
 @pytest.mark.precommit
@@ -413,34 +417,4 @@ def streamer(subword):
     res_second = ov_pipe.generate(callback_questions[0], images=images, generation_config=generation_config, streamer=streamer).texts[0]
     ov_pipe.finish_chat()
 
-    assert(res_first == res_second)
-
-
-@pytest.mark.precommit
-@pytest.mark.nightly
-@pytest.mark.parametrize("model_id", model_ids)
-@pytest.mark.parametrize("iteration_images", [[[], image_links_for_testing[1]], [image_links_for_testing[1], image_links_for_testing[1]], [[], image_links_for_testing[1], []]])
-def test_vlm_pipeline_chat_image_combination(model_id, iteration_images, cache):
-    def streamer(word: str) -> bool:
-        nonlocal result_from_streamer
-        result_from_streamer.append(word)
-        return False
-
-    models_path = get_ov_model(model_id, cache)
-    ov_pipe = VLMPipeline(models_path, "CPU")
-    generation_config = ov_pipe.get_generation_config()
-    generation_config.max_new_tokens = 30
-    generation_config.set_eos_token_id(ov_pipe.get_tokenizer().get_eos_token_id())
-
-    for images_links in iteration_images:
-        ov_pipe.start_chat()
-
-        images = []
-        for link in images_links:
-            images.append(get_image_by_link(link))
-
-        result_from_streamer = []
-        res = ov_pipe.generate(prompts[0], images=images, generation_config=generation_config, streamer=streamer)
-        assert res.texts[0] == ''.join(result_from_streamer)
-
-        ov_pipe.finish_chat()
+    assert res_first == res_second
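Side note on the new parametrization: stacked @pytest.mark.parametrize decorators combine multiplicatively, so the added iteration_images axis fans test_vlm_pipeline_chat out over every model_id x system_message x image-set combination (which is how the deleted test_vlm_pipeline_chat_image_combination cases get folded in). A small self-contained illustration of that pytest behavior, with hypothetical values:

import pytest

@pytest.mark.parametrize("system_message", ["", "You are a helpful assistant."])
@pytest.mark.parametrize("iteration_images", [["img0"], ["img0", "img1"]])
def test_cross_product(system_message, iteration_images):
    # pytest generates 2 x 2 = 4 cases, one per combination of the two
    # stacked axes, just as the rewritten test fans out over its three.
    assert isinstance(iteration_images, list)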
