
Commit 107d7ef

add tests
1 parent: 3cd5402

File tree

4 files changed: +11 -7 lines changed


optimum/exporters/openvino/model_configs.py

+5 -4

@@ -131,10 +131,11 @@ def init_model_configs():
         "transformers",
         "Qwen2VLForConditionalGeneration",
     )
-    TasksManager._CUSTOM_CLASSES[("pt", "qwen2-5-vl", "image-text-to-text")] = (
-        "transformers",
-        "Qwen2_5_VLForConditionalGeneration",
-    )
+    if is_transformers_version(">", "4.48.99"):
+        TasksManager._CUSTOM_CLASSES[("pt", "qwen2-5-vl", "image-text-to-text")] = (
+            "transformers",
+            "Qwen2_5_VLForConditionalGeneration",
+        )
     TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS[
         "image-text-to-text"
     ] = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
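The new guard only registers the Qwen2.5-VL custom class when the installed transformers is newer than 4.48.99. A minimal sketch of that gate, using importlib.metadata and packaging instead of optimum-intel's is_transformers_version helper; the _CUSTOM_CLASSES dict here is a hypothetical stand-in for TasksManager._CUSTOM_CLASSES:

    from importlib.metadata import version

    from packaging.version import Version, parse

    _CUSTOM_CLASSES = {}  # stand-in for TasksManager._CUSTOM_CLASSES

    if parse(version("transformers")) > Version("4.48.99"):
        # Qwen2_5_VLForConditionalGeneration only exists in newer transformers
        # releases, so the custom loader is registered conditionally to avoid
        # import errors on older versions.
        _CUSTOM_CLASSES[("pt", "qwen2-5-vl", "image-text-to-text")] = (
            "transformers",
            "Qwen2_5_VLForConditionalGeneration",
        )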

optimum/intel/openvino/modeling_visual_language.py

+1 -2

@@ -2658,8 +2658,7 @@ def get_vision_embeddings(self, pixel_values, grid_thw, **kwargs):
         window_index, cu_window_seqlens = self.get_window_index(grid_thw)
         cu_window_seqlens = torch.tensor(
             cu_window_seqlens,
-            device=hidden_states.device,
-            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+            dtype=torch.int32,
         )
         cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
         cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
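The change drops the device pin and the tracing-dependent dtype, always building cu_window_seqlens as int32. A small, self-contained sketch of that construction with made-up values (the grid_thw contents and the seqlens list are hypothetical, chosen only to show the downstream unique_consecutive/cumsum steps):

    import torch

    cu_window_seqlens = [0, 16, 16, 32]   # hypothetical prefix sums with a consecutive duplicate
    grid_thw = torch.tensor([[1, 4, 8]])  # one image: 1 temporal patch, 4x8 spatial grid

    # After the change the tensor is always created with a fixed int32 dtype,
    # instead of switching on torch.jit.is_tracing() and pinning a device.
    cu_window_seqlens = torch.tensor(cu_window_seqlens, dtype=torch.int32)
    cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)  # drops the repeated 16

    cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0)
    print(cu_window_seqlens)  # tensor([ 0, 16, 32], dtype=torch.int32)
    print(cu_seqlens)         # tensor([32])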

tests/openvino/test_modeling.py

+4 -1

@@ -2115,8 +2115,11 @@ class OVModelForVisualCausalLMIntegrationTest(unittest.TestCase):
         SUPPORTED_ARCHITECTURES += ["llava_next", "nanollava"]
     if is_transformers_version(">=", "4.45.0"):
         SUPPORTED_ARCHITECTURES += ["minicpmv", "internvl2", "phi3_v", "qwen2_vl", "maira2"]
+
+    if is_transformers_version(">=", "4.48.0"):
+        SUPPORTED_ARCHITECTURES += ["qwen2_5_vl"]
     TASK = "image-text-to-text"
-    REMOTE_CODE_MODELS = ["internvl2", "minicpmv", "nanollava", "phi3_v", "maira2"]
+    REMOTE_CODE_MODELS = ["internvl2", "minicpmv", "nanollava", "phi3_v", "maira2", "qwen2_5_vl"]

     IMAGE = Image.open(
         requests.get(
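For context, the two class attributes touched here typically interact as sketched below: each entry of SUPPORTED_ARCHITECTURES becomes a test case, and trust_remote_code is enabled only for architectures listed in REMOTE_CODE_MODELS. This is an illustrative loop, not the test suite's actual code:

    SUPPORTED_ARCHITECTURES = ["qwen2_vl", "qwen2_5_vl"]  # hypothetical subset
    REMOTE_CODE_MODELS = ["qwen2_5_vl"]

    for model_arch in SUPPORTED_ARCHITECTURES:
        # Checkpoints that ship custom modeling code need trust_remote_code=True when loaded.
        loading_kwargs = {"trust_remote_code": True} if model_arch in REMOTE_CODE_MODELS else {}
        print(model_arch, loading_kwargs)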

tests/openvino/utils_tests.py

+1 -0

@@ -126,6 +126,7 @@
     "qwen2": "fxmarty/tiny-dummy-qwen2",
     "qwen2-moe": "katuni4ka/tiny-random-qwen1.5-moe",
     "qwen2_vl": "katuni4ka/tiny-random-qwen2vl",
+    "qwen2_5_vl": "katuni4ka/tiny-random-qwen2.5-vl",
     "resnet": "hf-internal-testing/tiny-random-resnet",
     "roberta": "hf-internal-testing/tiny-random-roberta",
     "roformer": "hf-internal-testing/tiny-random-roformer",
