
Commit 5af0206

fix code style

1 parent c26a450 commit 5af0206

4 files changed: +22 -16 lines changed

optimum/exporters/openvino/model_configs.py (+3 -3)
@@ -112,9 +112,9 @@ def init_model_configs():
         "transformers",
         "Qwen2VLForConditionalGeneration",
     )
-    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["image-text-to-text"] = (
-        TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
-    )
+    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS[
+        "image-text-to-text"
+    ] = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
 
     supported_model_types = [
         "_SUPPORTED_MODEL_TYPE",

optimum/exporters/openvino/model_patcher.py (+10 -11)
@@ -423,9 +423,9 @@ def _llama_gemma_update_causal_mask_legacy(self, attention_mask, input_tensor, c
                 offset = 0
             mask_shape = attention_mask.shape
             mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-            causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
-                mask_slice
-            )
+            causal_mask[
+                : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+            ] = mask_slice
 
     if (
         self.config._attn_implementation == "sdpa"
@@ -2060,9 +2060,9 @@ def _dbrx_update_causal_mask_legacy(
                 offset = 0
             mask_shape = attention_mask.shape
             mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-            causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
-                mask_slice
-            )
+            causal_mask[
+                : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+            ] = mask_slice
 
     if (
         self.config._attn_implementation == "sdpa"
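
Both hunks above apply the same re-wrap to an identical statement: the long 4-D slice assignment moves its subscript onto separate lines, with no behavioral change. A minimal, self-contained sketch of that assignment, using illustrative tensor shapes not taken from either model:

import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min  # most negative representable value, used to mask positions out
causal_mask = torch.zeros(1, 1, 4, 4, dtype=dtype)
attention_mask = torch.tensor([[[[1.0, 1.0, 0.0, 0.0]]]]).expand(1, 1, 4, 4)

offset = 0
mask_shape = attention_mask.shape
mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
# the wrapped subscript performs the same in-place assignment as the one-line form
causal_mask[
    : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
] = mask_slice
assert causal_mask[0, 0, 0, 3] == min_dtype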
@@ -3386,10 +3386,9 @@ class Qwen2VLLanguageModelPatcher(DecoderModelPatcher):
     def __init__(
         self,
         config: OnnxConfig,
-        model: PreTrainedModel | TFPreTrainedModel,
-        model_kwargs: Dict[str, Any] | None = None,
+        model: Union[PreTrainedModel, TFPreTrainedModel],
+        model_kwargs: Dict[str, Any] = None,
     ):
-
         model.__orig_forward = model.forward
 
         def forward_wrap(
@@ -3426,8 +3425,8 @@ class Qwen2VLVisionEmbMergerPatcher(ModelPatcher):
     def __init__(
         self,
         config: OnnxConfig,
-        model: PreTrainedModel | TFPreTrainedModel,
-        model_kwargs: Dict[str, Any] | None = None,
+        model: Union[PreTrainedModel, TFPreTrainedModel],
+        model_kwargs: Dict[str, Any] = None,
     ):
         model.__orig_forward = model.forward
 
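Beyond re-wrapping, these two Qwen2VL patcher hunks change the annotations themselves: the PEP 604 union PreTrainedModel | TFPreTrainedModel becomes typing.Union, which also evaluates on Python 3.8/3.9, where the `|` spelling raises a TypeError at definition time (unless `from __future__ import annotations` is in effect). Note that `Dict[str, Any] = None` drops the explicit `| None` and leans on the implicit-Optional convention; a None default is still accepted at runtime. A minimal sketch with hypothetical stand-in types:

from typing import Any, Dict, Union

class A: ...  # stand-ins for PreTrainedModel / TFPreTrainedModel
class B: ...

# Union[...] works on pre-3.10 interpreters, unlike `A | B` in an
# eagerly evaluated annotation.
def patch(model: Union[A, B], model_kwargs: Dict[str, Any] = None):
    return model, model_kwargs or {}

print(patch(A()))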

optimum/exporters/openvino/utils.py (+9 -1)
@@ -216,7 +216,15 @@ def get_submodels(model):
     return custom_export, fn_get_submodels
 
 
-MULTI_MODAL_TEXT_GENERATION_MODELS = ["llava", "llava-next", "llava-qwen2", "internvl-chat", "minicpmv", "phi3-v", "qwen2-vl"]
+MULTI_MODAL_TEXT_GENERATION_MODELS = [
+    "llava",
+    "llava-next",
+    "llava-qwen2",
+    "internvl-chat",
+    "minicpmv",
+    "phi3-v",
+    "qwen2-vl",
+]
 
 
 def save_config(config, save_dir):
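
The list contents are unchanged; the literal is only exploded to one entry per line with a trailing comma (the layout Black's "magic trailing comma" produces), so appending a model type later touches a single line. A quick equivalence check:

inline = ["llava", "llava-next", "llava-qwen2", "internvl-chat", "minicpmv", "phi3-v", "qwen2-vl"]
exploded = [
    "llava",
    "llava-next",
    "llava-qwen2",
    "internvl-chat",
    "minicpmv",
    "phi3-v",
    "qwen2-vl",
]
assert inline == exploded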

optimum/intel/openvino/modeling_visual_language.py (-1)
@@ -2354,7 +2354,6 @@ def get_multimodal_embeddings(
         video_grid_thw=None,
         **kwargs,
     ):
-
         inputs_embeds = torch.from_numpy(self.get_text_embeddings(input_ids))
         if pixel_values is not None and input_ids.shape[1] != 1:
             image_embeds = torch.from_numpy(self.get_vision_embeddings(pixel_values, image_grid_thw))
