
Commit 973f155

fix code style

1 parent c26a450

4 files changed, +26 -20 lines changed

optimum/exporters/openvino/model_configs.py (+4 -4)

@@ -88,9 +88,9 @@
     PersimmonModelPatcher,
     Phi3ModelPatcher,
     Phi3VisionImageEmbeddingsPatcher,
-    QwenModelPatcher,
     Qwen2VLLanguageModelPatcher,
     Qwen2VLVisionEmbMergerPatcher,
+    QwenModelPatcher,
     RotaryEmbPatcher,
     UpdateCausalMaskModelPatcher,
     XverseModelPatcher,
@@ -112,9 +112,9 @@ def init_model_configs():
         "transformers",
         "Qwen2VLForConditionalGeneration",
     )
-    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["image-text-to-text"] = (
-        TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
-    )
+    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS[
+        "image-text-to-text"
+    ] = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]

     supported_model_types = [
         "_SUPPORTED_MODEL_TYPE",

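For context, a minimal sketch (not part of the commit) of what the reformatted assignment in init_model_configs does: it aliases the "image-text-to-text" task to the same model loader already registered for "text-generation". The import path is the standard optimum one; the underscore attribute is internal API, used here exactly as the diff uses it.

from optimum.exporters.tasks import TasksManager

# Reuse the "text-generation" loader entry for "image-text-to-text".
loaders = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS
loaders["image-text-to-text"] = loaders["text-generation"]
assert loaders["image-text-to-text"] is loaders["text-generation"]
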
optimum/exporters/openvino/model_patcher.py (+11 -12)

@@ -18,14 +18,14 @@
 import math
 import types
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
-from optimum.exporters.onnx.base import OnnxConfig

 import torch
 import torch.nn.functional as F
 from transformers import PreTrainedModel, TFPreTrainedModel
 from transformers.modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling
 from transformers.utils import is_tf_available

+from optimum.exporters.onnx.base import OnnxConfig
 from optimum.exporters.onnx.model_patcher import DecoderModelPatcher, ModelPatcher, override_arguments
 from optimum.intel.utils.import_utils import (
     _openvino_version,
@@ -423,9 +423,9 @@ def _llama_gemma_update_causal_mask_legacy(self, attention_mask, input_tensor, c
         offset = 0
         mask_shape = attention_mask.shape
         mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-        causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
-            mask_slice
-        )
+        causal_mask[
+            : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+        ] = mask_slice

     if (
         self.config._attn_implementation == "sdpa"
@@ -2060,9 +2060,9 @@ def _dbrx_update_causal_mask_legacy(
         offset = 0
         mask_shape = attention_mask.shape
         mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-        causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
-            mask_slice
-        )
+        causal_mask[
+            : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+        ] = mask_slice

     if (
         self.config._attn_implementation == "sdpa"
@@ -3386,10 +3386,9 @@ class Qwen2VLLanguageModelPatcher(DecoderModelPatcher):
     def __init__(
         self,
         config: OnnxConfig,
-        model: PreTrainedModel | TFPreTrainedModel,
-        model_kwargs: Dict[str, Any] | None = None,
+        model: Union[PreTrainedModel, TFPreTrainedModel],
+        model_kwargs: Dict[str, Any] = None,
     ):
-
         model.__orig_forward = model.forward

         def forward_wrap(
@@ -3426,8 +3425,8 @@ class Qwen2VLVisionEmbMergerPatcher(ModelPatcher):
     def __init__(
         self,
         config: OnnxConfig,
-        model: PreTrainedModel | TFPreTrainedModel,
-        model_kwargs: Dict[str, Any] | None = None,
+        model: Union[PreTrainedModel, TFPreTrainedModel],
+        model_kwargs: Dict[str, Any] = None,
     ):
         model.__orig_forward = model.forward
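
A note on the typing changes above, with a hedged sketch; the motivation is presumably runtime compatibility. PEP 604's X | Y syntax in an annotation is evaluated when the def statement runs, and type.__or__ only exists on Python 3.10+, so the | spelling raises TypeError on 3.8/3.9 unless from __future__ import annotations defers evaluation. Union[X, Y] means the same thing and works on every supported version. (Strictly, Dict[str, Any] = None would be Optional[Dict[str, Any]] = None for a type checker; the diff above mirrors the commit as-is.)

from typing import Any, Dict, Union


class A: ...
class B: ...


# Equivalent to `model: A | B` on 3.10+, but also valid at runtime on 3.8/3.9.
def patch(model: Union[A, B], model_kwargs: Dict[str, Any] = None):
    return model, model_kwargs or {}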

optimum/exporters/openvino/utils.py (+9 -1)

@@ -216,7 +216,15 @@ def get_submodels(model):
     return custom_export, fn_get_submodels


-MULTI_MODAL_TEXT_GENERATION_MODELS = ["llava", "llava-next", "llava-qwen2", "internvl-chat", "minicpmv", "phi3-v", "qwen2-vl"]
+MULTI_MODAL_TEXT_GENERATION_MODELS = [
+    "llava",
+    "llava-next",
+    "llava-qwen2",
+    "internvl-chat",
+    "minicpmv",
+    "phi3-v",
+    "qwen2-vl",
+]


 def save_config(config, save_dir):
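
The exploded list above is the layout black produces once a line exceeds the length limit, and the trailing comma keeps that layout stable under reformatting. A hypothetical usage sketch follows; the helper name and the underscore-to-hyphen normalization are assumptions, not code from this commit:

MULTI_MODAL_TEXT_GENERATION_MODELS = [
    "llava",
    "llava-next",
    "llava-qwen2",
    "internvl-chat",
    "minicpmv",
    "phi3-v",
    "qwen2-vl",
]


def uses_multimodal_export(model_type: str) -> bool:
    # Hypothetical helper: config model types use underscores
    # (e.g. "qwen2_vl") while the list uses hyphens.
    return model_type.replace("_", "-") in MULTI_MODAL_TEXT_GENERATION_MODELS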

optimum/intel/openvino/modeling_visual_language.py (+2 -3)

@@ -1,11 +1,11 @@
 import copy
-from dataclasses import dataclass
 import logging
 import os
 import warnings
 from abc import abstractmethod
+from dataclasses import dataclass
 from pathlib import Path
-from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Any
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union

 import numpy as np
 import openvino as ov
@@ -2354,7 +2354,6 @@ def get_multimodal_embeddings(
         video_grid_thw=None,
         **kwargs,
     ):
-
         inputs_embeds = torch.from_numpy(self.get_text_embeddings(input_ids))
         if pixel_values is not None and input_ids.shape[1] != 1:
             image_embeds = torch.from_numpy(self.get_vision_embeddings(pixel_values, image_grid_thw))
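
The hunk above ends just before the merge of text and vision features. A schematic sketch of the typical merge step in models of this family; the function and parameter names are assumptions, not this file's API: vision features replace the placeholder image tokens inside the text embedding sequence.

import torch


def merge_multimodal(inputs_embeds, image_embeds, input_ids, image_token_id):
    # Schematic sketch, not this file's API: mark the positions of image
    # placeholder tokens in the prompt.
    mask = input_ids == image_token_id  # (batch, seq_len)
    # Write one vision feature row into each placeholder position.
    inputs_embeds[mask] = image_embeds.to(inputs_embeds.dtype)
    return inputs_embeds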
