
Commit f6d48b1

fix type hint error, vision language models tests and phi3
1 parent 4bf5ffc commit f6d48b1

3 files changed: +8 -6 lines

optimum/exporters/openvino/model_patcher.py

+3 -3
@@ -1581,19 +1581,19 @@ def __enter__(self):
         ):
             self._model.config.max_position_embeddings = self._model.config.original_max_position_embeddings
 
-        if is_transformers_version(">=", "4.42.0"):
+        if is_transformers_version(">=", "4.42.0") and is_transformers_version("<", "4.48.0"):
             self._model.model._orig_forward = self._model.model.forward
             self._model.model.forward = types.MethodType(phi3_442_forward, self._model.model)
 
         # https://github.com/huggingface/transformers/blob/30ee508c6c92a1c0aa0281d193c7c0fb815b8d2f/src/transformers/models/phi3/modeling_phi3.py#L113
         # init inv_freq for torchscript tracing
         for layer in self._model.model.layers:
-            if is_torch_version(">=", "2.1.0"):
+            if is_torch_version(">=", "2.1.0") and is_transformers_version("<", "4.48.0"):
                 orig_self_attn_fwd = layer.self_attn.forward
                 layer.self_attn.forward = types.MethodType(_phi3_self_attn_sdpa_forward, layer.self_attn)
                 layer.self_attn._orig_forward = orig_self_attn_fwd
 
-            if layer.self_attn.rotary_emb.inv_freq is None:
+            if hasattr(layer.self_attn, "rotary_emb") and layer.self_attn.rotary_emb.inv_freq is None:
                 rotary_emb = layer.self_attn.rotary_emb
                 layer.self_attn.rotary_emb.inv_freq = 1.0 / (
                     rotary_emb.base ** (torch.arange(0, rotary_emb.dim, 2, dtype=torch.int64).float() / rotary_emb.dim)
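
Both new version gates bound the patch to transformers < 4.48.0, where the patched Phi-3 code paths still exist in their old form, and the added hasattr check tolerates attention layers that no longer carry their own rotary_emb. A minimal, self-contained sketch of the same version-gated monkey-patch pattern; DummyAttn, patched_forward, and maybe_patch are illustrative names, not optimum-intel APIs:

    import types

    from packaging import version


    class DummyAttn:
        def forward(self, x):
            return x


    def patched_forward(self, x):
        # Stand-in for an export-friendly forward (e.g. the SDPA variant above).
        return self._orig_forward(x)


    def maybe_patch(layer, transformers_version: str):
        # Patch only on releases that still need it; skip >= 4.48, where the
        # original module layout the patch targets is assumed to have changed.
        if version.parse(transformers_version) < version.parse("4.48.0"):
            layer._orig_forward = layer.forward
            layer.forward = types.MethodType(patched_forward, layer)


    attn = DummyAttn()
    maybe_patch(attn, "4.47.0")
    assert attn.forward("x") == "x"  # dispatches through patched_forward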

optimum/intel/openvino/modeling_decoder.py

-3
@@ -56,7 +56,6 @@
 
 
 if TYPE_CHECKING:
-    from transformers.generation.streamers import BaseStreamer
     from transformers.modeling_utils import PreTrainedModel
 
 
@@ -706,7 +705,6 @@ def generate(
         prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
         synced_gpus: Optional[bool] = None,
         assistant_model: Optional["PreTrainedModel"] = None,
-        streamer: Optional["BaseStreamer"] = None,
         negative_prompt_ids: Optional[torch.Tensor] = None,
         negative_prompt_attention_mask: Optional[torch.Tensor] = None,
         **kwargs,
@@ -734,7 +732,6 @@ def generate(
             prefix_allowed_tokens_fn,
             synced_gpus,
             assistant_model,
-            streamer,
             negative_prompt_ids,
             negative_prompt_attention_mask,
             **kwargs,
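
Dropping the explicit streamer parameter, and with it the BaseStreamer import that the type hint required, does not change call sites: a streamer still reaches the underlying transformers generate through **kwargs, which the wrapper already forwards. A hedged usage sketch; the checkpoint name is a placeholder:

    from transformers import AutoTokenizer, TextStreamer

    from optimum.intel import OVModelForCausalLM

    model_id = "some-org/some-causal-lm"  # placeholder checkpoint
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = OVModelForCausalLM.from_pretrained(model_id, export=True)

    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextStreamer(tokenizer)
    # `streamer` travels in **kwargs; no typed parameter is needed in the wrapper.
    model.generate(**inputs, streamer=streamer, max_new_tokens=8)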

tests/openvino/test_modeling.py

+5
@@ -2172,6 +2172,11 @@ def test_compare_to_transformers(self, model_arch):
         for component_name, component in ov_model.components.items():
             self.assertIsInstance(component, MODEL_PARTS_CLS_MAPPING[component_name])
         self.assertIsInstance(ov_model.config, PretrainedConfig)
+
+        # TODO: fix in models
+        if preprocessors.get("processor") is not None:
+            preprocessors["processor"].patch_size = ov_model.config.vision_config.patch_size
+
         inputs = ov_model.preprocess_inputs(**preprocessors, text=prompt, image=self.IMAGE.resize((600, 600)))
         transformers_inputs = copy.deepcopy(inputs)
         test_device = "AUTO"
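
The test workaround copies the vision patch_size from the model config onto the processor, since some vision-language checkpoints ship processor configs without it and the processor needs it to expand image placeholder tokens. A standalone sketch of the same idea, with a placeholder checkpoint:

    from transformers import AutoModelForVision2Seq, AutoProcessor

    model_id = "some-org/some-vlm"  # placeholder checkpoint
    processor = AutoProcessor.from_pretrained(model_id)
    model = AutoModelForVision2Seq.from_pretrained(model_id)

    # Mirror the model's vision patch size onto the processor when the saved
    # processor config omits it.
    if getattr(processor, "patch_size", None) is None:
        processor.patch_size = model.config.vision_config.patch_size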
