     WhisperOnnxConfig,
 )
 from optimum.exporters.onnx.model_patcher import ModelPatcher
-from optimum.exporters.tasks import TasksManager
+from optimum.exporters.tasks import TasksManager, get_transformers_tasks_to_model_mapping
 from optimum.utils import DEFAULT_DUMMY_SHAPES
 from optimum.utils.input_generators import (
     DTYPE_MAPPER,
@@ -132,16 +132,14 @@ def init_model_configs():
         "transformers",
         "Qwen2VLForConditionalGeneration",
     )
-
-    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["image-text-to-text"] = (
-        (
-            "AutoModelForImageTextToText",
-            "AutoModelForCausalLM",
-        )
-        if is_transformers_version(">=", "4.46")
-        else TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
+    TasksManager._CUSTOM_CLASSES[("pt", "qwen2-vl", "image-text-to-text")] = (
+        "transformers",
+        "AutoModelForImageTextToText",
     )
 
+    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS[
+        "image-text-to-text"
+    ] = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
     if is_diffusers_available() and "fill" not in TasksManager._DIFFUSERS_TASKS_TO_MODEL_LOADERS:
         TasksManager._DIFFUSERS_TASKS_TO_MODEL_LOADERS["fill"] = "FluxFillPipeline"
         TasksManager._DIFFUSERS_TASKS_TO_MODEL_MAPPINGS["fill"] = {"flux": "FluxFillPipeline"}
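For context on what the hunk above changes: the version-gated override of the generic image-text-to-text loaders is dropped, and the AutoModelForImageTextToText loader is instead pinned to the qwen2-vl entry through TasksManager._CUSTOM_CLASSES. Below is a minimal sketch of how the patched registries can be inspected after init_model_configs() has run; it only uses the attributes that appear in the hunk, and the exact registry layout may differ across optimum versions.

```python
from optimum.exporters.tasks import TasksManager

# Assumes init_model_configs() from this module has already been called,
# i.e. the registry patches shown in the hunk above are in place.

# The generic image-text-to-text task now simply reuses whatever loader(s)
# the text-generation task maps to (e.g. AutoModelForCausalLM).
print(TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["image-text-to-text"])

# The qwen2-vl specific entry resolves to AutoModelForImageTextToText instead.
print(TasksManager._CUSTOM_CLASSES[("pt", "qwen2-vl", "image-text-to-text")])
# -> ("transformers", "AutoModelForImageTextToText")
```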
@@ -2573,7 +2571,7 @@ def generate(self, input_name: str, framework: str = "pt", int_dtype: str = "int
             return self.random_float_tensor([grid_h * grid_t * grid_w, dim], framework=framework, dtype=float_dtype)
 
         if input_name == "window_index":
-            if self.spatial_merge_size:
+            if self.spatial_merge_size is None:
                 raise ValueError(
                     "`spatial_merge_size` parameter is not found in model config. Can not generate dummy input data for `window_index` input"
                 )
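The hunk above fixes an inverted guard: the old truthiness check raised precisely when spatial_merge_size was set, and let a missing value through. A tiny sketch of the corrected behaviour with a stand-in variable (the value 2 is hypothetical, not taken from the PR):

```python
spatial_merge_size = 2  # stand-in for self.spatial_merge_size read from the model config

# Old guard: `if spatial_merge_size:` raised for any valid non-zero value.
# Fixed guard: raise only when the config did not provide the value at all.
if spatial_merge_size is None:
    raise ValueError(
        "`spatial_merge_size` parameter is not found in model config. "
        "Can not generate dummy input data for `window_index` input"
    )
```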