Skip to content

Commit bb5ee65

Browse files
fix tests
1 parent c5858ff commit bb5ee65

File tree

2 files changed

+16
-10
lines changed

2 files changed

+16
-10
lines changed

optimum/intel/openvino/modeling_visual_language.py

+11-6
Original file line numberDiff line numberDiff line change
@@ -1021,12 +1021,16 @@ def preprocess_inputs(
10211021
else:
10221022
prompt = text
10231023

1024-
if (
1025-
getattr(processor, "patch_size", None) is None
1026-
and getattr(config, "vision_config", None) is not None
1027-
and getattr(config.vision_config, "patch_size", None) is not None
1028-
):
1029-
processor.patch_size = config.vision_config.patch_size
1024+
if getattr(processor, "patch_size", None) is None:
1025+
if (
1026+
getattr(config, "vision_config", None) is not None
1027+
and getattr(config.vision_config, "patch_size", None) is not None
1028+
):
1029+
processor.patch_size = config.vision_config.patch_size
1030+
else:
1031+
raise ValueError(
1032+
"Processor does not have `patch_size` attribute. Please fix the processor or provide `patch_size` in the config."
1033+
)
10301034

10311035
inputs = processor(images=image, text=prompt, return_tensors="pt")
10321036
return inputs
@@ -1923,6 +1927,7 @@ def preprocess_inputs(
19231927
input_ids = tokenizer(text, return_tensors="pt").input_ids
19241928
attention_mask = torch.ones_like(input_ids, dtype=torch.int64)
19251929
result = {"input_ids": input_ids, "attention_mask": attention_mask}
1930+
19261931
if image is not None:
19271932
result["images"] = processor(images=[image], return_tensors="pt")["pixel_values"]
19281933
return result

tests/openvino/test_modeling.py

+5-4
Original file line numberDiff line numberDiff line change
@@ -2311,17 +2311,17 @@ def test_generate_utils(self, model_arch):
23112311

23122312
def get_preprocessors(self, model_arch):
23132313
model_id = MODEL_NAMES[model_arch]
2314+
config = AutoConfig.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS)
2315+
23142316
if model_arch == "nanollava":
2315-
config = AutoConfig.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS)
23162317
processor = AutoProcessor.from_pretrained(
23172318
config.mm_vision_tower, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS
23182319
)
23192320
tokenizer = AutoTokenizer.from_pretrained(
23202321
model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS
23212322
)
2322-
preprocessors = {"processor": processor, "tokenizer": tokenizer}
2323+
preprocessors = {"processor": processor, "tokenizer": tokenizer, "config": config}
23232324
elif model_arch == "internvl2":
2324-
config = AutoConfig.from_pretrained(model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS)
23252325
tokenizer = AutoTokenizer.from_pretrained(
23262326
model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS
23272327
)
@@ -2330,7 +2330,8 @@ def get_preprocessors(self, model_arch):
23302330
processor = AutoProcessor.from_pretrained(
23312331
model_id, trust_remote_code=model_arch in self.REMOTE_CODE_MODELS
23322332
)
2333-
preprocessors = {"processor": processor, "tokenizer": None}
2333+
preprocessors = {"processor": processor, "tokenizer": None, "config": config}
2334+
23342335
return preprocessors
23352336

23362337
@parameterized.expand(SUPPORTED_ARCHITECTURES)

0 commit comments

Comments (0)