Commit 3061342

fix quant tests

Parent: 3da5328

2 files changed, 21 insertions(+), 7 deletions(-)

optimum/intel/openvino/modeling_seq2seq.py (+2, -1)
@@ -652,7 +652,8 @@ def forward(
 
         if "cache_position" in self.input_names:
             if cache_position is None:
-                cache_position = np.arange(self._past_len, self._past_len + input_ids.shape[1])
+                past_len = self._get_past_length(past_key_values)
+                cache_position = np.arange(past_len, past_len + input_ids.shape[1])
             inputs["cache_position"] = cache_position
 
         if "beam_idx" in self.input_names:

tests/openvino/test_quantization.py (+19, -6)
@@ -208,8 +208,12 @@ def test_ov_model_static_quantization_with_auto_dataset(
             ov_model.save_pretrained(tmp_dir)
 
             if model_cls == OVModelForSpeechSeq2Seq:
+                models = [ov_model.encoder.model, ov_model.decoder.model]
+
+                if ov_model.decoder_with_past is not None:
+                    models.append(ov_model.decoder_with_past.model)
                 for model, expected_fq, expected_i8 in zip(
-                    (ov_model.encoder.model, ov_model.decoder.model, ov_model.decoder_with_past.model),
+                    models,
                     expected_fake_quantize,
                     expected_int8,
                 ):
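When the model is exported with a stateful decoder, the with-past decoder is folded into `decoder` and `ov_model.decoder_with_past` is `None`, so the test now builds the list of submodels to check conditionally; the same guard appears in the two hunks below. A small standalone sketch of the pattern (the helper name is illustrative, not code from the test suite):

# Illustrative helper, not part of tests/openvino/test_quantization.py:
# collect the OpenVINO submodels of a seq2seq model, skipping decoder_with_past
# when the stateful export merged it into the main decoder.
def collect_seq2seq_submodels(ov_model):
    submodels = [ov_model.encoder.model, ov_model.decoder.model]
    if getattr(ov_model, "decoder_with_past", None) is not None:
        submodels.append(ov_model.decoder_with_past.model)
    return submodels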
@@ -629,7 +633,9 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type, trust
         self.assertEqual(model._openvino_config.dtype, "int8")
 
         if model.export_feature.startswith("text2text-generation"):
-            models = [model.encoder, model.decoder, model.decoder_with_past]
+            models = [model.encoder, model.decoder]
+            if model.decoder_with_past is not None:
+                models.append(model.decoder_with_past)
         elif model.export_feature == "text-to-image":
             models = [model.unet, model.vae_encoder, model.vae_decoder]
             models.append(model.text_encoder if model_type == "stable-diffusion" else model.text_encoder_2)
@@ -772,7 +778,9 @@ def test_ovmodel_load_with_uncompressed_weights(self, model_cls, model_type, tru
             MODEL_NAMES[model_type], export=True, load_in_8bit=False, trust_remote_code=trust_remote_code
         )
         if model.export_feature.startswith("text2text-generation"):
-            models = [model.encoder, model.decoder, model.decoder_with_past]
+            models = [model.encoder, model.decoder]
+            if model.decoder_with_past is not None:
+                models.append(model.decoder_with_past)
         elif model.export_feature == "text-to-image":
             models = [model.unet, model.vae_encoder, model.vae_decoder]
             models.append(model.text_encoder if model_type == "stable-diffusion" else model.text_encoder_2)
@@ -1205,9 +1213,14 @@ def test_calibration_data_uniqueness(self, model_name, apply_caching):
         processor = AutoProcessor.from_pretrained(model_id)
 
         calibration_data = []
-        ov_model.decoder_with_past.request = InferRequestWrapper(
-            ov_model.decoder_with_past.request, calibration_data, apply_caching=apply_caching
-        )
+        if not ov_model.stateful:
+            ov_model.decoder_with_past.request = InferRequestWrapper(
+                ov_model.decoder_with_past.request, calibration_data, apply_caching=apply_caching
+            )
+        else:
+            ov_model.decoder.request = InferRequestWrapper(
+                ov_model.decoder.request, calibration_data, apply_caching=apply_caching
+            )
         for _ in range(2):
             input_features = self._generate_random_audio_data(processor)
             ov_model.generate(input_features, max_new_tokens=10, min_new_tokens=10)
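The same distinction drives calibration-data collection: a stateful Whisper export has no separate `decoder_with_past`, so the wrapper must be attached to `decoder.request` instead. `InferRequestWrapper` comes from optimum-intel's OpenVINO quantization utilities; the sketch below only illustrates the recording idea behind it, and its class and attribute names are assumptions rather than the library's actual implementation.

import copy


# Conceptual sketch of a recording wrapper; the real InferRequestWrapper has a
# richer interface. Shown only to illustrate why the test swaps which request is
# wrapped depending on ov_model.stateful.
class RecordingRequest:
    def __init__(self, request, collected_inputs):
        self.request = request
        self.collected_inputs = collected_inputs

    def __call__(self, inputs, *args, **kwargs):
        # Keep a copy of every input dict fed to the decoder so it can later be
        # replayed as a calibration dataset for static quantization.
        self.collected_inputs.append(copy.deepcopy(inputs))
        return self.request(inputs, *args, **kwargs)

With a stateful export every decoding step goes through `ov_model.decoder`, so wrapping its request is what captures the per-step inputs that the test then checks for uniqueness.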
