Skip to content

Commit 5abb066

Browse files
committed on Jan 8, 2025
add test
1 parent bb1c68a commit 5abb066

File tree

2 files changed

+75
-4
lines changed

2 files changed

+75
-4
lines changed
 

‎optimum/intel/openvino/modeling_diffusion.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -162,10 +162,11 @@ def __init__(
162162
"Please provide `compile=True` if you want to use `compile_only=True` or set `compile_only=False`"
163163
)
164164

165-
if not isinstance(unet, openvino.runtime.CompiledModel):
165+
main_model = unet if unet is not None else transformer
166+
if not isinstance(main_model, openvino.runtime.CompiledModel):
166167
raise ValueError("`compile_only` expect that already compiled model will be provided")
167168

168-
model_is_dynamic = model_has_dynamic_inputs(unet)
169+
model_is_dynamic = model_has_dynamic_inputs(main_model)
169170
if dynamic_shapes ^ model_is_dynamic:
170171
requested_shapes = "dynamic" if dynamic_shapes else "static"
171172
compiled_shapes = "dynamic" if model_is_dynamic else "static"

‎tests/openvino/test_modeling.py

+72-2
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,8 @@
6767

6868
from optimum.exporters.openvino.model_patcher import patch_update_causal_mask
6969
from optimum.intel import (
70+
OVDiffusionPipeline,
71+
OVFluxPipeline,
7072
OVModelForAudioClassification,
7173
OVModelForAudioFrameClassification,
7274
OVModelForAudioXVector,
@@ -107,7 +109,9 @@
107109
from optimum.intel.utils.import_utils import is_openvino_version, is_transformers_version
108110
from optimum.intel.utils.modeling_utils import _find_files_matching_pattern
109111
from optimum.utils import (
112+
DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER,
110113
DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
114+
DIFFUSION_MODEL_TRANSFORMER_SUBFOLDER,
111115
DIFFUSION_MODEL_UNET_SUBFOLDER,
112116
DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
113117
DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
@@ -140,7 +144,8 @@ def __init__(self, *args, **kwargs):
140144
self.OV_MODEL_ID = "echarlaix/distilbert-base-uncased-finetuned-sst-2-english-openvino"
141145
self.OV_DECODER_MODEL_ID = "helenai/gpt2-ov"
142146
self.OV_SEQ2SEQ_MODEL_ID = "echarlaix/t5-small-openvino"
143-
self.OV_DIFFUSION_MODEL_ID = "hf-internal-testing/tiny-stable-diffusion-openvino"
147+
self.OV_SD_DIFFUSION_MODEL_ID = "hf-internal-testing/tiny-stable-diffusion-openvino"
148+
self.OV_FLUX_DIFFUSION_MODEL_ID = "katuni4ka/tiny-random-flux-ov"
144149
self.OV_VLM_MODEL_ID = "katuni4ka/tiny-random-llava-ov"
145150

146151
def test_load_from_hub_and_save_model(self):
@@ -337,7 +342,7 @@ def test_load_from_hub_and_save_seq2seq_model(self):
337342

338343
@require_diffusers
339344
def test_load_from_hub_and_save_stable_diffusion_model(self):
340-
loaded_pipeline = OVStableDiffusionPipeline.from_pretrained(self.OV_DIFFUSION_MODEL_ID, compile=False)
345+
loaded_pipeline = OVStableDiffusionPipeline.from_pretrained(self.OV_SD_DIFFUSION_MODEL_ID, compile=False)
341346
self.assertIsInstance(loaded_pipeline.config, Dict)
342347
# Test that PERFORMANCE_HINT is set to LATENCY by default
343348
self.assertEqual(loaded_pipeline.ov_config.get("PERFORMANCE_HINT"), "LATENCY")
@@ -391,6 +396,71 @@ def test_load_from_hub_and_save_stable_diffusion_model(self):
391396
del pipeline
392397
gc.collect()
393398

399+
@require_diffusers
400+
@unittest.skipIf(
401+
is_transformers_version("<", "4.45"),
402+
"model tokenizer exported with tokenizers 0.20 is not compatible with old transformers",
403+
)
404+
def test_load_from_hub_and_save_flux_model(self):
405+
loaded_pipeline = OVDiffusionPipeline.from_pretrained(self.OV_FLUX_DIFFUSION_MODEL_ID, compile=False)
406+
self.assertIsInstance(loaded_pipeline, OVFluxPipeline)
407+
self.assertIsInstance(loaded_pipeline.config, Dict)
408+
# Test that PERFORMANCE_HINT is set to LATENCY by default
409+
self.assertEqual(loaded_pipeline.ov_config.get("PERFORMANCE_HINT"), "LATENCY")
410+
loaded_pipeline.compile()
411+
self.assertEqual(loaded_pipeline.unet.request.get_property("PERFORMANCE_HINT"), "LATENCY")
412+
batch_size, height, width = 2, 16, 16
413+
inputs = {
414+
"prompt": ["sailing ship in storm by Leonardo da Vinci"] * batch_size,
415+
"height": height,
416+
"width": width,
417+
"num_inference_steps": 2,
418+
"output_type": "np",
419+
}
420+
421+
np.random.seed(0)
422+
torch.manual_seed(0)
423+
pipeline_outputs = loaded_pipeline(**inputs).images
424+
self.assertEqual(pipeline_outputs.shape, (batch_size, height, width, 3))
425+
426+
with TemporaryDirectory() as tmpdirname:
427+
loaded_pipeline.save_pretrained(tmpdirname)
428+
pipeline = OVDiffusionPipeline.from_pretrained(tmpdirname)
429+
self.assertIsInstance(loaded_pipeline, OVFluxPipeline)
430+
folder_contents = os.listdir(tmpdirname)
431+
self.assertIn(loaded_pipeline.config_name, folder_contents)
432+
for subfoler in {
433+
DIFFUSION_MODEL_TRANSFORMER_SUBFOLDER,
434+
DIFFUSION_MODEL_TEXT_ENCODER_SUBFOLDER,
435+
DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER,
436+
DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER,
437+
DIFFUSION_MODEL_VAE_DECODER_SUBFOLDER,
438+
}:
439+
folder_contents = os.listdir(os.path.join(tmpdirname, subfoler))
440+
self.assertIn(OV_XML_FILE_NAME, folder_contents)
441+
self.assertIn(OV_XML_FILE_NAME.replace(".xml", ".bin"), folder_contents)
442+
443+
compile_only_pipeline = OVDiffusionPipeline.from_pretrained(tmpdirname, compile_only=True)
444+
self.assertIsInstance(compile_only_pipeline, OVFluxPipeline)
445+
self.assertIsInstance(compile_only_pipeline.transformer.model, ov.runtime.CompiledModel)
446+
self.assertIsInstance(compile_only_pipeline.text_encoder.model, ov.runtime.CompiledModel)
447+
self.assertIsInstance(compile_only_pipeline.text_encoder_2.model, ov.runtime.CompiledModel)
448+
self.assertIsInstance(compile_only_pipeline.vae_encoder.model, ov.runtime.CompiledModel)
449+
self.assertIsInstance(compile_only_pipeline.vae_decoder.model, ov.runtime.CompiledModel)
450+
451+
np.random.seed(0)
452+
torch.manual_seed(0)
453+
outputs = compile_only_pipeline(**inputs).images
454+
np.testing.assert_allclose(pipeline_outputs, outputs, atol=1e-4, rtol=1e-4)
455+
del compile_only_pipeline
456+
457+
np.random.seed(0)
458+
torch.manual_seed(0)
459+
outputs = pipeline(**inputs).images
460+
np.testing.assert_allclose(pipeline_outputs, outputs, atol=1e-4, rtol=1e-4)
461+
del pipeline
462+
gc.collect()
463+
394464
@pytest.mark.run_slow
395465
@slow
396466
def test_load_model_from_hub_private_with_token(self):

0 commit comments

Comments
 (0)