@@ -26,7 +26,6 @@
 from tqdm import tqdm
 from transformers import AutoProcessor, AutoTokenizer, DataCollator, default_data_collator
 
-from optimum.intel.openvino.modeling_decoder import OVBaseDecoderModel
 from optimum.intel.openvino.utils import (
     PREDEFINED_DIFFUSION_DATASETS,
     PREDEFINED_SPEECH_TO_TEXT_DATASETS,
@@ -171,6 +170,7 @@ def build_from_dataset(
         # TODO: deprecate remove_unused_columns?
 
         from optimum.intel import OVModelForVisualCausalLM
+        from optimum.intel.openvino.modeling_decoder import OVBaseDecoderModel
         from optimum.intel.openvino.modeling_seq2seq import _OVModelForWhisper
 
         if is_diffusers_available():
@@ -184,7 +184,7 @@ def build_from_dataset(
             return self._prepare_visual_causal_lm_calibration_data(quantization_config, dataloader)
         elif isinstance(self.model, _OVModelForWhisper):
             return self._prepare_speech_to_text_calibration_data(quantization_config, dataloader)
-        elif isinstance(self.model, OVDiffusionPipeline):
+        elif is_diffusers_available() and isinstance(self.model, OVDiffusionPipeline):
             return self._prepare_diffusion_calibration_data(quantization_config, dataloader)
         else:
             # Torch model quantization scenario
@@ -337,7 +337,7 @@ def preprocess_function(item):
                 )
             else:
                 raise Exception
-        elif isinstance(self.model, OVDiffusionPipeline):
+        elif is_diffusers_available() and isinstance(self.model, OVDiffusionPipeline):
             dataset = config.dataset
 
             dataset_metadata = None
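
Both hunks appear to apply the same optional-dependency pattern: the module-level import of OVBaseDecoderModel is deferred into the function body, and every isinstance(self.model, OVDiffusionPipeline) check is placed behind an is_diffusers_available() short-circuit, so the OVDiffusionPipeline name is only looked up when diffusers is actually installed. Below is a minimal, self-contained sketch of that pattern; the names is_diffusers_available, FakeDecoderModel, and prepare_calibration_data here are illustrative stand-ins, not the optimum-intel implementation.

import importlib.util


def is_diffusers_available() -> bool:
    # Stand-in availability check: the optional "diffusers" package counts
    # as available only if it can be located without importing it.
    return importlib.util.find_spec("diffusers") is not None


class FakeDecoderModel:
    """Illustrative stand-in for a decoder model class."""


def prepare_calibration_data(model):
    # Deferred, guarded import: the optional name is only bound when the
    # dependency is installed, so importing this module never fails on a
    # missing optional package.
    if is_diffusers_available():
        from diffusers import DiffusionPipeline

    if isinstance(model, FakeDecoderModel):
        return "decoder calibration path"
    # Short-circuit guard: when diffusers is missing, the left operand is
    # False, the isinstance() call is never evaluated, and the unbound
    # DiffusionPipeline name is never referenced, so no NameError occurs.
    elif is_diffusers_available() and isinstance(model, DiffusionPipeline):
        return "diffusion calibration path"
    else:
        return "default (torch) calibration path"


print(prepare_calibration_data(FakeDecoderModel()))  # -> "decoder calibration path"

Guarding both the import and the isinstance check keeps diffusers a truly optional dependency: the module imports cleanly without it, and only the diffusion branch requires the package at runtime.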