@@ -341,7 +341,7 @@ def _quantize_ovbasemodel(
                 calibration_dataset = self._prepare_unet_dataset(
                     quantization_config.num_samples, dataset=calibration_dataset
                 )
-            elif Dataset is not None and isinstance(calibration_dataset, Dataset):
+            elif is_datasets_available() and isinstance(calibration_dataset, Dataset):
                 calibration_dataloader = self._get_calibration_dataloader(
                     calibration_dataset=calibration_dataset,
                     batch_size=batch_size,
@@ -374,7 +374,7 @@ def _quantize_ovbasemodel(
                 from optimum.intel import OVModelForCausalLM
 
                 if isinstance(self.model, OVModelForCausalLM):
-                    calibration_dataset = self._prepare_gptq_dataset(quantization_config)
+                    calibration_dataset = self._prepare_builtin_dataset(quantization_config)
                 elif isinstance(self.model, OVStableDiffusionPipelineBase):
                     calibration_dataset = self._prepare_unet_dataset(
                         quantization_config.num_samples, dataset_name=quantization_config.dataset
@@ -392,6 +392,7 @@ def _quantize_ovbasemodel(
                         self.model.unet.model, quantization_config, calibration_dataset
                     )
                 else:
+                    # This may be for example OVModelForImageClassification, OVModelForAudioClassification, etc.
                     self.model.model = _hybrid_quantization(self.model.model, quantization_config, calibration_dataset)
             else:
                 _weight_only_quantization(self.model.model, quantization_config, calibration_dataset)
@@ -672,7 +673,7 @@ def _remove_unused_columns(self, dataset: "Dataset"):
         ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
         return dataset.remove_columns(ignored_columns)
 
-    def _prepare_gptq_dataset(self, quantization_config: OVWeightQuantizationConfig):
+    def _prepare_builtin_dataset(self, quantization_config: OVWeightQuantizationConfig):
         from optimum.gptq.data import get_dataset, prepare_dataset
 
         tokenizer = AutoTokenizer.from_pretrained(quantization_config.tokenizer)
@@ -721,7 +722,7 @@ def _prepare_unet_dataset(
         if dataset is not None:
             if isinstance(dataset, nncf.Dataset):
                 return dataset
-            if Dataset is not None and isinstance(dataset, Dataset):
+            if is_datasets_available() and isinstance(dataset, Dataset):
                 dataset = dataset.select_columns(["caption"])
 
         def transform_fn(data_item):
@@ -783,7 +784,7 @@ def _weight_only_quantization(
 
     dataset = None
     if calibration_dataset is not None:
-        if Dataset is not None and isinstance(calibration_dataset, Dataset):
+        if is_datasets_available() and isinstance(calibration_dataset, Dataset):
             raise ValueError(
                 "Providing calibration dataset as an instance of `datasets.Dataset` for OV weight-only "
                 "quantization is not supported. Please provide it as `nncf.Dataset` or as iterable of "
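Note: the diff above replaces direct `Dataset is not None` checks (which rely on a module-level conditional import of `datasets.Dataset`) with `is_datasets_available()` guards. Below is a minimal sketch of that guard pattern; the helper and the check function are illustrative assumptions modeled on common Hugging Face availability utilities, not the repository's exact implementation.

import importlib.util


def is_datasets_available() -> bool:
    # True when the optional `datasets` package can be imported.
    return importlib.util.find_spec("datasets") is not None


# Lazy, optional import: `Dataset` stays None if the package is missing.
if is_datasets_available():
    from datasets import Dataset
else:
    Dataset = None


def is_hf_dataset(calibration_dataset) -> bool:
    # The availability check short-circuits, so `Dataset` is never passed
    # to isinstance() when the optional dependency is not installed.
    return is_datasets_available() and isinstance(calibration_dataset, Dataset)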