Commit de9b5c1

linters
1 parent ca30de1 commit de9b5c1

File tree: 3 files changed, +37 -33 lines changed

optimum/intel/openvino/modeling_diffusion.py (+4 -5)
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import copy
+
 import importlib
 import logging
 import os
@@ -57,14 +57,13 @@
 )
 
 from ...exporters.openvino import main_export
-from .configuration import OVConfig, OVWeightQuantizationConfig, OVQuantizationMethod
+from .configuration import OVConfig, OVQuantizationMethod, OVWeightQuantizationConfig
 from .loaders import OVTextualInversionLoaderMixin
 from .modeling_base import OVBaseModel
 from .utils import (
     ONNX_WEIGHTS_NAME,
     OV_TO_NP_TYPE,
     OV_XML_FILE_NAME,
-    PREDEFINED_SD_DATASETS,
     _print_compiled_model_properties,
 )
 
@@ -300,7 +299,7 @@ def _from_pretrained(
             # load the UNet model uncompressed to apply hybrid quantization further
             unet = cls.load_model(unet_path)
             # Apply weights compression to other `components` without dataset
-            quantization_config_without_dataset = copy.deepcopy(quantization_config)
+            quantization_config_without_dataset = deepcopy(quantization_config)
             quantization_config_without_dataset.dataset = None
         else:
             quantization_config_without_dataset = quantization_config
@@ -333,7 +332,7 @@ def _from_pretrained(
             from optimum.intel import OVQuantizer
 
             quantizer = OVQuantizer(sd_model)
-            quantization_config_copy = copy.deepcopy(quantization_config)
+            quantization_config_copy = deepcopy(quantization_config)
             quantization_config_copy.quant_method = OVQuantizationMethod.HYBRID
             quantizer.quantize(ov_config=OVConfig(quantization_config=quantization_config_copy))
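
In this file, import copy is removed and both call sites switch to the bare deepcopy name (presumably imported elsewhere in the file). The underlying pattern is unchanged: the quantization config is deep-copied before its dataset or quant_method field is mutated, so the caller's config object is never modified. Below is a minimal, self-contained sketch of that copy-then-mutate pattern; the dataclass is a hypothetical stand-in, not the real OVWeightQuantizationConfig.

from copy import deepcopy
from dataclasses import dataclass
from typing import Optional


@dataclass
class DummyQuantizationConfig:
    # Hypothetical stand-in for OVWeightQuantizationConfig, reduced to the
    # fields the hunks above actually touch.
    bits: int = 8
    dataset: Optional[str] = "some-calibration-dataset"  # placeholder value
    quant_method: str = "default"


config = DummyQuantizationConfig()

# Copy first, then mutate the copy: the hybrid path needs a variant of the
# config without a dataset, while the original config stays intact.
config_without_dataset = deepcopy(config)
config_without_dataset.dataset = None

assert config.dataset == "some-calibration-dataset"
assert config_without_dataset.dataset is None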

optimum/intel/openvino/quantization.py (+29 -22)
@@ -50,7 +50,7 @@
 from ..utils.constant import _TASK_ALIASES
 from ..utils.import_utils import DATASETS_IMPORT_ERROR, is_datasets_available
 from ..utils.modeling_utils import get_model_device
-from .configuration import OVConfig, OVQuantizationConfig, OVWeightQuantizationConfig, OVQuantizationMethod
+from .configuration import OVConfig, OVQuantizationConfig, OVQuantizationMethod, OVWeightQuantizationConfig
 from .modeling_base import OVBaseModel
 from .utils import (
     MAX_ONNX_OPSET,
@@ -339,8 +339,8 @@ def _quantize_ovbasemodel(
 
             if isinstance(self.model, OVStableDiffusionPipelineBase):
                 calibration_dataset = self._prepare_unet_dataset(
-                    quantization_config.num_samples,
-                    dataset=calibration_dataset)
+                    quantization_config.num_samples, dataset=calibration_dataset
+                )
             elif Dataset is not None and isinstance(calibration_dataset, Dataset):
                 calibration_dataloader = self._get_calibration_dataloader(
                     calibration_dataset=calibration_dataset,
@@ -351,14 +351,17 @@ def _quantize_ovbasemodel(
 
                 if self.model.export_feature == "text-generation" and self.model.use_cache:
                     calibration_dataset = self._prepare_text_generation_dataset(
-                        quantization_config, calibration_dataloader)
+                        quantization_config, calibration_dataloader
+                    )
                 else:
                     calibration_dataset = nncf.Dataset(calibration_dataloader)
             elif isinstance(calibration_dataset, collections.abc.Iterable):
                 calibration_dataset = nncf.Dataset(calibration_dataset)
             elif not isinstance(calibration_dataset, nncf.Dataset):
-                raise ValueError("`calibration_dataset` must be either an `Iterable` object or an instance of "
-                                 f"`nncf.Dataset` or `datasets.Dataset`. Found: {type(calibration_dataset)}.")
+                raise ValueError(
+                    "`calibration_dataset` must be either an `Iterable` object or an instance of "
+                    f"`nncf.Dataset` or `datasets.Dataset`. Found: {type(calibration_dataset)}."
+                )
 
         if isinstance(quantization_config, OVWeightQuantizationConfig):
             if quantization_config.dataset is not None and calibration_dataset is not None:
@@ -374,8 +377,8 @@ def _quantize_ovbasemodel(
                     calibration_dataset = self._prepare_gptq_dataset(quantization_config)
                 elif isinstance(self.model, OVStableDiffusionPipelineBase):
                     calibration_dataset = self._prepare_unet_dataset(
-                        quantization_config.num_samples,
-                        dataset_name=quantization_config.dataset)
+                        quantization_config.num_samples, dataset_name=quantization_config.dataset
+                    )
                 else:
                     raise ValueError(
                         f"Can't create weight compression calibration dataset from string for {type(self.model)}"
@@ -385,7 +388,9 @@ def _quantize_ovbasemodel(
                 if calibration_dataset is None:
                     raise ValueError("Calibration dataset is required to run hybrid quantization.")
                 if isinstance(self.model, OVStableDiffusionPipelineBase):
-                    self.model.unet.model = _hybrid_quantization(self.model.unet.model, quantization_config, calibration_dataset)
+                    self.model.unet.model = _hybrid_quantization(
+                        self.model.unet.model, quantization_config, calibration_dataset
+                    )
                 else:
                     self.model.model = _hybrid_quantization(self.model.model, quantization_config, calibration_dataset)
             else:
@@ -672,18 +677,15 @@ def _prepare_gptq_dataset(self, quantization_config: OVWeightQuantizationConfig)
 
         tokenizer = AutoTokenizer.from_pretrained(quantization_config.tokenizer)
         nsamples = quantization_config.num_samples if quantization_config.num_samples else 128
-        calibration_dataset = get_dataset(
-            quantization_config.dataset, tokenizer, seqlen=32, nsamples=nsamples
-        )
+        calibration_dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32, nsamples=nsamples)
         calibration_dataset = prepare_dataset(calibration_dataset)
         calibration_dataset = nncf.Dataset(calibration_dataset, lambda x: self.model.prepare_inputs(**x))
 
         return calibration_dataset
 
     def _prepare_text_generation_dataset(
-        self,
-        quantization_config: OVQuantizationConfig,
-        calibration_dataloader: OVDataLoader) -> nncf.Dataset:
+        self, quantization_config: OVQuantizationConfig, calibration_dataloader: OVDataLoader
+    ) -> nncf.Dataset:
         # TODO: this function is not covered by tests, remove if not relevant anymore or cover by tests otherwise
 
         # Prefetch past_key_values
@@ -705,10 +707,11 @@ def _prepare_text_generation_dataset(
         return calibration_dataset
 
     def _prepare_unet_dataset(
-        self,
-        num_samples: Optional[int] = None,
-        dataset_name: Optional[str] = None,
-        dataset: Optional[Union[Iterable, "Dataset"]] = None) -> nncf.Dataset:
+        self,
+        num_samples: Optional[int] = None,
+        dataset_name: Optional[str] = None,
+        dataset: Optional[Union[Iterable, "Dataset"]] = None,
+    ) -> nncf.Dataset:
         self.model.compile()
 
         size = self.model.unet.config.get("sample_size", 64) * self.model.vae_scale_factor
@@ -735,16 +738,20 @@ def transform_fn(data_item):
             from datasets import load_dataset
 
             dataset_metadata = PREDEFINED_SD_DATASETS[dataset_name]
-            dataset = load_dataset(dataset_name, split=dataset_metadata["split"], streaming=True).shuffle(seed=self.seed)
+            dataset = load_dataset(dataset_name, split=dataset_metadata["split"], streaming=True).shuffle(
+                seed=self.seed
+            )
             input_names = dataset_metadata["inputs"]
             dataset = dataset.select_columns(list(input_names.values()))
 
             def transform_fn(data_item):
                 return {inp_name: data_item[column] for inp_name, column in input_names.items()}
 
         else:
-            raise ValueError("For UNet inputs collection either quantization_config.dataset or custom "
-                             "calibration_dataset must be provided.")
+            raise ValueError(
+                "For UNet inputs collection either quantization_config.dataset or custom "
+                "calibration_dataset must be provided."
+            )
 
         calibration_data = []
         try:
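
Beyond the formatting, the _prepare_unet_dataset hunks document a general recipe: stream a predefined dataset, keep only the columns the pipeline needs, and wrap the result in an nncf.Dataset together with a transform function. A rough standalone sketch of that recipe follows; it assumes the datasets and nncf packages are installed, and the dataset id and column mapping are illustrative guesses rather than actual PREDEFINED_SD_DATASETS entries.

import nncf
from datasets import load_dataset

# Illustrative metadata in the shape the hunk above reads from
# PREDEFINED_SD_DATASETS: which split to stream and how dataset columns
# map onto pipeline input names. The concrete values here are assumptions.
dataset_metadata = {"split": "train", "inputs": {"prompt": "caption"}}
dataset_name = "conceptual_captions"  # placeholder dataset id

dataset = load_dataset(dataset_name, split=dataset_metadata["split"], streaming=True).shuffle(seed=42)
input_names = dataset_metadata["inputs"]
dataset = dataset.select_columns(list(input_names.values()))


def transform_fn(data_item):
    # Rename dataset columns to the input names the pipeline expects.
    return {inp_name: data_item[column] for inp_name, column in input_names.items()}


calibration_dataset = nncf.Dataset(dataset, transform_fn)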

tests/openvino/test_quantization.py (+4 -6)
@@ -21,21 +21,17 @@
 from collections import defaultdict
 from enum import Enum
 from functools import partial
-from typing import List, Union
+from typing import Union
 
 import evaluate
 import numpy as np
 import torch
 from datasets import load_dataset
-from nncf.quantization.advanced_parameters import OverflowFix
 from parameterized import parameterized
-import openvino.runtime as ov
 import nncf
 from transformers import (
     AutoModelForQuestionAnswering,
     AutoModelForSequenceClassification,
-    AutoModelForCausalLM,
-    AutoModelForTokenClassification,
     AutoTokenizer,
     AutoProcessor,
     TrainingArguments,
@@ -415,7 +411,9 @@ def test_ovmodel_hybrid_quantization_with_custom_dataset(
             export=True,
         )
         quantizer = OVQuantizer(model)
-        quantization_config = OVWeightQuantizationConfig(bits=8, num_samples=3, quant_method=OVQuantizationMethod.HYBRID)
+        quantization_config = OVWeightQuantizationConfig(
+            bits=8, num_samples=3, quant_method=OVQuantizationMethod.HYBRID
+        )
         quantizer.quantize(ov_config=OVConfig(quantization_config=quantization_config), calibration_dataset=dataset)
         num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet)
         self.assertEqual(expected_num_fake_quantize, num_fake_quantize)
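
The reformatted test exercises the hybrid path end to end, so a condensed usage sketch of the same flow may help: export a Stable Diffusion pipeline to OpenVINO, build a hybrid OVWeightQuantizationConfig, and quantize with a custom calibration dataset. The model id and prompts below are placeholders, a plain list of prompts is assumed to be an acceptable custom calibration dataset (mirroring the test), and the config classes are imported from the module path shown in the diffs above.

from optimum.intel import OVQuantizer, OVStableDiffusionPipeline
from optimum.intel.openvino.configuration import OVConfig, OVQuantizationMethod, OVWeightQuantizationConfig

# Placeholder model id and prompts; any exportable SD checkpoint should do.
model = OVStableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", export=True)
prompts = ["a photo of an astronaut riding a horse on mars"] * 3

quantizer = OVQuantizer(model)
quantization_config = OVWeightQuantizationConfig(
    bits=8, num_samples=3, quant_method=OVQuantizationMethod.HYBRID
)
quantizer.quantize(
    ov_config=OVConfig(quantization_config=quantization_config),
    calibration_dataset=prompts,
)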
