Skip to content

Commit 7a56b14

Browse files
committed
apply black
1 parent ba4f195 commit 7a56b14

File tree

4 files changed

+12
-15
lines changed

4 files changed

+12
-15
lines changed

optimum/intel/openvino/configuration.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -240,7 +240,7 @@ def post_init(self):
240240
stable_diffusion_datasets = [
241241
"conceptual_captions",
242242
"laion/220k-GPT4Vision-captions-from-LIVIS",
243-
"laion/filtered-wit"
243+
"laion/filtered-wit",
244244
]
245245
if self.dataset not in llm_datasets + stable_diffusion_datasets:
246246
raise ValueError(

optimum/intel/openvino/modeling_diffusion.py

+8-3
Original file line number | Diff line number | Diff line change
@@ -306,7 +306,7 @@ def _from_pretrained(
306306
supported_pipelines = (
307307
OVStableDiffusionPipeline,
308308
OVStableDiffusionXLPipeline,
309-
OVLatentConsistencyModelPipeline
309+
OVLatentConsistencyModelPipeline,
310310
)
311311
if not isinstance(sd_model, supported_pipelines):
312312
raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}")
@@ -316,14 +316,18 @@ def _from_pretrained(
316316

317317
if isinstance(quantization_config.dataset, str):
318318
from .quantization import get_stable_diffusion_dataset
319+
319320
dataset_name = quantization_config.dataset
320321
num_samples = math.ceil(quantization_config.subset_size / num_inference_steps)
321322
quantization_config.dataset = get_stable_diffusion_dataset(dataset_name, num_samples)
322323

323-
unet_inputs = sd_model.prepare_inputs(quantization_config.dataset, quantization_config.subset_size, num_inference_steps)
324+
unet_inputs = sd_model.prepare_inputs(
325+
quantization_config.dataset, quantization_config.subset_size, num_inference_steps
326+
)
324327
quantization_config.dataset = unet_inputs
325328

326329
from .quantization import _hybrid_quantization
330+
327331
unet = _hybrid_quantization(sd_model.unet.model, quantization_config)
328332

329333
return cls(
@@ -348,6 +352,7 @@ def prepare_inputs(
348352
calibration_data = []
349353

350354
from .quantization import InferRequestWrapper
355+
351356
self.unet.request = InferRequestWrapper(self.unet.request, calibration_data)
352357
for prompt in dataset.get_inference_data():
353358
_ = self.__call__(prompt, num_inference_steps=num_inference_steps, height=height, width=width)
@@ -356,8 +361,8 @@ def prepare_inputs(
356361
self.unet.request = self.unet.request.request
357362

358363
from nncf import Dataset
359-
return Dataset(calibration_data)
360364

365+
return Dataset(calibration_data)
361366

362367
@classmethod
363368
def _from_transformers(

optimum/intel/openvino/quantization.py

+2-8
Original file line number | Diff line number | Diff line change
@@ -640,11 +640,7 @@ def _collect_ops_with_weights(model):
640640
def get_stable_diffusion_dataset(
641641
dataset_name: str, nsamples: int = 50, seed: int = 0, text_column: str = "caption"
642642
) -> nncf.Dataset:
643-
if dataset_name not in [
644-
"conceptual_captions",
645-
"laion/220k-GPT4Vision-captions-from-LIVIS",
646-
"laion/filtered-wit"
647-
]:
643+
if dataset_name not in ["conceptual_captions", "laion/220k-GPT4Vision-captions-from-LIVIS", "laion/filtered-wit"]:
648644
raise ValueError(
649645
f"""You have entered a string value for dataset. You can only choose between
650646
['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'],
@@ -656,9 +652,7 @@ def get_stable_diffusion_dataset(
656652
return nncf.Dataset(dataset)
657653

658654

659-
def _hybrid_quantization(
660-
model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict]
661-
):
655+
def _hybrid_quantization(model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict]):
662656
dataset = quantization_config.dataset
663657
wc_ignored_scope = deepcopy(quantization_config.ignored_scope)
664658

tests/openvino/test_quantization.py

+1-3
Original file line number | Diff line number | Diff line change
@@ -384,9 +384,7 @@ def test_ovmodel_hybrid_quantization_with_custom_dataset(
384384
model = model_cls.from_pretrained(
385385
model_id,
386386
export=True,
387-
quantization_config=OVWeightQuantizationConfig(
388-
bits=8, dataset=quantization_dataset, subset_size=3
389-
),
387+
quantization_config=OVWeightQuantizationConfig(bits=8, dataset=quantization_dataset, subset_size=3),
390388
)
391389
num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet)
392390
self.assertEqual(expected_num_fake_quantize, num_fake_quantize)

0 commit comments

Comments (0)