From 2e364ef1bc087786dfc245b94da6ef29575c8045 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 4 Mar 2024 18:15:26 +0000 Subject: [PATCH 01/11] Add hybrid quantization for StableDiffusion pipelines --- optimum/intel/openvino/configuration.py | 17 ++- optimum/intel/openvino/modeling_diffusion.py | 65 +++++++++++- optimum/intel/openvino/quantization.py | 104 +++++++++++++++++++ tests/openvino/test_quantization.py | 41 ++++++++ tests/openvino/utils_tests.py | 4 +- 5 files changed, 223 insertions(+), 8 deletions(-) diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 8ddd005279..b8adf8dc30 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -179,7 +179,8 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. dataset (`Union[List[str]]`, *optional*): The dataset used for data-aware compression. You can provide your own dataset in a list of string or just use the - the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] + the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLLMs or + ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for SD models group_size (`int`, *optional*, defaults to 128): The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. ratio (`float`, *optional*, defaults to 1.0): @@ -194,6 +195,8 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): Enables AWQ method to unify weight ranges and improve overall model accuracy. ignored_scope (`nncf.IgnoredScope`, *optional*): An ignored scope that defined the list of model control flow graph nodes to be ignored during quantization. + subset_size (`int`, *optional*, defaults to 128): + Number of data samples to calculate activation statistics. """ @@ -208,6 +211,7 @@ def __init__( all_layers: Optional[bool] = None, sensitivity_metric: Optional[str] = None, ignored_scope: Optional[dict] = None, + subset_size: int = 128, **kwargs, ): self.bits = bits @@ -219,6 +223,7 @@ def __init__( self.all_layers = all_layers self.sensitivity_metric = sensitivity_metric self.ignored_scope = ignored_scope + self.subset_size = subset_size self.quant_method = "default" # TODO : enable AWQ after nncf v2.9.0 release self.post_init() @@ -231,10 +236,16 @@ def post_init(self): if self.group_size is not None and self.group_size != -1 and self.group_size <= 0: raise ValueError("`group_size` must be greater than 0 or equal to -1") if self.dataset is not None and isinstance(self.dataset, str): - if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: + llm_datasets = ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"] + stable_diffusion_datasets = [ + "conceptual_captions", + "laion/220k-GPT4Vision-captions-from-LIVIS", + "laion/filtered-wit" + ] + if self.dataset not in llm_datasets + stable_diffusion_datasets: raise ValueError( f"""You have entered a string value for dataset. 
You can only choose between - ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" + {llm_datasets} for LLLMs or {stable_diffusion_datasets} for SD models, but we found {self.dataset}""" ) if self.bits not in [4, 8]: diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 5e8a0cdc59..8e05714339 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -14,6 +14,7 @@ import importlib import logging +import math import os import shutil from pathlib import Path @@ -274,9 +275,17 @@ def _from_pretrained( kwargs[name] = load_method(new_model_save_dir) quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) - unet = cls.load_model( - new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config - ) + + dataset = None + if quantization_config: + dataset = quantization_config.dataset + quantization_config.dataset = None # apply weight compression without dataset + + unet_path = new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name + if quantization_config and dataset is None: + unet = cls.load_model(unet_path, quantization_config) + else: + unet = cls.load_model(unet_path) components = { "vae_encoder": new_model_save_dir / DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER / vae_encoder_file_name, @@ -291,6 +300,32 @@ def _from_pretrained( if model_save_dir is None: model_save_dir = new_model_save_dir + if quantization_config and dataset is not None: + sd_model = cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs) + + supported_pipelines = ( + OVStableDiffusionPipeline, + OVStableDiffusionXLPipeline, + OVLatentConsistencyModelPipeline + ) + if not isinstance(sd_model, supported_pipelines): + raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") + + num_inference_steps = 4 if isinstance(cls, OVLatentConsistencyModelPipeline) else 50 + quantization_config.dataset = dataset + + if isinstance(quantization_config.dataset, str): + from .quantization import get_stable_diffusion_dataset + dataset_name = quantization_config.dataset + num_samples = math.ceil(quantization_config.subset_size / num_inference_steps) + quantization_config.dataset = get_stable_diffusion_dataset(dataset_name, num_samples) + + unet_inputs = sd_model.prepare_inputs(quantization_config.dataset, quantization_config.subset_size, num_inference_steps) + quantization_config.dataset = unet_inputs + + from .quantization import _hybrid_quantization + unet = _hybrid_quantization(sd_model.unet.model, quantization_config) + return cls( unet=unet, config=config, @@ -300,6 +335,30 @@ def _from_pretrained( **kwargs, ) + def prepare_inputs( + self, + dataset: "Dataset", + subset_size: int, + num_inference_steps: int, + height: Optional[int] = 512, + width: Optional[int] = 512, + **kwargs, + ) -> "Dataset": + self.compile() + calibration_data = [] + + from .quantization import InferRequestWrapper + self.unet.request = InferRequestWrapper(self.unet.request, calibration_data) + for prompt in dataset.get_inference_data(): + _ = self.__call__(prompt, num_inference_steps=num_inference_steps, height=height, width=width) + if len(calibration_data) >= subset_size: + break + self.unet.request = self.unet.request.request + + from nncf import Dataset + return Dataset(calibration_data) + + @classmethod def _from_transformers( cls, diff --git a/optimum/intel/openvino/quantization.py 
b/optimum/intel/openvino/quantization.py index cd26f91f22..5cdf6b3a8b 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -16,6 +16,9 @@ import inspect import logging import os +from collections import deque +from copy import deepcopy +from datasets import load_dataset from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple, Union @@ -24,6 +27,7 @@ import torch import transformers from nncf import CompressWeightsMode, IgnoredScope, NNCFConfig, SensitivityMetric +from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters from nncf.torch import create_compressed_model, register_default_init_args, register_module from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk from nncf.torch.initialization import PTInitializingDataLoader @@ -590,4 +594,104 @@ def _weight_only_quantization( # awq=config.quant_method == "awq", # TODO : remove and add it back once nncf v2.9.0 ignored_scope=ignored_scope, dataset=dataset, + subset_size=config.subset_size, ) + + +def _get_operation_const_op(operation, const_port_id: int): + node = operation.input_value(const_port_id).get_node() + queue = deque([node]) + constant_node = None + allowed_propagation_types_list = ["Convert", "FakeQuantize", "Reshape"] + + while len(queue) != 0: + curr_node = queue.popleft() + if curr_node.get_type_name() == "Constant": + constant_node = curr_node + break + if len(curr_node.inputs()) == 0: + break + if curr_node.get_type_name() in allowed_propagation_types_list: + queue.append(curr_node.input_value(0).get_node()) + + return constant_node + + +def _is_embedding(node) -> bool: + allowed_types_list = ["f16", "f32", "f64"] + const_port_id = 0 + input_tensor = node.input_value(const_port_id) + if input_tensor.get_element_type().get_type_name() in allowed_types_list: + const_node = _get_operation_const_op(node, const_port_id) + if const_node is not None: + return True + + return False + + +def _collect_ops_with_weights(model): + ops_with_weights = [] + for op in model.get_ops(): + if op.get_type_name() == "MatMul": + constant_node_0 = _get_operation_const_op(op, const_port_id=0) + constant_node_1 = _get_operation_const_op(op, const_port_id=1) + if constant_node_0 or constant_node_1: + ops_with_weights.append(op.get_friendly_name()) + if op.get_type_name() == "Gather" and _is_embedding(op): + ops_with_weights.append(op.get_friendly_name()) + + return ops_with_weights + + +def get_stable_diffusion_dataset( + dataset_name: str, nsamples: int = 50, seed: int = 0, text_column: str = "caption" +) -> nncf.Dataset: + if dataset_name not in [ + "conceptual_captions", + "laion/220k-GPT4Vision-captions-from-LIVIS", + "laion/filtered-wit" + ]: + raise ValueError( + f"""You have entered a string value for dataset. 
You can only choose between + ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'], + but we found {dataset_name}""" + ) + + data = load_dataset(dataset_name, split="train", streaming=True).shuffle(seed=seed).take(nsamples) + dataset = [batch[text_column] for batch in data] + return nncf.Dataset(dataset) + + +def _hybrid_quantization( + model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict] +): + dataset = quantization_config.dataset + wc_ignored_scope = deepcopy(quantization_config.ignored_scope) + + if isinstance(wc_ignored_scope, dict): + wc_ignored_scope["types"] = wc_ignored_scope.get("types", []) + ["Convolution"] + else: + assert wc_ignored_scope is None + wc_ignored_scope = {"types": ["Convolution"]} + + ops_to_compress = _collect_ops_with_weights(model) + ptq_ignored_scope = deepcopy(quantization_config.ignored_scope) + if isinstance(ptq_ignored_scope, dict): + ptq_ignored_scope["names"] = ptq_ignored_scope.get("names", []) + ops_to_compress + else: + assert ptq_ignored_scope is None + ptq_ignored_scope = {"names": ops_to_compress} + + quantization_config.dataset = None # Apply Weight Compression without dataset + quantization_config.ignored_scope = wc_ignored_scope + compressed_model = _weight_only_quantization(model, quantization_config) + + quantized_model = nncf.quantize( + compressed_model, + dataset, + model_type=nncf.ModelType.TRANSFORMER, + ignored_scope=nncf.IgnoredScope(**ptq_ignored_scope), + advanced_parameters=nncf.AdvancedQuantizationParameters(AdvancedSmoothQuantParameters(matmul=-1)), + subset_size=quantization_config.subset_size, + ) + return quantized_model diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index a33e0339f3..45b64cb2f6 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -39,6 +39,7 @@ from optimum.intel import ( OVConfig, + OVLatentConsistencyModelPipeline, OVModelForAudioClassification, OVModelForCausalLM, OVModelForFeatureExtraction, @@ -233,6 +234,12 @@ class OVWeightCompressionTest(unittest.TestCase): (OVStableDiffusionXLPipeline, "stable-diffusion-xl"), ) + SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION = ( + (OVStableDiffusionPipeline, "stable-diffusion", 72, 195), + (OVStableDiffusionXLPipeline, "stable-diffusion-xl", 84, 331), + (OVLatentConsistencyModelPipeline, "latent-consistency", 50, 135), + ) + IS_SUPPORT_STATEFUL = is_openvino_version(">=", "2023.3") DEFAULT_INT4_CONFIG = {"bits": 4, "sym": True, "group_size": 64, "all_layers": True} @@ -352,6 +359,40 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type): _, num_int8, _ = get_num_quantized_nodes(model) self.assertEqual(expected_ov_int8[i], num_int8) + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) + def test_ovmodel_hybrid_quantization(self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8): + model_id = MODEL_NAMES[model_type] + quantization_config = OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions", subset_size=5) + with tempfile.TemporaryDirectory() as tmp_dir: + model = model_cls.from_pretrained(model_id, export=True, quantization_config=quantization_config) + + num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet) + self.assertEqual(expected_num_fake_quantize, num_fake_quantize) + self.assertEqual(expected_ov_int8, num_int8) + self.assertEqual(0, num_int4) + + model.save_pretrained(tmp_dir) + + 
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) + def test_ovmodel_hybrid_quantization_with_custom_dataset( + self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8 + ): + model_id = MODEL_NAMES[model_type] + dataset_name = "daspartho/stable-diffusion-prompts" + dataset = load_dataset(dataset_name, split="train", streaming=True) + quantization_dataset = nncf.Dataset(dataset, lambda x: x["prompt"]) + model = model_cls.from_pretrained( + model_id, + export=True, + quantization_config=OVWeightQuantizationConfig( + bits=8, dataset=quantization_dataset, subset_size=3 + ), + ) + num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet) + self.assertEqual(expected_num_fake_quantize, num_fake_quantize) + self.assertEqual(expected_ov_int8, num_int8) + self.assertEqual(0, num_int4) + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS) @unittest.mock.patch.dict( "optimum.intel.openvino.configuration._DEFAULT_4BIT_CONFIGS", {"facebook/opt-125m": DEFAULT_INT4_CONFIG} diff --git a/tests/openvino/utils_tests.py b/tests/openvino/utils_tests.py index 04049172d3..072a51f3d8 100644 --- a/tests/openvino/utils_tests.py +++ b/tests/openvino/utils_tests.py @@ -127,8 +127,8 @@ def get_num_quantized_nodes(ov_model): if "FakeQuantize" in elem.name: num_fake_quantize += 1 for i in range(elem.get_output_size()): - if "8" in elem.get_output_element_type(i).get_type_name(): + if elem.get_output_element_type(i).get_type_name() in ["i8", "u8"]: num_int8 += 1 - if "4" in elem.get_output_element_type(i).get_type_name(): + if elem.get_output_element_type(i).get_type_name() in ["i4", "u4"]: num_int4 += 1 return num_fake_quantize, num_int8, num_int4 From 8ffc124e1fb8231a9ad5fdaa43cfe00d2fe0f418 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 5 Mar 2024 12:00:16 +0000 Subject: [PATCH 02/11] apply black --- optimum/intel/openvino/configuration.py | 2 +- optimum/intel/openvino/modeling_diffusion.py | 11 ++++++++--- optimum/intel/openvino/quantization.py | 10 ++-------- tests/openvino/test_quantization.py | 4 +--- 4 files changed, 12 insertions(+), 15 deletions(-) diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index b8adf8dc30..9d8f23f9dd 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -240,7 +240,7 @@ def post_init(self): stable_diffusion_datasets = [ "conceptual_captions", "laion/220k-GPT4Vision-captions-from-LIVIS", - "laion/filtered-wit" + "laion/filtered-wit", ] if self.dataset not in llm_datasets + stable_diffusion_datasets: raise ValueError( diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 8e05714339..debf133147 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -306,7 +306,7 @@ def _from_pretrained( supported_pipelines = ( OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, - OVLatentConsistencyModelPipeline + OVLatentConsistencyModelPipeline, ) if not isinstance(sd_model, supported_pipelines): raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") @@ -316,14 +316,18 @@ def _from_pretrained( if isinstance(quantization_config.dataset, str): from .quantization import get_stable_diffusion_dataset + dataset_name = quantization_config.dataset num_samples = math.ceil(quantization_config.subset_size / num_inference_steps) quantization_config.dataset = 
get_stable_diffusion_dataset(dataset_name, num_samples) - unet_inputs = sd_model.prepare_inputs(quantization_config.dataset, quantization_config.subset_size, num_inference_steps) + unet_inputs = sd_model.prepare_inputs( + quantization_config.dataset, quantization_config.subset_size, num_inference_steps + ) quantization_config.dataset = unet_inputs from .quantization import _hybrid_quantization + unet = _hybrid_quantization(sd_model.unet.model, quantization_config) return cls( @@ -348,6 +352,7 @@ def prepare_inputs( calibration_data = [] from .quantization import InferRequestWrapper + self.unet.request = InferRequestWrapper(self.unet.request, calibration_data) for prompt in dataset.get_inference_data(): _ = self.__call__(prompt, num_inference_steps=num_inference_steps, height=height, width=width) @@ -356,8 +361,8 @@ def prepare_inputs( self.unet.request = self.unet.request.request from nncf import Dataset - return Dataset(calibration_data) + return Dataset(calibration_data) @classmethod def _from_transformers( diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index 5cdf6b3a8b..f0c9f504fb 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -646,11 +646,7 @@ def _collect_ops_with_weights(model): def get_stable_diffusion_dataset( dataset_name: str, nsamples: int = 50, seed: int = 0, text_column: str = "caption" ) -> nncf.Dataset: - if dataset_name not in [ - "conceptual_captions", - "laion/220k-GPT4Vision-captions-from-LIVIS", - "laion/filtered-wit" - ]: + if dataset_name not in ["conceptual_captions", "laion/220k-GPT4Vision-captions-from-LIVIS", "laion/filtered-wit"]: raise ValueError( f"""You have entered a string value for dataset. You can only choose between ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'], @@ -662,9 +658,7 @@ def get_stable_diffusion_dataset( return nncf.Dataset(dataset) -def _hybrid_quantization( - model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict] -): +def _hybrid_quantization(model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict]): dataset = quantization_config.dataset wc_ignored_scope = deepcopy(quantization_config.ignored_scope) diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 45b64cb2f6..38091fc596 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -384,9 +384,7 @@ def test_ovmodel_hybrid_quantization_with_custom_dataset( model = model_cls.from_pretrained( model_id, export=True, - quantization_config=OVWeightQuantizationConfig( - bits=8, dataset=quantization_dataset, subset_size=3 - ), + quantization_config=OVWeightQuantizationConfig(bits=8, dataset=quantization_dataset, subset_size=3), ) num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet) self.assertEqual(expected_num_fake_quantize, num_fake_quantize) From bfd71721ea54ead9f7704dc4046e2764c392d1c9 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 5 Mar 2024 12:51:39 +0000 Subject: [PATCH 03/11] fix tests --- tests/openvino/test_quantization.py | 6 +++--- tests/openvino/utils_tests.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 38091fc596..1d13696959 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -158,10 +158,10 @@ class 
OVWeightCompressionTest(unittest.TestCase): (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 44), ) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 62, 365),) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 385),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 62, 86),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 150),) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTO_COMPRESSED_MATMULS = ( - (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 14, 136), + (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 14, 50), ) SUPPORTED_ARCHITECTURES_STATEFUL_WITH_EXPECTED_8BIT_COMPRESSED_MATMULS = ( (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 44), diff --git a/tests/openvino/utils_tests.py b/tests/openvino/utils_tests.py index 072a51f3d8..97c8a92836 100644 --- a/tests/openvino/utils_tests.py +++ b/tests/openvino/utils_tests.py @@ -116,7 +116,7 @@ "stable-diffusion-xl-refiner": (366, 34, 42, 66), } -_ARCHITECTURES_TO_EXPECTED_INT4_INT8 = {"opt125m": (62, 477)} +_ARCHITECTURES_TO_EXPECTED_INT4_INT8 = {"opt125m": (62, 86)} def get_num_quantized_nodes(ov_model): From 93dae89cf5b5a6b1fce57808288c58cb6279bf23 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 5 Mar 2024 15:17:37 +0000 Subject: [PATCH 04/11] fix ruff --- optimum/intel/openvino/modeling_diffusion.py | 8 +++----- optimum/intel/openvino/quantization.py | 4 ++-- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index debf133147..707fd70b93 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -35,6 +35,7 @@ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import CONFIG_NAME, is_invisible_watermark_available from huggingface_hub import snapshot_download +from nncf import Dataset from openvino._offline_transformations import compress_model_transformation from openvino.runtime import Core from transformers import CLIPFeatureExtractor, CLIPTokenizer @@ -341,13 +342,13 @@ def _from_pretrained( def prepare_inputs( self, - dataset: "Dataset", + dataset: Dataset, subset_size: int, num_inference_steps: int, height: Optional[int] = 512, width: Optional[int] = 512, **kwargs, - ) -> "Dataset": + ) -> Dataset: self.compile() calibration_data = [] @@ -359,9 +360,6 @@ def prepare_inputs( if len(calibration_data) >= subset_size: break self.unet.request = self.unet.request.request - - from nncf import Dataset - return Dataset(calibration_data) @classmethod diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index f0c9f504fb..a993702876 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -18,7 +18,6 @@ import os from collections import deque from copy import deepcopy -from datasets import load_dataset from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple, Union @@ -26,6 +25,7 @@ import openvino import torch import transformers +from datasets import load_dataset from nncf import CompressWeightsMode, IgnoredScope, NNCFConfig, SensitivityMetric from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters from nncf.torch import 
create_compressed_model, register_default_init_args, register_module @@ -594,7 +594,7 @@ def _weight_only_quantization( # awq=config.quant_method == "awq", # TODO : remove and add it back once nncf v2.9.0 ignored_scope=ignored_scope, dataset=dataset, - subset_size=config.subset_size, + # subset_size=config.subset_size, # TODO : enable from nncf v2.9.0 ) From 74f88837478a4c190e0302fd4e3eeaabe89c57a1 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 5 Mar 2024 15:50:16 +0000 Subject: [PATCH 05/11] fix lcm bug --- optimum/intel/openvino/modeling_diffusion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 707fd70b93..7d9e625d9f 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -312,7 +312,7 @@ def _from_pretrained( if not isinstance(sd_model, supported_pipelines): raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") - num_inference_steps = 4 if isinstance(cls, OVLatentConsistencyModelPipeline) else 50 + num_inference_steps = 4 if isinstance(sd_model, OVLatentConsistencyModelPipeline) else 50 quantization_config.dataset = dataset if isinstance(quantization_config.dataset, str): From 783a6541a41372cadef17a9835b121700ecf7159 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Wed, 6 Mar 2024 17:32:18 +0000 Subject: [PATCH 06/11] apply review comments --- optimum/intel/openvino/configuration.py | 28 ++++----- optimum/intel/openvino/modeling_decoder.py | 3 +- optimum/intel/openvino/modeling_diffusion.py | 57 +++++++++-------- optimum/intel/openvino/quantization.py | 64 +++++++++++--------- tests/openvino/test_quantization.py | 12 ++-- 5 files changed, 85 insertions(+), 79 deletions(-) diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 9d8f23f9dd..3c52bbcd6a 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -167,7 +167,7 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): bits (`int`, defaults to 8): The number of bits to quantize to. - sym (`bool`, *optional*, defaults to `False`): + sym (`bool`, defaults to `False`): Whether to use symetric quantization. tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*): The tokenizer used to process the dataset. You can pass either: @@ -177,26 +177,24 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. - dataset (`Union[List[str]]`, *optional*): + dataset (`str or List[str]`, *optional*): The dataset used for data-aware compression. You can provide your own dataset in a list of string or just use the the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLLMs or - ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for SD models - group_size (`int`, *optional*, defaults to 128): - The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. - ratio (`float`, *optional*, defaults to 1.0): + ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for SD models. 
+ ratio (`float`, defaults to 1.0): The ratio between baseline and backup precisions (e.g. 0.9 means 90% of layers quantized to INT4_ASYM and the rest to INT8_ASYM). + group_size (`int`, *optional*): + The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. all_layers (`bool`, *optional*): Defines how many layers are compressed to 4-bits while the rest are kept in 8-bit presicion. - sensitivity_metric (`nncf.SensitivityMetric`, *optional*): + sensitivity_metric (`str`, *optional*): The sensitivity metric for assigning quantization precision to layers. In order to preserve the accuracy of the model, the more sensitive layers receives a higher precision. - awq (`bool`, *optional*): - Enables AWQ method to unify weight ranges and improve overall model accuracy. - ignored_scope (`nncf.IgnoredScope`, *optional*): + ignored_scope (`dict`, *optional*): An ignored scope that defined the list of model control flow graph nodes to be ignored during quantization. - subset_size (`int`, *optional*, defaults to 128): - Number of data samples to calculate activation statistics. + num_samples (`int`, *optional*): + The maximum number of samples composing the calibration dataset. """ @@ -205,13 +203,13 @@ def __init__( bits: int = 8, sym: bool = False, tokenizer: Optional[Any] = None, - dataset: Optional[str] = None, + dataset: Optional[Union[str, List[str]]] = None, ratio: float = 1.0, group_size: Optional[int] = None, all_layers: Optional[bool] = None, sensitivity_metric: Optional[str] = None, ignored_scope: Optional[dict] = None, - subset_size: int = 128, + num_samples: Optional[int] = None, **kwargs, ): self.bits = bits @@ -223,7 +221,7 @@ def __init__( self.all_layers = all_layers self.sensitivity_metric = sensitivity_metric self.ignored_scope = ignored_scope - self.subset_size = subset_size + self.num_samples = num_samples self.quant_method = "default" # TODO : enable AWQ after nncf v2.9.0 release self.post_init() diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 92a2ce436d..aab94c9e99 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -635,7 +635,8 @@ def _from_pretrained( # from optimum.gptq.utils import get_seqlen # seqlen = get_seqlen(causal_model) - dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32) + nsamples = quantization_config.num_samples if quantization_config.num_samples else 128 + dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32, nsamples=nsamples) dataset = prepare_dataset(dataset) quantization_config = copy.deepcopy(quantization_config) quantization_config.dataset = nncf.Dataset(dataset, lambda x: causal_model.prepare_inputs(**x)) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 7d9e625d9f..c6c963355d 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -17,6 +17,7 @@ import math import os import shutil +from copy import deepcopy from pathlib import Path from tempfile import TemporaryDirectory, gettempdir from typing import Any, Dict, List, Optional, Union @@ -35,7 +36,6 @@ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import CONFIG_NAME, is_invisible_watermark_available from huggingface_hub import snapshot_download -from nncf import Dataset from openvino._offline_transformations import compress_model_transformation from 
openvino.runtime import Core from transformers import CLIPFeatureExtractor, CLIPTokenizer @@ -276,17 +276,15 @@ def _from_pretrained( kwargs[name] = load_method(new_model_save_dir) quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) - - dataset = None - if quantization_config: - dataset = quantization_config.dataset - quantization_config.dataset = None # apply weight compression without dataset - + weight_quantization_config = deepcopy(quantization_config) unet_path = new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name - if quantization_config and dataset is None: - unet = cls.load_model(unet_path, quantization_config) - else: + if weight_quantization_config is not None and weight_quantization_config.dataset is not None: + # load the UNet model uncompressed to apply hybrid quantization further unet = cls.load_model(unet_path) + # Apply weights compression to other `components` without dataset + weight_quantization_config.dataset = None + else: + unet = cls.load_model(unet_path, quantization_config) components = { "vae_encoder": new_model_save_dir / DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER / vae_encoder_file_name, @@ -296,12 +294,12 @@ def _from_pretrained( } for key, value in components.items(): - components[key] = cls.load_model(value, quantization_config) if value.is_file() else None + components[key] = cls.load_model(value, weight_quantization_config) if value.is_file() else None if model_save_dir is None: model_save_dir = new_model_save_dir - if quantization_config and dataset is not None: + if quantization_config and quantization_config.dataset is not None: sd_model = cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs) supported_pipelines = ( @@ -313,23 +311,23 @@ def _from_pretrained( raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") num_inference_steps = 4 if isinstance(sd_model, OVLatentConsistencyModelPipeline) else 50 - quantization_config.dataset = dataset + nsamples = quantization_config.num_samples if quantization_config.num_samples else 200 + dataset = deepcopy(quantization_config.dataset) - if isinstance(quantization_config.dataset, str): + if isinstance(dataset, str): from .quantization import get_stable_diffusion_dataset - dataset_name = quantization_config.dataset - num_samples = math.ceil(quantization_config.subset_size / num_inference_steps) - quantization_config.dataset = get_stable_diffusion_dataset(dataset_name, num_samples) + num_unet_runs = math.ceil(nsamples / num_inference_steps) + dataset = get_stable_diffusion_dataset(dataset, num_unet_runs) - unet_inputs = sd_model.prepare_inputs( - quantization_config.dataset, quantization_config.subset_size, num_inference_steps - ) - quantization_config.dataset = unet_inputs + unet_inputs = sd_model._prepare_unet_inputs(dataset, nsamples, num_inference_steps) from .quantization import _hybrid_quantization - unet = _hybrid_quantization(sd_model.unet.model, quantization_config) + hybrid_quantization_config = deepcopy(quantization_config) + hybrid_quantization_config.dataset = unet_inputs + hybrid_quantization_config.num_samples = nsamples + unet = _hybrid_quantization(sd_model.unet.model, hybrid_quantization_config) return cls( unet=unet, @@ -340,27 +338,26 @@ def _from_pretrained( **kwargs, ) - def prepare_inputs( + def _prepare_unet_inputs( self, - dataset: Dataset, - subset_size: int, + dataset: List[str], + num_samples: int, num_inference_steps: int, height: Optional[int] = 512, width: 
Optional[int] = 512, **kwargs, - ) -> Dataset: + ) -> Dict[str, Any]: self.compile() calibration_data = [] from .quantization import InferRequestWrapper self.unet.request = InferRequestWrapper(self.unet.request, calibration_data) - for prompt in dataset.get_inference_data(): + for prompt in dataset: _ = self.__call__(prompt, num_inference_steps=num_inference_steps, height=height, width=width) - if len(calibration_data) >= subset_size: - break + self.unet.request = self.unet.request.request - return Dataset(calibration_data) + return calibration_data[:num_samples] @classmethod def _from_transformers( diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index a993702876..b481734c50 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -19,7 +19,7 @@ from collections import deque from copy import deepcopy from pathlib import Path -from typing import Any, Callable, Dict, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, Union import nncf import openvino @@ -554,7 +554,7 @@ def _remove_unused_columns(self, dataset: "Dataset"): def _weight_only_quantization( model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict] -): +) -> openvino.runtime.Model: config = quantization_config if isinstance(config, dict): config = OVWeightQuantizationConfig.from_dict(quantization_config) @@ -568,7 +568,8 @@ def _weight_only_quantization( from optimum.gptq.data import get_dataset, prepare_dataset - dataset = get_dataset(config.dataset, tokenizer, seqlen=32) + nsamples = config.num_samples if config.num_samples else 128 + dataset = get_dataset(config.dataset, tokenizer, seqlen=32, nsamples=nsamples) dataset = prepare_dataset(dataset) sensitivity_metric = None @@ -594,7 +595,7 @@ def _weight_only_quantization( # awq=config.quant_method == "awq", # TODO : remove and add it back once nncf v2.9.0 ignored_scope=ignored_scope, dataset=dataset, - # subset_size=config.subset_size, # TODO : enable from nncf v2.9.0 + # subset_size=config.num_samples if config.num_samples else 128, # TODO : enable from nncf v2.9.0 ) @@ -645,7 +646,7 @@ def _collect_ops_with_weights(model): def get_stable_diffusion_dataset( dataset_name: str, nsamples: int = 50, seed: int = 0, text_column: str = "caption" -) -> nncf.Dataset: +) -> List[str]: if dataset_name not in ["conceptual_captions", "laion/220k-GPT4Vision-captions-from-LIVIS", "laion/filtered-wit"]: raise ValueError( f"""You have entered a string value for dataset. 
You can only choose between @@ -655,37 +656,46 @@ def get_stable_diffusion_dataset( data = load_dataset(dataset_name, split="train", streaming=True).shuffle(seed=seed).take(nsamples) dataset = [batch[text_column] for batch in data] - return nncf.Dataset(dataset) + return dataset -def _hybrid_quantization(model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict]): - dataset = quantization_config.dataset - wc_ignored_scope = deepcopy(quantization_config.ignored_scope) - - if isinstance(wc_ignored_scope, dict): - wc_ignored_scope["types"] = wc_ignored_scope.get("types", []) + ["Convolution"] - else: - assert wc_ignored_scope is None - wc_ignored_scope = {"types": ["Convolution"]} +def _hybrid_quantization( + model: openvino.runtime.Model, quantization_config: OVWeightQuantizationConfig +) -> openvino.runtime.Model: + """ + Quantize a model in hybrid mode with NNCF which means that we quantize: + weights of MatMul and Embedding layers and activations of other layers. + The optimization specifications defined in `quantization_config`. + + Args: + model (`openvino.runtime.Model`): + The OpenVINO Runtime model for applying hybrid quantization. + quantization_config (`OVWeightQuantizationConfig`): + The configuration containing the parameters related to quantization. + Returns: + The OpenVINO Runtime model with applied hybrid quantization. + """ + ignored_scope = quantization_config.ignored_scope if quantization_config.ignored_scope is not None else {} ops_to_compress = _collect_ops_with_weights(model) - ptq_ignored_scope = deepcopy(quantization_config.ignored_scope) - if isinstance(ptq_ignored_scope, dict): - ptq_ignored_scope["names"] = ptq_ignored_scope.get("names", []) + ops_to_compress - else: - assert ptq_ignored_scope is None - ptq_ignored_scope = {"names": ops_to_compress} + ptq_ignored_scope = deepcopy(ignored_scope) + ptq_ignored_scope["names"] = ignored_scope.get("names", []) + ops_to_compress - quantization_config.dataset = None # Apply Weight Compression without dataset - quantization_config.ignored_scope = wc_ignored_scope - compressed_model = _weight_only_quantization(model, quantization_config) + wc_quantization_config = deepcopy(quantization_config) + wc_quantization_config.ignored_scope = ignored_scope + wc_quantization_config.ignored_scope["types"] = ignored_scope.get("types", []) + ["Convolution"] + # Apply Weight Compression without dataset + wc_quantization_config.dataset = None + compressed_model = _weight_only_quantization(model, wc_quantization_config) + subset_size = quantization_config.num_samples if quantization_config.num_samples else 200 quantized_model = nncf.quantize( - compressed_model, - dataset, + model=compressed_model, + calibration_dataset=nncf.Dataset(quantization_config.dataset), model_type=nncf.ModelType.TRANSFORMER, ignored_scope=nncf.IgnoredScope(**ptq_ignored_scope), + # The SQ algo should be disabled for MatMul nodes because their weights are already compressed advanced_parameters=nncf.AdvancedQuantizationParameters(AdvancedSmoothQuantParameters(matmul=-1)), - subset_size=quantization_config.subset_size, + subset_size=subset_size, ) return quantized_model diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 1d13696959..51ba1787a9 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -362,7 +362,7 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type): 
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) def test_ovmodel_hybrid_quantization(self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8): model_id = MODEL_NAMES[model_type] - quantization_config = OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions", subset_size=5) + quantization_config = OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions", num_samples=2) with tempfile.TemporaryDirectory() as tmp_dir: model = model_cls.from_pretrained(model_id, export=True, quantization_config=quantization_config) @@ -373,18 +373,18 @@ def test_ovmodel_hybrid_quantization(self, model_cls, model_type, expected_num_f model.save_pretrained(tmp_dir) - @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION[2:]) def test_ovmodel_hybrid_quantization_with_custom_dataset( self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8 ): model_id = MODEL_NAMES[model_type] - dataset_name = "daspartho/stable-diffusion-prompts" - dataset = load_dataset(dataset_name, split="train", streaming=True) - quantization_dataset = nncf.Dataset(dataset, lambda x: x["prompt"]) + dataset = [ + "dream rose covered with clean crystal, sharp edges, transparent, beautiful, highly detailed, high render" + ] model = model_cls.from_pretrained( model_id, export=True, - quantization_config=OVWeightQuantizationConfig(bits=8, dataset=quantization_dataset, subset_size=3), + quantization_config=OVWeightQuantizationConfig(bits=8, dataset=dataset, num_samples=3), ) num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet) self.assertEqual(expected_num_fake_quantize, num_fake_quantize) From 24de9661143ed2b6c01394acf8f078fee302d24c Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Thu, 7 Mar 2024 16:50:07 +0000 Subject: [PATCH 07/11] rework dataset processing --- optimum/intel/openvino/configuration.py | 6 +- optimum/intel/openvino/modeling_diffusion.py | 79 +++++++++++++------- optimum/intel/openvino/quantization.py | 39 +++------- optimum/intel/openvino/utils.py | 7 ++ 4 files changed, 74 insertions(+), 57 deletions(-) diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 3c52bbcd6a..610d7bf956 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -178,9 +178,9 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. dataset (`str or List[str]`, *optional*): - The dataset used for data-aware compression. You can provide your own dataset in a list of string or just use the - the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLLMs or - ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for SD models. + The dataset used for data-aware compression or quantization with NNCF. You can provide your own dataset + in a list of string or just use the the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLLMs + or ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for SD models. ratio (`float`, defaults to 1.0): The ratio between baseline and backup precisions (e.g. 0.9 means 90% of layers quantized to INT4_ASYM and the rest to INT8_ASYM). 
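The reworked configuration options above boil down to two accepted `dataset` forms, with `num_samples` capping the UNet calibration set (the pipeline code falls back to 200 samples when it is unset). A minimal, hedged usage sketch, not part of the patch; the model ID, prompt, and sample counts are placeholders:

```python
# Illustrative sketch only: the two dataset forms accepted after this change.
from optimum.intel import OVStableDiffusionPipeline, OVWeightQuantizationConfig

# 1) One of the predefined dataset names listed in the docstring above.
config = OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions", num_samples=200)

# 2) A custom list of calibration prompts.
config = OVWeightQuantizationConfig(
    bits=8,
    dataset=["a photo of an astronaut riding a horse on mars"],  # example prompt
    num_samples=32,
)

pipe = OVStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # placeholder model ID
    export=True,
    quantization_config=config,
)
```

Passing a config without `dataset` keeps the previous behaviour: plain weight-only compression of every pipeline component, including the UNet.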
diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index c6c963355d..27894da21a 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -14,7 +14,6 @@ import importlib import logging -import math import os import shutil from copy import deepcopy @@ -59,7 +58,13 @@ from .configuration import OVConfig, OVWeightQuantizationConfig from .loaders import OVTextualInversionLoaderMixin from .modeling_base import OVBaseModel -from .utils import ONNX_WEIGHTS_NAME, OV_TO_NP_TYPE, OV_XML_FILE_NAME, _print_compiled_model_properties +from .utils import ( + ONNX_WEIGHTS_NAME, + OV_TO_NP_TYPE, + OV_XML_FILE_NAME, + PREDEFINED_SD_DATASETS, + _print_compiled_model_properties, +) core = Core() @@ -276,13 +281,15 @@ def _from_pretrained( kwargs[name] = load_method(new_model_save_dir) quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) - weight_quantization_config = deepcopy(quantization_config) + + dataset = None unet_path = new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name - if weight_quantization_config is not None and weight_quantization_config.dataset is not None: + if quantization_config is not None and quantization_config.dataset is not None: + dataset = quantization_config.dataset # load the UNet model uncompressed to apply hybrid quantization further unet = cls.load_model(unet_path) # Apply weights compression to other `components` without dataset - weight_quantization_config.dataset = None + quantization_config.dataset = None else: unet = cls.load_model(unet_path, quantization_config) @@ -294,12 +301,12 @@ def _from_pretrained( } for key, value in components.items(): - components[key] = cls.load_model(value, weight_quantization_config) if value.is_file() else None + components[key] = cls.load_model(value, quantization_config) if value.is_file() else None if model_save_dir is None: model_save_dir = new_model_save_dir - if quantization_config and quantization_config.dataset is not None: + if dataset is not None: sd_model = cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs) supported_pipelines = ( @@ -310,24 +317,13 @@ def _from_pretrained( if not isinstance(sd_model, supported_pipelines): raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") - num_inference_steps = 4 if isinstance(sd_model, OVLatentConsistencyModelPipeline) else 50 nsamples = quantization_config.num_samples if quantization_config.num_samples else 200 - dataset = deepcopy(quantization_config.dataset) - - if isinstance(dataset, str): - from .quantization import get_stable_diffusion_dataset - - num_unet_runs = math.ceil(nsamples / num_inference_steps) - dataset = get_stable_diffusion_dataset(dataset, num_unet_runs) - - unet_inputs = sd_model._prepare_unet_inputs(dataset, nsamples, num_inference_steps) + unet_inputs = sd_model._prepare_unet_inputs(dataset, nsamples) from .quantization import _hybrid_quantization - hybrid_quantization_config = deepcopy(quantization_config) - hybrid_quantization_config.dataset = unet_inputs - hybrid_quantization_config.num_samples = nsamples - unet = _hybrid_quantization(sd_model.unet.model, hybrid_quantization_config) + unet = _hybrid_quantization(sd_model.unet.model, quantization_config, dataset=unet_inputs) + quantization_config.dataset = dataset return cls( unet=unet, @@ -340,21 +336,52 @@ def _from_pretrained( def _prepare_unet_inputs( self, - dataset: 
List[str], + dataset: Union[str, List[Any]], num_samples: int, - num_inference_steps: int, height: Optional[int] = 512, width: Optional[int] = 512, + seed: Optional[int] = 42, **kwargs, ) -> Dict[str, Any]: self.compile() - calibration_data = [] + + if isinstance(dataset, str): + dataset = deepcopy(dataset) + available_datasets = PREDEFINED_SD_DATASETS.keys() + if dataset not in available_datasets: + raise ValueError( + f"""You have entered a string value for dataset. You can only choose between + {list(available_datasets)}, but the {dataset} was found""" + ) + + from datasets import load_dataset + + dataset_metadata = PREDEFINED_SD_DATASETS[dataset] + dataset = load_dataset(dataset, split=dataset_metadata["split"], streaming=True).shuffle(seed=seed) + input_names = dataset_metadata["inputs"] + dataset = dataset.select_columns(list(input_names.values())) + + def transform_fn(data_item): + return {inp_name: data_item[column] for inp_name, column in input_names.items()} + + else: + + def transform_fn(data_item): + return data_item if isinstance(data_item, (list, dict)) else [data_item] from .quantization import InferRequestWrapper + calibration_data = [] self.unet.request = InferRequestWrapper(self.unet.request, calibration_data) - for prompt in dataset: - _ = self.__call__(prompt, num_inference_steps=num_inference_steps, height=height, width=width) + + for inputs in dataset: + inputs = transform_fn(inputs) + if isinstance(inputs, dict): + self.__call__(**inputs, height=height, width=width) + else: + self.__call__(*inputs, height=height, width=width) + if len(calibration_data) > num_samples: + break self.unet.request = self.unet.request.request return calibration_data[:num_samples] diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index b481734c50..c46f29092b 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -17,15 +17,13 @@ import logging import os from collections import deque -from copy import deepcopy from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Optional, Tuple, Union import nncf import openvino import torch import transformers -from datasets import load_dataset from nncf import CompressWeightsMode, IgnoredScope, NNCFConfig, SensitivityMetric from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters from nncf.torch import create_compressed_model, register_default_init_args, register_module @@ -644,23 +642,8 @@ def _collect_ops_with_weights(model): return ops_with_weights -def get_stable_diffusion_dataset( - dataset_name: str, nsamples: int = 50, seed: int = 0, text_column: str = "caption" -) -> List[str]: - if dataset_name not in ["conceptual_captions", "laion/220k-GPT4Vision-captions-from-LIVIS", "laion/filtered-wit"]: - raise ValueError( - f"""You have entered a string value for dataset. 
You can only choose between - ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'], - but we found {dataset_name}""" - ) - - data = load_dataset(dataset_name, split="train", streaming=True).shuffle(seed=seed).take(nsamples) - dataset = [batch[text_column] for batch in data] - return dataset - - def _hybrid_quantization( - model: openvino.runtime.Model, quantization_config: OVWeightQuantizationConfig + model: openvino.runtime.Model, quantization_config: OVWeightQuantizationConfig, dataset: Dict[str, Any] ) -> openvino.runtime.Model: """ Quantize a model in hybrid mode with NNCF which means that we quantize: @@ -672,28 +655,28 @@ def _hybrid_quantization( The OpenVINO Runtime model for applying hybrid quantization. quantization_config (`OVWeightQuantizationConfig`): The configuration containing the parameters related to quantization. + dataset (`Dict[str, Any]`): + The dataset used for hybrid quantization. Returns: The OpenVINO Runtime model with applied hybrid quantization. """ - ignored_scope = quantization_config.ignored_scope if quantization_config.ignored_scope is not None else {} - ops_to_compress = _collect_ops_with_weights(model) - ptq_ignored_scope = deepcopy(ignored_scope) - ptq_ignored_scope["names"] = ignored_scope.get("names", []) + ops_to_compress - wc_quantization_config = deepcopy(quantization_config) + ignored_scope = quantization_config.ignored_scope if isinstance(quantization_config.ignored_scope, dict) else {} + ptq_ignored_scope = nncf.IgnoredScope(**ignored_scope) + ptq_ignored_scope.names += ops_to_compress + + wc_quantization_config = copy.deepcopy(quantization_config) wc_quantization_config.ignored_scope = ignored_scope wc_quantization_config.ignored_scope["types"] = ignored_scope.get("types", []) + ["Convolution"] - # Apply Weight Compression without dataset - wc_quantization_config.dataset = None compressed_model = _weight_only_quantization(model, wc_quantization_config) subset_size = quantization_config.num_samples if quantization_config.num_samples else 200 quantized_model = nncf.quantize( model=compressed_model, - calibration_dataset=nncf.Dataset(quantization_config.dataset), + calibration_dataset=nncf.Dataset(dataset), model_type=nncf.ModelType.TRANSFORMER, - ignored_scope=nncf.IgnoredScope(**ptq_ignored_scope), + ignored_scope=ptq_ignored_scope, # The SQ algo should be disabled for MatMul nodes because their weights are already compressed advanced_parameters=nncf.AdvancedQuantizationParameters(AdvancedSmoothQuantParameters(matmul=-1)), subset_size=subset_size, diff --git a/optimum/intel/openvino/utils.py b/optimum/intel/openvino/utils.py index 49aec81e57..cbcc696393 100644 --- a/optimum/intel/openvino/utils.py +++ b/optimum/intel/openvino/utils.py @@ -99,6 +99,13 @@ } +PREDEFINED_SD_DATASETS = { + "conceptual_captions": {"split": "train", "inputs": {"prompt": "caption"}}, + "laion/220k-GPT4Vision-captions-from-LIVIS": {"split": "train", "inputs": {"prompt": "caption"}}, + "laion/filtered-wit": {"split": "train", "inputs": {"prompt": "caption"}}, +} + + def use_external_data_format(num_parameters: int) -> bool: """ Returns whether or not the model requires using external data format for the ONNX export From 3544c4bd823623d657a43659d8871cd33a964ac1 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Thu, 7 Mar 2024 18:22:44 +0000 Subject: [PATCH 08/11] Add doc --- docs/source/optimization_ov.mdx | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/source/optimization_ov.mdx 
b/docs/source/optimization_ov.mdx index 088b78f0d3..3506027803 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -69,6 +69,23 @@ from optimum.intel import OVModelForCausalLM model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) ``` +## Hybrid quantization + +Traditional optimization methods like post-training 8-bit quantization do not work for Stable Diffusion models because accuracy drops significantly. On the other hand, weight compression does not improve performance when applied to Stable Diffusion models, as the size of activations is comparable to weights. +The UNet model takes up most of the overall execution time of the pipeline. Thus, optimizing just one model brings substantial benefits in terms of inference speed while keeping acceptable accuracy without fine-tuning. Quantizing the rest of the diffusion pipeline does not significantly improve inference performance but could potentially lead to substantial degradation of accuracy. +Therefore, the proposal is to apply quantization in hybrid mode for the UNet model and weight-only quantization for other pipeline components. The hybrid mode involves the quantization of weights in MatMul and Embedding layers, and activations of other layers, facilitating accuracy preservation post-optimization while reducing the model size. +For optimizing the Stable Diffusion pipeline, utilize the `quantization_config` to define optimization parameters. To enable hybrid quantization, specify the quantization dataset in the `quantization_config`; otherwise, weight-only quantization in specified precisions will be applied to UNet. + +```python +from optimum.intel import OVStableDiffusionPipeline, OVWeightQuantizationConfig + +model = OVStableDiffusionPipeline.from_pretrained( + model_id, + export=True, + quantization_config=OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions"), +) +``` + `load_in_8bit` is enabled by default for the models larger than 1 billion parameters. 
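The documentation added above can be read together with a short end-to-end sketch. It assumes only the API introduced in this series; the model ID and output directory are placeholders.

```python
# Sketch only: hybrid quantization of a Stable Diffusion pipeline, then saving the result.
from optimum.intel import OVStableDiffusionPipeline, OVWeightQuantizationConfig

pipe = OVStableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # placeholder model ID
    export=True,
    quantization_config=OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions", num_samples=200),
)
pipe.save_pretrained("stable-diffusion-v1-5-hybrid-int8")

# Hybrid mode leaves FakeQuantize nodes on UNet activations, while the other
# submodels (text encoder, VAE) only receive weight compression.
fake_quantize_ops = [op for op in pipe.unet.model.get_ops() if op.get_type_name() == "FakeQuantize"]
print(f"FakeQuantize nodes in the UNet: {len(fake_quantize_ops)}")
```

The printed count is the same quantity the new tests assert through `get_num_quantized_nodes`.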
From 067c6d571b8e06b9a3174f1088b479d898b44406 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Sun, 10 Mar 2024 21:30:00 +0000 Subject: [PATCH 09/11] remove SDXL test --- tests/openvino/test_quantization.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 51ba1787a9..cb85b4aa48 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -236,7 +236,6 @@ class OVWeightCompressionTest(unittest.TestCase): SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION = ( (OVStableDiffusionPipeline, "stable-diffusion", 72, 195), - (OVStableDiffusionXLPipeline, "stable-diffusion-xl", 84, 331), (OVLatentConsistencyModelPipeline, "latent-consistency", 50, 135), ) @@ -373,7 +372,7 @@ def test_ovmodel_hybrid_quantization(self, model_cls, model_type, expected_num_f model.save_pretrained(tmp_dir) - @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION[2:]) + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) def test_ovmodel_hybrid_quantization_with_custom_dataset( self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8 ): From 2dc40878f88065fa637d00970fa4027c3c0e4db2 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 11 Mar 2024 12:06:25 +0000 Subject: [PATCH 10/11] Apply comments --- docs/source/optimization_ov.mdx | 6 ++--- optimum/intel/openvino/configuration.py | 6 ++--- optimum/intel/openvino/modeling_diffusion.py | 26 +++++++++++--------- tests/openvino/test_quantization.py | 5 ++-- 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 3506027803..70c98f14f7 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -71,10 +71,10 @@ model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) ## Hybrid quantization -Traditional optimization methods like post-training 8-bit quantization do not work for Stable Diffusion models because accuracy drops significantly. On the other hand, weight compression does not improve performance when applied to Stable Diffusion models, as the size of activations is comparable to weights. +Traditional optimization methods like post-training 8-bit quantization do not work well for Stable Diffusion models and can lead to poor generation results. On the other hand, weight compression does not improve performance significantly when applied to Stable Diffusion models, as the size of activations is comparable to weights. The UNet model takes up most of the overall execution time of the pipeline. Thus, optimizing just one model brings substantial benefits in terms of inference speed while keeping acceptable accuracy without fine-tuning. Quantizing the rest of the diffusion pipeline does not significantly improve inference performance but could potentially lead to substantial degradation of accuracy. -Therefore, the proposal is to apply quantization in hybrid mode for the UNet model and weight-only quantization for other pipeline components. The hybrid mode involves the quantization of weights in MatMul and Embedding layers, and activations of other layers, facilitating accuracy preservation post-optimization while reducing the model size. -For optimizing the Stable Diffusion pipeline, utilize the `quantization_config` to define optimization parameters. 
To enable hybrid quantization, specify the quantization dataset in the `quantization_config`; otherwise, weight-only quantization in specified precisions will be applied to UNet. +Therefore, the proposal is to apply quantization in *hybrid mode* for the UNet model and weight-only quantization for the rest of the pipeline components. The hybrid mode involves the quantization of weights in MatMul and Embedding layers, and activations of other layers, facilitating accuracy preservation post-optimization while reducing the model size. +The `quantization_config` is used to define the optimization parameters for the Stable Diffusion pipeline. To enable hybrid quantization, specify the quantization dataset in the `quantization_config`. Otherwise, weight-only quantization to a specified data type (8 or 4 bits) is applied to the UNet model. ```python from optimum.intel import OVStableDiffusionPipeline, OVWeightQuantizationConfig diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 610d7bf956..40a60bb58e 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -179,8 +179,8 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. dataset (`str or List[str]`, *optional*): The dataset used for data-aware compression or quantization with NNCF. You can provide your own dataset - in a list of string or just use the the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLLMs - or ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for SD models. + in a list of strings or just use the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLMs + or ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for diffusion models. ratio (`float`, defaults to 1.0): The ratio between baseline and backup precisions (e.g. 0.9 means 90% of layers quantized to INT4_ASYM and the rest to INT8_ASYM). @@ -243,7 +243,7 @@ def post_init(self): if self.dataset not in llm_datasets + stable_diffusion_datasets: raise ValueError( f"""You have entered a string value for dataset.
You can only choose between - {llm_datasets} for LLLMs or {stable_diffusion_datasets} for SD models, but we found {self.dataset}""" + {llm_datasets} for LLMs or {stable_diffusion_datasets} for diffusion models, but we found {self.dataset}""" ) if self.bits not in [4, 8]: diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 27894da21a..6ae5601241 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -282,16 +282,17 @@ def _from_pretrained( quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) - dataset = None unet_path = new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name if quantization_config is not None and quantization_config.dataset is not None: - dataset = quantization_config.dataset # load the UNet model uncompressed to apply hybrid quantization further unet = cls.load_model(unet_path) # Apply weights compression to other `components` without dataset - quantization_config.dataset = None + q_config_params = quantization_config.__dict__ + wc_params = {param: value for param, value in q_config_params.items() if param != "dataset"} + wc_quantization_config = OVWeightQuantizationConfig.from_dict(wc_params) else: - unet = cls.load_model(unet_path, quantization_config) + wc_quantization_config = quantization_config + unet = cls.load_model(unet_path, wc_quantization_config) components = { "vae_encoder": new_model_save_dir / DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER / vae_encoder_file_name, @@ -301,12 +302,12 @@ def _from_pretrained( } for key, value in components.items(): - components[key] = cls.load_model(value, quantization_config) if value.is_file() else None + components[key] = cls.load_model(value, wc_quantization_config) if value.is_file() else None if model_save_dir is None: model_save_dir = new_model_save_dir - if dataset is not None: + if quantization_config is not None and quantization_config.dataset is not None: sd_model = cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs) supported_pipelines = ( @@ -318,12 +319,11 @@ def _from_pretrained( raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") nsamples = quantization_config.num_samples if quantization_config.num_samples else 200 - unet_inputs = sd_model._prepare_unet_inputs(dataset, nsamples) + unet_inputs = sd_model._prepare_unet_inputs(quantization_config.dataset, nsamples) from .quantization import _hybrid_quantization - unet = _hybrid_quantization(sd_model.unet.model, quantization_config, dataset=unet_inputs) - quantization_config.dataset = dataset + unet = _hybrid_quantization(sd_model.unet.model, wc_quantization_config, dataset=unet_inputs) return cls( unet=unet, @@ -338,13 +338,17 @@ def _prepare_unet_inputs( self, dataset: Union[str, List[Any]], num_samples: int, - height: Optional[int] = 512, - width: Optional[int] = 512, + height: Optional[int] = None, + width: Optional[int] = None, seed: Optional[int] = 42, **kwargs, ) -> Dict[str, Any]: self.compile() + size = self.unet.config.get("sample_size", 64) * self.vae_scale_factor + height = height or min(size, 512) + width = width or min(size, 512) + if isinstance(dataset, str): dataset = deepcopy(dataset) available_datasets = PREDEFINED_SD_DATASETS.keys() diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index cb85b4aa48..23008c8598 100644 --- a/tests/openvino/test_quantization.py +++ 
b/tests/openvino/test_quantization.py @@ -159,7 +159,7 @@ class OVWeightCompressionTest(unittest.TestCase): ) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 62, 86),) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 150),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 148),) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTO_COMPRESSED_MATMULS = ( (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 14, 50), ) @@ -236,6 +236,7 @@ class OVWeightCompressionTest(unittest.TestCase): SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION = ( (OVStableDiffusionPipeline, "stable-diffusion", 72, 195), + (OVStableDiffusionXLPipeline, "stable-diffusion-xl", 84, 331), (OVLatentConsistencyModelPipeline, "latent-consistency", 50, 135), ) @@ -372,7 +373,7 @@ def test_ovmodel_hybrid_quantization(self, model_cls, model_type, expected_num_f model.save_pretrained(tmp_dir) - @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION[-1:]) def test_ovmodel_hybrid_quantization_with_custom_dataset( self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8 ): From 636a613386cb2de06c36faf78d1f8aec4ded8872 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 11 Mar 2024 18:33:41 +0000 Subject: [PATCH 11/11] reformat --- optimum/intel/openvino/modeling_diffusion.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 6ae5601241..bbe7bf24a4 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -287,12 +287,13 @@ def _from_pretrained( # load the UNet model uncompressed to apply hybrid quantization further unet = cls.load_model(unet_path) # Apply weights compression to other `components` without dataset - q_config_params = quantization_config.__dict__ - wc_params = {param: value for param, value in q_config_params.items() if param != "dataset"} - wc_quantization_config = OVWeightQuantizationConfig.from_dict(wc_params) + weight_quantization_params = { + param: value for param, value in quantization_config.__dict__.items() if param != "dataset" + } + weight_quantization_config = OVWeightQuantizationConfig.from_dict(weight_quantization_params) else: - wc_quantization_config = quantization_config - unet = cls.load_model(unet_path, wc_quantization_config) + weight_quantization_config = quantization_config + unet = cls.load_model(unet_path, weight_quantization_config) components = { "vae_encoder": new_model_save_dir / DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER / vae_encoder_file_name, @@ -302,7 +303,7 @@ def _from_pretrained( } for key, value in components.items(): - components[key] = cls.load_model(value, wc_quantization_config) if value.is_file() else None + components[key] = cls.load_model(value, weight_quantization_config) if value.is_file() else None if model_save_dir is None: model_save_dir = new_model_save_dir @@ -323,7 +324,7 @@ def _from_pretrained( from .quantization import _hybrid_quantization - unet = _hybrid_quantization(sd_model.unet.model, wc_quantization_config, dataset=unet_inputs) + unet = _hybrid_quantization(sd_model.unet.model, weight_quantization_config, dataset=unet_inputs) return cls( unet=unet,
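To make the configuration handling in the last two patches easier to follow, here is a small standalone sketch (not part of the patches themselves) of how a weight-only configuration is derived from a hybrid one by dropping the `dataset` entry, mirroring the `weight_quantization_params` filtering in `_from_pretrained`.

```python
from optimum.intel import OVWeightQuantizationConfig

# Hybrid-mode config: the presence of `dataset` is what triggers hybrid quantization of the UNet.
quantization_config = OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions")

# Drop the dataset so the other pipeline components get plain weight-only compression,
# as done with `weight_quantization_params` in `_from_pretrained` above.
weight_quantization_params = {
    param: value for param, value in quantization_config.__dict__.items() if param != "dataset"
}
weight_quantization_config = OVWeightQuantizationConfig.from_dict(weight_quantization_params)

assert weight_quantization_config.dataset is None
assert weight_quantization_config.bits == quantization_config.bits
```

Building a fresh config instead of temporarily setting `quantization_config.dataset = None` (as the earlier revision of the patch did) leaves the caller's configuration object untouched, which is the motivation for this refactoring.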