From 86d378f9582dc96901ad91aaf2d352330636719b Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Mon, 16 Oct 2023 23:46:04 +0800 Subject: [PATCH 01/64] Support weight-only quantization with quantized operators in intel-extension-for-transformers --- .../language-modeling/run_clm.py | 52 +++--- .../intel/neural_compressor/configuration.py | 5 +- .../intel/neural_compressor/quantization.py | 167 ++++++++++-------- tests/neural_compressor/test_optimization.py | 75 +------- 4 files changed, 122 insertions(+), 177 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index cbc523b663..cff7acd3fd 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -56,7 +56,7 @@ from transformers.utils import check_min_version from transformers.utils.versions import require_version -from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer +from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer, WeightOnlyQuantConfig os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -196,9 +196,9 @@ class OptimizationArguments: default=False, metadata={"help": "Whether or not to verify the loading of the quantized model."}, ) - bits: int = field( - default=8, - metadata={"help": "Bits for weight only quantization, 1-8 bits."}, + weight_dtype: str = field( + default="int8", + metadata={"help": "weight dtype for weight only quantization."}, ) group_size: int = field( default=-1, @@ -625,26 +625,18 @@ def compute_metrics(eval_preds): else: recipes = {} if optim_args.quantization_approach == "weight_only": - op_type_dict = { - ".*": { - "weight": { - "bits": optim_args.bits, - "group_size": optim_args.group_size, - "scheme": optim_args.weight_only_scheme, - "algorithm": optim_args.quantization_methodology, - }, - }, - } - if optim_args.quantization_methodology == "GPTQ": - gptq_args = { - "pad_max_length": block_size, - } - recipes.update({"gptq_args": gptq_args}) + if optim_args.apply_pruning or optim_args.apply_distillation: + raise ValueError("Can't mixture weight only quantization and pruning, distillation.") + quantization_config = WeightOnlyQuantConfig( + weight_dtype=optim_args.weight_dtype, + group_size=optim_args.group_size, + scheme=optim_args.weight_only_scheme, + algorithm=optim_args.quantization_methodology + ) else: - op_type_dict = {} - quantization_config = PostTrainingQuantConfig( - approach=optim_args.quantization_approach, op_type_dict=op_type_dict, recipes=recipes - ) + quantization_config = PostTrainingQuantConfig( + approach=optim_args.quantization_approach, recipes=recipes + ) if optim_args.apply_pruning: if optim_args.end_step is None: @@ -724,10 +716,12 @@ def compute_metrics(eval_preds): if optim_args.apply_quantization and optim_args.quantization_approach in {"static", "dynamic", "weight_only"}: model = trainer.model if isinstance(trainer.model, PreTrainedModel) else trainer.model._model quantizer = INCQuantizer.from_pretrained(model) - if optim_args.quantization_approach in ["static", "weight_only"]: + if optim_args.quantization_approach == "static": num_calibration_samples = min(len(train_dataset), optim_args.num_calibration_samples) train_dataset = train_dataset.select(range(num_calibration_samples)) quantization_config.calibration_sampling_size = num_calibration_samples + elif optim_args.quantization_approach == "weight_only": + train_dataset = 
train_dataset.select(range(num_calibration_samples)) quantizer.quantize( quantization_config=quantization_config, @@ -735,13 +729,15 @@ def compute_metrics(eval_preds): calibration_dataset=train_dataset if optim_args.quantization_approach in ["static", "weight_only"] else None, - batch_size=1 # batch_size > 1 for GPTQ is WIP - if optim_args.quantization_approach == "weight_only" and optim_args.quantization_methodology == "GPTQ" + batch_size=1 + if optim_args.quantization_approach == "weight_only" else training_args.per_device_train_batch_size, - weight_only=True if optim_args.quantization_approach == "weight_only" else False, ) trainer.model = quantizer._quantized_model - if optim_args.apply_quantization and optim_args.verify_loading: + + # Weight only quantization didn't support save/load function due to weight only model has private linear operator. + if (optim_args.apply_quantization and optim_args.verify_loading + and optim_args.quantization_approach != "weight_only"): loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) tokens = tokenizer("This is a sample input", return_tensors="pt") with torch.no_grad(): diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index 7f5370e5ee..0a44e9dddb 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig from typing import Dict, Optional, Union from neural_compressor.config import DistillationConfig, WeightPruningConfig, _BaseQuantizationConfig @@ -35,7 +36,7 @@ class INCConfig(BaseConfig): def __init__( self, - quantization: Optional[Union[Dict, _BaseQuantizationConfig]] = None, + quantization: Optional[Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]] = None, pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None, distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None, save_onnx_model: bool = False, @@ -50,7 +51,7 @@ def __init__( self.save_onnx_model = save_onnx_model @staticmethod - def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig]): + def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]): # TODO : add activations_dtype and weights_dtype if isinstance(config, _BaseQuantizationConfig): approach = _quantization_model[config.approach] diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index d4846adc15..61fd63aed1 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -23,6 +23,7 @@ import torch from datasets import Dataset, load_dataset +from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model from neural_compressor.adaptor.pytorch import PyTorch_FXAdaptor, _cfg_to_qconfig, _propagate_qconfig from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.experimental.export import torch_to_int8_onnx @@ -143,8 +144,8 @@ def from_pretrained(cls, model: PreTrainedModel, **kwargs): def quantize( self, - quantization_config: "PostTrainingQuantConfig", save_directory: Union[str, Path], + quantization_config: Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] = None, calibration_dataset: Dataset = None, batch_size: int = 
8, data_collator: Optional[DataCollator] = None, @@ -185,7 +186,7 @@ def quantize( calibration_dataloader = None self._set_task() - if weight_only: + if weight_only or isinstance(quantization_config, WeightOnlyQuantConfig): # check neural-compressor version if is_neural_compressor_version("<", NEURAL_COMPRESSOR_WEIGHT_ONLY_MINIMUM_VERSION): raise ImportError( @@ -193,14 +194,15 @@ def quantize( f"but only version {NEURAL_COMPRESSOR_WEIGHT_ONLY_MINIMUM_VERSION} or higher supports weight-only quantization." ) - # If op_type_dict of quantization_config is not defined, it will use default values for weight-only quantization: - # {"bits": 4, "group_size": 32, "scheme": "sym", "algorithm": "RTN"} - if isinstance(quantization_config.op_type_dict, dict) and len(quantization_config.op_type_dict) > 0: - algo = [] - for _, val in quantization_config.op_type_dict.items(): - algo += val.get("weight", {}).get("algorithm", ["RTN"]) - else: + if quantization_config is None: + quantization_config = WeightOnlyQuantConfig() algo = ["RTN"] + elif isinstance(quantization_config, WeightOnlyQuantConfig): + algo = quantization_config.algorithm + else: + raise ValueError( + "Weight-only quantization needs a object of WeightOnlyQuantConfig." + ) if calibration_dataset is None and ("GPTQ" in algo or "AWQ" in algo): raise ValueError( @@ -215,8 +217,11 @@ def quantize( batch_size=batch_size, remove_unused_columns=remove_unused_columns, data_collator=data_collator, - use_label=False if "GPTQ" in algo else True, + use_label=False, ) + quantization_config.calib_dataloader = calibration_dataloader + + save_onnx_model = False elif INCQuantizationMode(quantization_config.approach) == INCQuantizationMode.STATIC: # Since PyTorch fx trace does not really require an example_inputs, only need calibration_dataset or calibration_fn here. @@ -249,7 +254,9 @@ def quantize( save_onnx_model = False if ( - quantization_config.backend == "ipex" + not weight_only + and not isinstance(quantization_config, WeightOnlyQuantConfig) + and quantization_config.backend == "ipex" and is_ipex_version("<", IPEX_MINIMUM_VERSION) and "generation" in self.task ): @@ -258,76 +265,80 @@ def quantize( f"but only version {IPEX_MINIMUM_VERSION} or higher is supported." 
) - if isinstance(self._original_model.config, PretrainedConfig): - self._original_model.config.backend = quantization_config.backend - - if isinstance(self._original_model, ORTModel): - # TODO : enable seq2seq models - if isinstance(self._original_model, ORTModelForConditionalGeneration): - raise RuntimeError("ORTModelForConditionalGeneration not supported for quantization") - - if isinstance(self._original_model, ORTModelForCausalLM): - model_or_path = self._original_model.onnx_paths - if len(model_or_path) > 1: - raise RuntimeError( - f"Too many ONNX model files were found in {self._original_model.onnx_paths}, only `use_cache=False` is supported" - ) - model_or_path = str(model_or_path[0]) - default_name = ONNX_DECODER_NAME - else: - model_or_path = str(self._original_model.model_path) + if isinstance(quantization_config, WeightOnlyQuantConfig): + self._quantized_model = convert_to_quantized_model(self._original_model, quantization_config) else: - model_or_path = self._original_model - - compressed_model = fit( - model_or_path, - conf=quantization_config, - calib_dataloader=calibration_dataloader, - eval_func=self.eval_fn, - calib_func=self.calibration_fn, - ) - - if not hasattr(compressed_model, "_model") or compressed_model._model is None: - raise RuntimeError( - "The maximum number of trials specified has been reached and no quantized model meeting the specified" - " accuracy tolerance has been found. Either the tolerance or the number of trials need to be increased." + if isinstance(self._original_model.config, PretrainedConfig): + self._original_model.config.backend = quantization_config.backend + + if isinstance(self._original_model, ORTModel): + # TODO : enable seq2seq models + if isinstance(self._original_model, ORTModelForConditionalGeneration): + raise RuntimeError("ORTModelForConditionalGeneration not supported for quantization") + + if isinstance(self._original_model, ORTModelForCausalLM): + model_or_path = self._original_model.onnx_paths + if len(model_or_path) > 1: + raise RuntimeError( + f"Too many ONNX model files were found in {self._original_model.onnx_paths}, only `use_cache=False` is supported" + ) + model_or_path = str(model_or_path[0]) + default_name = ONNX_DECODER_NAME + else: + model_or_path = str(self._original_model.model_path) + else: + model_or_path = self._original_model + + compressed_model = fit( + model_or_path, + conf=quantization_config, + calib_dataloader=calibration_dataloader, + eval_func=self.eval_fn, + calib_func=self.calibration_fn, ) - if isinstance(self._original_model.config, PretrainedConfig): - # If backend is IPEX, then the quantized model is JIT model which will drop the config attribute, - # so need set config from original_model. 
- model_config = copy.deepcopy(self._original_model.config) - model_config.torch_dtype = "int8" - if isinstance(compressed_model, IPEXModel): - model_config.torchscript = True - model_config.backend = "ipex" - elif not isinstance(compressed_model, ONNXModel): - compressed_model._model.config = model_config - model_config.save_pretrained(save_directory) - - self._quantized_model = compressed_model._model - - if save_onnx_model: - model_type = self._original_model.config.model_type.replace("_", "-") - model_name = getattr(self._original_model, "name", None) - onnx_config_class = TasksManager.get_exporter_config_constructor( - exporter="onnx", - model=self._original_model, - task=self.task, - model_type=model_type, - model_name=model_name, - ) - onnx_config = onnx_config_class(self._original_model.config) - compressed_model.eval() - output_onnx_path = save_directory.joinpath(ONNX_WEIGHTS_NAME) - # Export the compressed model to the ONNX format - self._onnx_export(compressed_model, onnx_config, output_onnx_path) - - output_path = save_directory.joinpath(file_name or default_name) - # Save the quantized model - self._save_pretrained(compressed_model, output_path) - quantization_config = INCConfig(quantization=quantization_config, save_onnx_model=save_onnx_model) - quantization_config.save_pretrained(save_directory) + if not hasattr(compressed_model, "_model") or compressed_model._model is None: + raise RuntimeError( + "The maximum number of trials specified has been reached and no quantized model meeting the specified" + " accuracy tolerance has been found. Either the tolerance or the number of trials need to be increased." + ) + + if isinstance(self._original_model.config, PretrainedConfig): + # If backend is IPEX, then the quantized model is JIT model which will drop the config attribute, + # so need set config from original_model. 
+ model_config = copy.deepcopy(self._original_model.config) + model_config.torch_dtype = "int8" + if isinstance(compressed_model, IPEXModel): + model_config.torchscript = True + model_config.backend = "ipex" + elif not isinstance(compressed_model, ONNXModel): + compressed_model._model.config = model_config + model_config.save_pretrained(save_directory) + + self._quantized_model = compressed_model._model + + if save_onnx_model: + model_type = self._original_model.config.model_type.replace("_", "-") + model_name = getattr(self._original_model, "name", None) + onnx_config_class = TasksManager.get_exporter_config_constructor( + exporter="onnx", + model=self._original_model, + task=self.task, + model_type=model_type, + model_name=model_name, + ) + onnx_config = onnx_config_class(self._original_model.config) + compressed_model.eval() + output_onnx_path = save_directory.joinpath(ONNX_WEIGHTS_NAME) + # Export the compressed model to the ONNX format + self._onnx_export(compressed_model, onnx_config, output_onnx_path) + + output_path = save_directory.joinpath(file_name or default_name) + # Save the quantized model + self._save_pretrained(compressed_model, output_path) + quantization_config = INCConfig(quantization=quantization_config, save_onnx_model=save_onnx_model) + quantization_config.save_pretrained(save_directory) + return self._quantized_model @staticmethod def _save_pretrained(model: Union[PyTorchModel, IPEXModel], output_path: str): diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index f28c720138..15f450e19b 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -17,7 +17,7 @@ import os import tempfile - +import copy import evaluate import numpy as np import torch @@ -56,9 +56,11 @@ INCSeq2SeqTrainer, INCStableDiffusionPipeline, ) +from optimum.intel.neural_compressor import WeightOnlyQuantConfig from optimum.intel.utils.constant import DIFFUSION_WEIGHTS_NAME from optimum.onnxruntime import ORTModelForCausalLM, ORTModelForSequenceClassification from optimum.pipelines import ORT_SUPPORTED_TASKS +import unittest os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -194,84 +196,19 @@ def test_ipex_static_quantization_with_smoothquant(self, task, model_name, expec def test_weight_only_quantization(self): model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM" - op_type_dict = { - ".*": { - "weight": { - "bits": 8, - "group_size": -1, - "scheme": "sym", - "algorithm": "RTN", - }, - }, - } - quantization_config = PostTrainingQuantConfig(approach="weight_only", op_type_dict=op_type_dict) + quantization_config = WeightOnlyQuantConfig(weight_dtype="int8") model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) - quantizer = INCQuantizer.from_pretrained(model, task="text-generation") + quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) with tempfile.TemporaryDirectory() as tmp_dir: - quantizer.quantize( - quantization_config=quantization_config, - calibration_dataset=calibration_dataset, - save_directory=tmp_dir, - weight_only=True, - ) - q_model = AutoModelForCausalLM.from_pretrained(tmp_dir) - inp = torch.tensor([calibration_dataset[0]["input_ids"]]) - out = model(inp)[0] - q_out = q_model(inp)[0] - self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) - - 
op_type_dict = { - ".*": { - "weight": { - "bits": 8, - "group_size": -1, - "scheme": "sym", - "algorithm": "AWQ", - }, - }, - } - quantization_config = PostTrainingQuantConfig(approach="weight_only", op_type_dict=op_type_dict) - - with tempfile.TemporaryDirectory() as tmp_dir: - quantizer.quantize( - quantization_config=quantization_config, - calibration_dataset=calibration_dataset, - save_directory=tmp_dir, - weight_only=True, - ) - q_model = AutoModelForCausalLM.from_pretrained(tmp_dir) - inp = torch.tensor([calibration_dataset[0]["input_ids"]]) - out = model(inp)[0] - q_out = q_model(inp)[0] - self.assertTrue(torch.all(torch.isclose(out, q_out, atol=6e-1))) - - op_type_dict = { - ".*": { - "weight": { - "bits": 8, - "group_size": -1, - "scheme": "sym", - "algorithm": "GPTQ", - }, - }, - } - recipes = {"gptq_args": {"pad_max_length": len(calibration_dataset[0]["input_ids"])}} - quantization_config = PostTrainingQuantConfig( - approach="weight_only", op_type_dict=op_type_dict, recipes=recipes - ) - - with tempfile.TemporaryDirectory() as tmp_dir: - quantizer.quantize( + q_model = quantizer.quantize( quantization_config=quantization_config, calibration_dataset=calibration_dataset, save_directory=tmp_dir, - weight_only=True, ) - q_model = AutoModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) out = model(inp)[0] q_out = q_model(inp)[0] From ca58fa5a10cd6f2b223168fcf24baf0548d8694c Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 17 Oct 2023 00:21:11 +0800 Subject: [PATCH 02/64] Update code style --- examples/neural_compressor/language-modeling/run_clm.py | 9 ++++++--- optimum/intel/neural_compressor/configuration.py | 2 +- optimum/intel/neural_compressor/quantization.py | 4 +--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index cff7acd3fd..3baa257395 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -631,7 +631,7 @@ def compute_metrics(eval_preds): weight_dtype=optim_args.weight_dtype, group_size=optim_args.group_size, scheme=optim_args.weight_only_scheme, - algorithm=optim_args.quantization_methodology + algorithm=optim_args.quantization_methodology, ) else: quantization_config = PostTrainingQuantConfig( @@ -736,8 +736,11 @@ def compute_metrics(eval_preds): trainer.model = quantizer._quantized_model # Weight only quantization didn't support save/load function due to weight only model has private linear operator. - if (optim_args.apply_quantization and optim_args.verify_loading - and optim_args.quantization_approach != "weight_only"): + if ( + optim_args.apply_quantization + and optim_args.verify_loading + and optim_args.quantization_approach != "weight_only" + ): loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) tokens = tokenizer("This is a sample input", return_tensors="pt") with torch.no_grad(): diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index 0a44e9dddb..b9548ffa69 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig from typing import Dict, Optional, Union +from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig from neural_compressor.config import DistillationConfig, WeightPruningConfig, _BaseQuantizationConfig from optimum.configuration_utils import BaseConfig diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 61fd63aed1..723a0b3150 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -200,9 +200,7 @@ def quantize( elif isinstance(quantization_config, WeightOnlyQuantConfig): algo = quantization_config.algorithm else: - raise ValueError( - "Weight-only quantization needs a object of WeightOnlyQuantConfig." - ) + raise ValueError("Weight-only quantization needs a object of WeightOnlyQuantConfig.") if calibration_dataset is None and ("GPTQ" in algo or "AWQ" in algo): raise ValueError( From 4837b2f7f432ff7d952eb5965ee9f750132c1fa1 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 17 Oct 2023 00:51:28 +0800 Subject: [PATCH 03/64] Update readme for weight-only quantization example --- examples/neural_compressor/language-modeling/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/README.md b/examples/neural_compressor/language-modeling/README.md index b005bb78a4..b8b74e8041 100644 --- a/examples/neural_compressor/language-modeling/README.md +++ b/examples/neural_compressor/language-modeling/README.md @@ -97,4 +97,4 @@ respectively `dynamic`, `static`, `weight_only` or `aware_training`. The flag `--verify_loading` can be passed along to verify that the resulting quantized model can be loaded correctly. -> **_Note:_** `weight_only` quantization_approach requires neural-compressor >= 2.3 +> **_Note:_** `weight_only` quantization_approach requires neural-compressor >= 2.3 and intel-extension-for-transformers >= 1.2. 
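A minimal end-to-end sketch of the Python API the patches above wire together, closely following the updated `test_weight_only_quantization` test: build a `WeightOnlyQuantConfig`, pass it to `INCQuantizer.quantize`, and run the quantized model. The `WeightOnlyQuantConfig` import path moves during this series (it later comes from `intel_extension_for_transformers.transformers.utils.config`), so the import below is an assumption tied to the installed intel-extension-for-transformers version; the checkpoint is the tiny test model used in the test suite.

```python
import tempfile

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig  # path varies across itrex versions
from optimum.intel.neural_compressor import INCQuantizer

model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM"  # tiny model used in the tests
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# RTN is the default algorithm and needs no calibration data; GPTQ and AWQ would require a calibration dataset.
quantization_config = WeightOnlyQuantConfig(weight_dtype="int8", group_size=-1, scheme="sym", algorithm="RTN")
quantizer = INCQuantizer.from_pretrained(model, task="text-generation")

with tempfile.TemporaryDirectory() as tmp_dir:
    quantizer.quantize(quantization_config=quantization_config, save_directory=tmp_dir)

# The quantized model (with packed weight-only linear layers) is kept on the quantizer;
# later patches in this series also make quantize() return it directly.
quantized_model = quantizer._quantized_model
inp = tokenizer("This is a sample input", return_tensors="pt").input_ids
with torch.no_grad():
    logits = quantized_model(inp)[0]
```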
From 25b2664ea07548705ec7ae000c949d9d26fb5293 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Thu, 19 Oct 2023 14:54:19 +0800 Subject: [PATCH 04/64] Update code --- .../language-modeling/requirements.txt | 2 ++ .../language-modeling/run_clm.py | 14 ++++----- .../intel/neural_compressor/configuration.py | 5 ++- .../intel/neural_compressor/quantization.py | 15 +++++++-- optimum/intel/utils/import_utils.py | 31 +++++++++++++++++++ setup.py | 2 ++ tests/neural_compressor/test_optimization.py | 2 +- 7 files changed, 57 insertions(+), 14 deletions(-) diff --git a/examples/neural_compressor/language-modeling/requirements.txt b/examples/neural_compressor/language-modeling/requirements.txt index 410b038891..a14c910510 100644 --- a/examples/neural_compressor/language-modeling/requirements.txt +++ b/examples/neural_compressor/language-modeling/requirements.txt @@ -3,3 +3,5 @@ torch >= 1.9 datasets >= 1.8.0 sentencepiece != 0.1.92 protobuf +intel-extension-for-transformers +peft diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 3baa257395..83d16ede20 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -33,6 +33,7 @@ import torch import transformers from datasets import load_dataset +from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig from neural_compressor import ( DistillationConfig, PostTrainingQuantConfig, @@ -56,7 +57,7 @@ from transformers.utils import check_min_version from transformers.utils.versions import require_version -from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer, WeightOnlyQuantConfig +from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -626,7 +627,7 @@ def compute_metrics(eval_preds): recipes = {} if optim_args.quantization_approach == "weight_only": if optim_args.apply_pruning or optim_args.apply_distillation: - raise ValueError("Can't mixture weight only quantization and pruning, distillation.") + raise ValueError("Weight only quantization and pruning or distillation cannot be combined.") quantization_config = WeightOnlyQuantConfig( weight_dtype=optim_args.weight_dtype, group_size=optim_args.group_size, @@ -716,12 +717,11 @@ def compute_metrics(eval_preds): if optim_args.apply_quantization and optim_args.quantization_approach in {"static", "dynamic", "weight_only"}: model = trainer.model if isinstance(trainer.model, PreTrainedModel) else trainer.model._model quantizer = INCQuantizer.from_pretrained(model) - if optim_args.quantization_approach == "static": + if optim_args.quantization_approach != "dynamic": num_calibration_samples = min(len(train_dataset), optim_args.num_calibration_samples) train_dataset = train_dataset.select(range(num_calibration_samples)) - quantization_config.calibration_sampling_size = num_calibration_samples - elif optim_args.quantization_approach == "weight_only": - train_dataset = train_dataset.select(range(num_calibration_samples)) + if optim_args.quantization_approach == "static": + quantization_config.calibration_sampling_size = num_calibration_samples quantizer.quantize( quantization_config=quantization_config, @@ -735,7 +735,7 @@ def compute_metrics(eval_preds): ) trainer.model = quantizer._quantized_model - # Weight only quantization didn't support save/load function due to weight only model has private linear 
operator. + # TODO: Weight only quantization didn't support save/load function now. Will implement it soon. if ( optim_args.apply_quantization and optim_args.verify_loading diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index b9548ffa69..9990cabda8 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -14,7 +14,6 @@ from typing import Dict, Optional, Union -from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig from neural_compressor.config import DistillationConfig, WeightPruningConfig, _BaseQuantizationConfig from optimum.configuration_utils import BaseConfig @@ -36,7 +35,7 @@ class INCConfig(BaseConfig): def __init__( self, - quantization: Optional[Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]] = None, + quantization=None, pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None, distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None, save_onnx_model: bool = False, @@ -51,7 +50,7 @@ def __init__( self.save_onnx_model = save_onnx_model @staticmethod - def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]): + def _create_quantization_config(config): # TODO : add activations_dtype and weights_dtype if isinstance(config, _BaseQuantizationConfig): approach = _quantization_model[config.approach] diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 723a0b3150..f2d95cbc53 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -23,7 +23,6 @@ import torch from datasets import Dataset, load_dataset -from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model from neural_compressor.adaptor.pytorch import PyTorch_FXAdaptor, _cfg_to_qconfig, _propagate_qconfig from neural_compressor.config import PostTrainingQuantConfig from neural_compressor.experimental.export import torch_to_int8_onnx @@ -59,6 +58,7 @@ from ..utils.import_utils import ( _ipex_version, _neural_compressor_version, + is_intel_extension_for_transformers_available, is_ipex_version, is_neural_compressor_version, ) @@ -77,6 +77,10 @@ from .utils import INCDataLoader, _cfgs_to_fx_cfgs +if is_intel_extension_for_transformers_available(): + from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model + from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig + logger = logging.getLogger(__name__) NEURAL_COMPRESSOR_MINIMUM_VERSION = "2.1.0" @@ -145,7 +149,7 @@ def from_pretrained(cls, model: PreTrainedModel, **kwargs): def quantize( self, save_directory: Union[str, Path], - quantization_config: Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] = None, + quantization_config=None, calibration_dataset: Dataset = None, batch_size: int = 8, data_collator: Optional[DataCollator] = None, @@ -186,13 +190,18 @@ def quantize( calibration_dataloader = None self._set_task() - if weight_only or isinstance(quantization_config, WeightOnlyQuantConfig): + if weight_only or not isinstance(quantization_config, PostTrainingQuantConfig): # check neural-compressor version if is_neural_compressor_version("<", NEURAL_COMPRESSOR_WEIGHT_ONLY_MINIMUM_VERSION): raise ImportError( f"Found an incompatible version of neural-compressor. 
Found version {_neural_compressor_version}, " f"but only version {NEURAL_COMPRESSOR_WEIGHT_ONLY_MINIMUM_VERSION} or higher supports weight-only quantization." ) + if not is_intel_extension_for_transformers_available(): + raise ImportError( + "Didn't find out intel-etension-for-transformers package. " + "Please install packages: pip install intel-etension-for-transformers and pip install peft." + ) if quantization_config is None: quantization_config = WeightOnlyQuantConfig() diff --git a/optimum/intel/utils/import_utils.py b/optimum/intel/utils/import_utils.py index f778bbfcbd..8eeae7cc5a 100644 --- a/optimum/intel/utils/import_utils.py +++ b/optimum/intel/utils/import_utils.py @@ -58,6 +58,15 @@ _neural_compressor_available = False +_intel_extension_for_transformers_available = importlib.util.find_spec("intel_extension_for_transformers") is not None +_intel_extension_for_transformers_version = "N/A" +if _intel_extension_for_transformers_available: + try: + _intel_extension_for_transformers_version = importlib_metadata.version("intel_extension_for_transformers") + except importlib_metadata.PackageNotFoundError: + _intel_extension_for_transformers_available = False + + _ipex_available = importlib.util.find_spec("intel_extension_for_pytorch") is not None _ipex_version = "N/A" if _ipex_available: @@ -122,6 +131,10 @@ def is_neural_compressor_available(): return _neural_compressor_available +def is_intel_extension_for_transformers_available(): + return _intel_extension_for_transformers_available + + def is_ipex_available(): return _ipex_available @@ -189,6 +202,15 @@ def is_neural_compressor_version(operation: str, version: str): return compare_versions(parse(_neural_compressor_version), operation, version) +def is_intel_extension_for_transformers_version(operation: str, version: str): + """ + Compare the current intel_extension_for_transformers version to a given reference with an operation. + """ + if not _intel_extension_for_transformers_available: + return False + return compare_versions(parse(_intel_extension_for_transformers_version), operation, version) + + def is_openvino_version(operation: str, version: str): """ Compare the current OpenVINO version to a given reference with an operation. @@ -262,6 +284,11 @@ def is_timm_version(operation: str, version: str): `pip install neural-compressor`. Please note that you may need to restart your runtime after installation. """ +INTEL_EXTENSION_FOR_TRANSFORMERS_IMPORT_ERROR = """ +{0} requires the intel-extension-for-transformers library but it was not found in your environment. You can install it with pip: +`pip install neural-compressor`. Please note that you may need to restart your runtime after installation. 
+""" + BACKENDS_MAPPING = OrderedDict( [ ("diffusers", (is_diffusers_available, DIFFUSERS_IMPORT_ERROR)), @@ -269,6 +296,10 @@ def is_timm_version(operation: str, version: str): ("nncf", (is_nncf_available, NNCF_IMPORT_ERROR)), ("openvino", (is_openvino_available, OPENVINO_IMPORT_ERROR)), ("neural_compressor", (is_neural_compressor_available, NEURAL_COMPRESSOR_IMPORT_ERROR)), + ( + "intel_extension_for_transformers", + (is_intel_extension_for_transformers_available, NEURAL_COMPRESSOR_IMPORT_ERROR), + ), ] ) diff --git a/setup.py b/setup.py index c54983da45..a9bb25b19b 100644 --- a/setup.py +++ b/setup.py @@ -33,6 +33,8 @@ "rjieba", "timm", "invisible-watermark>=0.2.0", + "intel-extension-for-transformers", + "peft", ] QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241"] diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index 15f450e19b..408a11f217 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -56,7 +56,7 @@ INCSeq2SeqTrainer, INCStableDiffusionPipeline, ) -from optimum.intel.neural_compressor import WeightOnlyQuantConfig +from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig from optimum.intel.utils.constant import DIFFUSION_WEIGHTS_NAME from optimum.onnxruntime import ORTModelForCausalLM, ORTModelForSequenceClassification from optimum.pipelines import ORT_SUPPORTED_TASKS From a36584b2e1f4ddc6e868ea0b6f76aa0d52cd2b6e Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 19 Dec 2023 14:15:02 +0800 Subject: [PATCH 05/64] Adapt intel-extension-for-transformers 1.3 API change Signed-off-by: Cheng, Penghui --- optimum/intel/neural_compressor/quantization.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index f2d95cbc53..b4c8170cde 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -59,6 +59,7 @@ _ipex_version, _neural_compressor_version, is_intel_extension_for_transformers_available, + is_intel_extension_for_transformers_version, is_ipex_version, is_neural_compressor_version, ) @@ -79,7 +80,10 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model - from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig + if is_intel_extension_for_transformers_version("<=", "1.2.2"): + from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig + else: + from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig logger = logging.getLogger(__name__) From 9ebc5a93d85f6e91a3e4e79cb33eac93aa27cd2b Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Mon, 16 Oct 2023 23:46:04 +0800 Subject: [PATCH 06/64] Support weight-only quantization with quantized operators in intel-extension-for-transformers --- .../neural_compressor/language-modeling/run_clm.py | 9 ++------- optimum/intel/neural_compressor/__init__.py | 2 +- optimum/intel/neural_compressor/configuration.py | 5 +++-- optimum/intel/neural_compressor/quantization.py | 11 +++++------ tests/neural_compressor/test_optimization.py | 2 +- 5 files changed, 12 insertions(+), 17 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py 
b/examples/neural_compressor/language-modeling/run_clm.py index 83d16ede20..36422bb024 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -33,7 +33,7 @@ import torch import transformers from datasets import load_dataset -from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig +from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig from neural_compressor import ( DistillationConfig, PostTrainingQuantConfig, @@ -735,12 +735,7 @@ def compute_metrics(eval_preds): ) trainer.model = quantizer._quantized_model - # TODO: Weight only quantization didn't support save/load function now. Will implement it soon. - if ( - optim_args.apply_quantization - and optim_args.verify_loading - and optim_args.quantization_approach != "weight_only" - ): + if optim_args.apply_quantization and optim_args.verify_loading: loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) tokens = tokenizer("This is a sample input", return_tensors="pt") with torch.no_grad(): diff --git a/optimum/intel/neural_compressor/__init__.py b/optimum/intel/neural_compressor/__init__.py index cb5621a333..90a74ee258 100644 --- a/optimum/intel/neural_compressor/__init__.py +++ b/optimum/intel/neural_compressor/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. from ..utils.import_utils import is_diffusers_available -from .configuration import INCConfig +from .configuration import INCConfig, WeightOnlyQuantConfig from .modeling_base import ( INCModel, INCModelForMaskedLM, diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index 9990cabda8..b9548ffa69 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -14,6 +14,7 @@ from typing import Dict, Optional, Union +from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig from neural_compressor.config import DistillationConfig, WeightPruningConfig, _BaseQuantizationConfig from optimum.configuration_utils import BaseConfig @@ -35,7 +36,7 @@ class INCConfig(BaseConfig): def __init__( self, - quantization=None, + quantization: Optional[Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]] = None, pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None, distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None, save_onnx_model: bool = False, @@ -50,7 +51,7 @@ def __init__( self.save_onnx_model = save_onnx_model @staticmethod - def _create_quantization_config(config): + def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]): # TODO : add activations_dtype and weights_dtype if isinstance(config, _BaseQuantizationConfig): approach = _quantization_model[config.approach] diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index b4c8170cde..41467327e7 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -59,7 +59,6 @@ _ipex_version, _neural_compressor_version, is_intel_extension_for_transformers_available, - is_intel_extension_for_transformers_version, is_ipex_version, is_neural_compressor_version, ) @@ -80,10 +79,7 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model - 
if is_intel_extension_for_transformers_version("<=", "1.2.2"): - from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig - else: - from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig + from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig logger = logging.getLogger(__name__) @@ -186,7 +182,7 @@ def quantize( save_directory.mkdir(parents=True, exist_ok=True) save_onnx_model = kwargs.pop("save_onnx_model", False) - if save_onnx_model and isinstance(self._original_model, ORTModel): + if save_onnx_model and (isinstance(self._original_model, ORTModel) or weight_only): save_onnx_model = False logger.warning("Model provided is an ONNX model, `save_onnx_model` is set to False") @@ -278,6 +274,9 @@ def quantize( if isinstance(quantization_config, WeightOnlyQuantConfig): self._quantized_model = convert_to_quantized_model(self._original_model, quantization_config) + # Save the quantized model + output_path = save_directory.joinpath(file_name or default_name) + self._quantized_model.save_pretrained(output_path) else: if isinstance(self._original_model.config, PretrainedConfig): self._original_model.config.backend = quantization_config.backend diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index 408a11f217..6c5fc007c1 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -56,7 +56,7 @@ INCSeq2SeqTrainer, INCStableDiffusionPipeline, ) -from intel_extension_for_transformers.transformers.utils.quantization_config import WeightOnlyQuantConfig +from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig from optimum.intel.utils.constant import DIFFUSION_WEIGHTS_NAME from optimum.onnxruntime import ORTModelForCausalLM, ORTModelForSequenceClassification from optimum.pipelines import ORT_SUPPORTED_TASKS From d0f1c71e262000f773a2acf446f2db29790259e1 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Thu, 19 Oct 2023 14:54:19 +0800 Subject: [PATCH 07/64] Update code --- optimum/intel/neural_compressor/__init__.py | 2 +- optimum/intel/neural_compressor/configuration.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/optimum/intel/neural_compressor/__init__.py b/optimum/intel/neural_compressor/__init__.py index 90a74ee258..cb5621a333 100644 --- a/optimum/intel/neural_compressor/__init__.py +++ b/optimum/intel/neural_compressor/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. 
from ..utils.import_utils import is_diffusers_available -from .configuration import INCConfig, WeightOnlyQuantConfig +from .configuration import INCConfig from .modeling_base import ( INCModel, INCModelForMaskedLM, diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index b9548ffa69..9990cabda8 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -14,7 +14,6 @@ from typing import Dict, Optional, Union -from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig from neural_compressor.config import DistillationConfig, WeightPruningConfig, _BaseQuantizationConfig from optimum.configuration_utils import BaseConfig @@ -36,7 +35,7 @@ class INCConfig(BaseConfig): def __init__( self, - quantization: Optional[Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]] = None, + quantization=None, pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None, distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None, save_onnx_model: bool = False, @@ -51,7 +50,7 @@ def __init__( self.save_onnx_model = save_onnx_model @staticmethod - def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]): + def _create_quantization_config(config): # TODO : add activations_dtype and weights_dtype if isinstance(config, _BaseQuantizationConfig): approach = _quantization_model[config.approach] From ed873c96951bdb510702ea251bf195a3f8aa5f11 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 16 Jan 2024 17:57:51 +0800 Subject: [PATCH 08/64] rebase code on main branch Signed-off-by: Cheng, Penghui --- examples/neural_compressor/language-modeling/README.md | 2 +- examples/neural_compressor/language-modeling/requirements.txt | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/neural_compressor/language-modeling/README.md b/examples/neural_compressor/language-modeling/README.md index b8b74e8041..2b22688c1d 100644 --- a/examples/neural_compressor/language-modeling/README.md +++ b/examples/neural_compressor/language-modeling/README.md @@ -97,4 +97,4 @@ respectively `dynamic`, `static`, `weight_only` or `aware_training`. The flag `--verify_loading` can be passed along to verify that the resulting quantized model can be loaded correctly. -> **_Note:_** `weight_only` quantization_approach requires neural-compressor >= 2.3 and intel-extension-for-transformers >= 1.2. +> **_Note:_** `weight_only` quantization_approach requires neural-compressor >= 2.3 and intel-extension-for-transformers >= 1.3. 
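The README note above gives the version floor in prose; below is a small, hedged sketch of how a script could enforce it at runtime using the version helpers this series adds to `optimum/intel/utils/import_utils.py`. The threshold strings are assumptions copied from the README note.

```python
from optimum.intel.utils.import_utils import (
    is_intel_extension_for_transformers_version,
    is_neural_compressor_version,
)

# Fail early if the weight-only quantization prerequisites from the README are not met.
if not (
    is_neural_compressor_version(">=", "2.3")
    and is_intel_extension_for_transformers_version(">=", "1.3")
):
    raise ImportError(
        "weight_only quantization requires neural-compressor >= 2.3 "
        "and intel-extension-for-transformers >= 1.3"
    )
```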
diff --git a/examples/neural_compressor/language-modeling/requirements.txt b/examples/neural_compressor/language-modeling/requirements.txt index a14c910510..0e71b6fc46 100644 --- a/examples/neural_compressor/language-modeling/requirements.txt +++ b/examples/neural_compressor/language-modeling/requirements.txt @@ -3,5 +3,5 @@ torch >= 1.9 datasets >= 1.8.0 sentencepiece != 0.1.92 protobuf -intel-extension-for-transformers +intel-extension-for-transformers >=1.3 peft diff --git a/setup.py b/setup.py index a9bb25b19b..921656eec3 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ "rjieba", "timm", "invisible-watermark>=0.2.0", - "intel-extension-for-transformers", + "intel-extension-for-transformers>=1.3", "peft", ] From de190fd650e043b4cc0c8a2048b136bac43f34e0 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 17 Jan 2024 08:41:41 +0800 Subject: [PATCH 09/64] Update example Signed-off-by: Cheng, Penghui --- examples/neural_compressor/language-modeling/run_clm.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 36422bb024..f3c1b44e57 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -717,11 +717,10 @@ def compute_metrics(eval_preds): if optim_args.apply_quantization and optim_args.quantization_approach in {"static", "dynamic", "weight_only"}: model = trainer.model if isinstance(trainer.model, PreTrainedModel) else trainer.model._model quantizer = INCQuantizer.from_pretrained(model) - if optim_args.quantization_approach != "dynamic": + if optim_args.quantization_approach in ["static", "weight_only"]: num_calibration_samples = min(len(train_dataset), optim_args.num_calibration_samples) train_dataset = train_dataset.select(range(num_calibration_samples)) - if optim_args.quantization_approach == "static": - quantization_config.calibration_sampling_size = num_calibration_samples + quantization_config.calibration_sampling_size = num_calibration_samples quantizer.quantize( quantization_config=quantization_config, From 416b528bf52f3a43043c04304d653a48c9e4bcea Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 14:56:06 +0800 Subject: [PATCH 10/64] Update optimum/intel/neural_compressor/quantization.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/intel/neural_compressor/quantization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 41467327e7..53b269957a 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -209,7 +209,7 @@ def quantize( elif isinstance(quantization_config, WeightOnlyQuantConfig): algo = quantization_config.algorithm else: - raise ValueError("Weight-only quantization needs a object of WeightOnlyQuantConfig.") + raise TypeError(f"For weight-only quantization, `quantization_config` should be an instance of `WeightOnlyQuantConfig`, but got: {type(quantization_config)} instead.") if calibration_dataset is None and ("GPTQ" in algo or "AWQ" in algo): raise ValueError( From d62964a360009ab63c4fcb2c970656aa07ababb6 Mon Sep 17 00:00:00 2001 From: Alexander Kozlov Date: Wed, 21 Feb 2024 12:49:33 +0400 Subject: [PATCH 11/64] [OV]: Fixed inference after 4 bit weight compression (#569) * [OV]: Fixed inferece after 4 bit 
weight compression * Fixed issue * Update optimum/intel/openvino/modeling_decoder.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> * Applied comments * Fixed issue when request is None --------- Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/intel/openvino/modeling_decoder.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 08165578f0..8bcf877bff 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -419,7 +419,8 @@ def prepare_inputs( # past_key_values are not used explicitly, instead they are handled inside the model if past_key_values is None: # This is the first iteration in a sequence, reset all states - self.request.reset_state() + if self.request is not None: + self.request.reset_state() # Set initial value for the next beam_idx input that will be used at the current iteration # and will be optionally updated by _reorder_cache at the next iterations if beam_search is used self.next_beam_idx = np.arange(batch_size, dtype=int) @@ -592,7 +593,10 @@ def _from_pretrained( else: init_cls = cls - causal_model = init_cls(model=model, config=config, model_save_dir=model_cache_path.parent, **kwargs) + enable_compilation = kwargs.pop("compile", True) and not load_in_4bit + causal_model = init_cls( + model=model, config=config, model_save_dir=model_cache_path.parent, compile=enable_compilation, **kwargs + ) if load_in_4bit: if not is_nncf_available(): From 65d5a977c5576d1d460414b4a657ca13e030f74f Mon Sep 17 00:00:00 2001 From: Alexander Kozlov Date: Wed, 21 Feb 2024 17:10:49 +0400 Subject: [PATCH 12/64] Updated docs with load_in_4bit (#558) * Updated docs with load_in_4bit * Update documentation * Update documentation * typo --------- Co-authored-by: Ella Charlaix --- docs/source/optimization_ov.mdx | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 09986961ba..0b653cf726 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -74,19 +74,16 @@ model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) > **NOTE:** `load_in_8bit` is enabled by default for models larger than 1 billion parameters. -For the 4-bit weight quantization we recommend using the NNCF API like below: +For the 4-bit weight quantization you can use the `quantization_config` to specify the optimization parameters, for example: + ```python -from optimum.intel import OVModelForCausalLM -import nncf - -model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=False) -model.model = nncf.compress_weights( - model.model, - mode=nncf.CompressWeightsMode.INT4_SYM, - ratio=0.8, - group_size=128, - ) -model.save_pretrained("compressed_model") +from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig + +model = OVModelForCausalLM.from_pretrained( + model_id, + export=True, + quantization_config=OVWeightQuantizationConfig(bits=4, sym=False, ratio=0.8, dataset="ptb"), +) ``` For more details, please refer to the corresponding NNCF [documentation](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/CompressWeights.md). 
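A hedged sketch tying the two OpenVINO patches above together: load a causal LM with 4-bit weight-only compression through `quantization_config` (the path the updated documentation describes) and immediately run generation through a pipeline, which exercises the state-reset fix from "[OV]: Fixed inference after 4 bit weight compression". The `model_id` is a placeholder, and the compression parameters are copied from the documentation diff above.

```python
from transformers import AutoTokenizer, pipeline
from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

model_id = "gpt2"  # placeholder; use any supported causal LM checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit weight-only compression with the settings shown in the updated docs.
model = OVModelForCausalLM.from_pretrained(
    model_id,
    export=True,
    quantization_config=OVWeightQuantizationConfig(bits=4, sym=False, ratio=0.8, dataset="ptb"),
)

# Generation starts with past_key_values=None, hitting the guarded request.reset_state() path.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(pipe("This is a sample", max_length=20)[0]["generated_text"])
```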
From bddd2030d4f8b3a183c57ca92b3b046f27a2fa8c Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Wed, 21 Feb 2024 16:19:22 +0100 Subject: [PATCH 13/64] Update Transformers dependency requirements (#571) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index acd75896f9..a61a59f5a9 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ INSTALL_REQUIRE = [ "torch>=1.11", "optimum>=1.17.0", - "transformers>=4.26.0", + "transformers>=4.29.0,<4.38.0", "datasets>=1.4.0", "sentencepiece", "scipy", From 70a637314783ddb8677421fdba5d8f6e156da000 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Thu, 22 Feb 2024 15:47:51 +0100 Subject: [PATCH 14/64] Fix compatibility for latest transformers release (#570) * fix compatibility for latest transformers release * update setup * update setup * fix test input size * fix prepare generation for llama models --- optimum/intel/ipex/modeling_base.py | 72 +++++++++++++++++++++- optimum/intel/neural_compressor/trainer.py | 39 ++++++++++++ setup.py | 7 +-- tests/ipex/test_inference.py | 2 +- tests/ipex/test_modeling.py | 10 +-- tests/openvino/test_modeling.py | 13 ++-- 6 files changed, 126 insertions(+), 17 deletions(-) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 67810ae067..2b6b569343 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -46,7 +46,7 @@ from optimum.utils import NormalizedConfigManager from ..generation.modeling import jit_trace, prepare_jit_inputs -from ..utils.import_utils import is_torch_version +from ..utils.import_utils import is_torch_version, is_transformers_version from ..utils.modeling_utils import MULTI_QUERY_ATTN_MODELS, patch_decoder_attention_mask @@ -326,7 +326,8 @@ def __init__( # Perform the initial warmup at the end of __init__ super().__init__(model, config, model_save_dir=model_save_dir, warmup=False) - self.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.model_type)(config) + model_type = config.model_type.replace("_", "-") + self.normalized_config = NormalizedConfigManager.get_normalized_config_class(model_type)(config) self.model_dtype = kwargs.get("model_dtype", self.dtype) self.use_cache = "past_key_values" in self.input_names @@ -339,6 +340,7 @@ def __init__( ) config.is_decoder = True config.is_encoder_decoder = False + self.generation_config = GenerationConfig.from_model_config(config) try: self.model_cls = get_class_from_dynamic_module( @@ -347,7 +349,12 @@ def __init__( except AttributeError: self.model_cls = get_model_class(self.config, AutoModelForCausalLM._model_mapping) self._reorder_cache = self.model_cls._reorder_cache.__get__(self) - self.prepare_inputs_for_generation = self.model_cls.prepare_inputs_for_generation.__get__(self) + + if is_transformers_version(">=", "4.38.0") and model_type in {"llama", "phi", "persimmon"}: + self.prepare_inputs_for_generation = _prepare_inputs_for_generation_for_llama + else: + self.prepare_inputs_for_generation = self.model_cls.prepare_inputs_for_generation.__get__(self) + if hasattr(self.model_cls, "_convert_to_standard_cache"): self._convert_to_standard_cache = self.model_cls._convert_to_standard_cache if hasattr(self.model_cls, "_convert_to_bloom_cache"): @@ -430,3 +437,62 @@ def forward( past_key_values = outputs["past_key_values"] if self.use_cache else None return CausalLMOutputWithPast(logits=logits, 
past_key_values=past_key_values) + + +def _prepare_inputs_for_generation_for_llama( + input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs +): + from transformers.cache_utils import Cache + + if past_key_values is not None: + if isinstance(past_key_values, Cache): + cache_length = past_key_values.get_seq_length() + past_length = past_key_values.seen_tokens + max_cache_length = past_key_values.get_max_length() + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. + + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs diff --git a/optimum/intel/neural_compressor/trainer.py b/optimum/intel/neural_compressor/trainer.py index 4490bf27b2..fc20cdafeb 100644 --- a/optimum/intel/neural_compressor/trainer.py +++ b/optimum/intel/neural_compressor/trainer.py @@ -941,3 +941,42 @@ def get_model_sparsity(self): if self._compression_manager is not None: sparsity = self._compression_manager.model.report_sparsity()[-1] return sparsity + + def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval): + # TODO : can be removed once transformers >= v4.38.0 + if self.control.should_log and self.state.global_step > self._globalstep_last_logged: + if is_torch_tpu_available(): + xm.mark_step() + + logs: Dict[str, float] = {} + + # all_gather + mean() to get average loss over all processes + tr_loss_scalar = self._nested_gather(tr_loss).mean().item() + + # reset tr_loss to zero + tr_loss -= tr_loss + + logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) + logs["learning_rate"] = self._get_learning_rate() + + self._total_loss_scalar += tr_loss_scalar + self._globalstep_last_logged = self.state.global_step + 
self.store_flos() + + self.log(logs) + + metrics = None + if self.control.should_evaluate: + metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) + self._report_to_hp_search(trial, self.state.global_step, metrics) + + # Run delayed LR scheduler now that metrics are populated + if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): + metric_to_check = self.args.metric_for_best_model + if not metric_to_check.startswith("eval_"): + metric_to_check = f"eval_{metric_to_check}" + self.lr_scheduler.step(metrics[metric_to_check]) + + if self.control.should_save: + self._save_checkpoint(model, trial, metrics=metrics) + self.control = self.callback_handler.on_save(self.args, self.state, self.control) diff --git a/setup.py b/setup.py index a61a59f5a9..f1a72a52a8 100644 --- a/setup.py +++ b/setup.py @@ -13,8 +13,8 @@ INSTALL_REQUIRE = [ "torch>=1.11", - "optimum>=1.17.0", - "transformers>=4.29.0,<4.38.0", + "optimum~=1.17", + "transformers>=4.36.0,<4.39.0", "datasets>=1.4.0", "sentencepiece", "scipy", @@ -45,14 +45,11 @@ "neural-compressor>=2.2.0", "onnx", "onnxruntime<1.15.0", - "transformers>=4.34.0", ], "openvino": [ "openvino>=2023.3", "onnx", "onnxruntime", - "transformers>=4.36.0", - "optimum>=1.16.1", ], "openvino-tokenizers": ["openvino-tokenizers[transformers]"], "nncf": ["nncf>=2.8.1"], diff --git a/tests/ipex/test_inference.py b/tests/ipex/test_inference.py index 706b1ded5d..bc1890453d 100644 --- a/tests/ipex/test_inference.py +++ b/tests/ipex/test_inference.py @@ -115,7 +115,7 @@ def test_text_generation_pipeline_inference(self, model_arch): model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32, return_dict=False) model = model.eval() tokenizer = AutoTokenizer.from_pretrained(model_id) - inputs = "DeepSpeed is a machine learning framework for deep neural networks and deep reinforcement learning. 
It is written in C++ and is available for Linux, Mac OS X," + inputs = "This is a simple input" text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer) with torch.inference_mode(): output = text_generator(inputs) diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index 27a49f3e9b..ffc2ca6a89 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -67,7 +67,6 @@ "gptj": "hf-internal-testing/tiny-random-GPTJModel", "levit": "hf-internal-testing/tiny-random-LevitModel", "llama": "fxmarty/tiny-llama-fast-tokenizer", - "opt": "hf-internal-testing/tiny-random-OPTModel", "marian": "sshleifer/tiny-marian-en-de", "mbart": "hf-internal-testing/tiny-random-mbart", "mistral": "echarlaix/tiny-random-mistral", @@ -76,6 +75,8 @@ "mobilevit": "hf-internal-testing/tiny-random-mobilevit", "mpt": "hf-internal-testing/tiny-random-MptForCausalLM", "mt5": "stas/mt5-tiny-random", + "opt": "hf-internal-testing/tiny-random-OPTModel", + "phi": "hf-internal-testing/tiny-random-PhiForCausalLM", "resnet": "hf-internal-testing/tiny-random-resnet", "roberta": "hf-internal-testing/tiny-random-roberta", "roformer": "hf-internal-testing/tiny-random-roformer", @@ -199,7 +200,7 @@ def test_pipeline(self, model_arch): class IPEXModelForCausalLMTest(unittest.TestCase): SUPPORTED_ARCHITECTURES = ( "bart", - # "gpt_bigcode", + "gpt_bigcode", "blenderbot", "blenderbot-small", "bloom", @@ -208,8 +209,9 @@ class IPEXModelForCausalLMTest(unittest.TestCase): "gpt_neo", "gpt_neox", "llama", - # "mistral", - # "mpt", + "mistral", + # "phi", + "mpt", "opt", ) GENERATION_LENGTH = 100 diff --git a/tests/openvino/test_modeling.py b/tests/openvino/test_modeling.py index 5f3208fd58..2188b7061f 100644 --- a/tests/openvino/test_modeling.py +++ b/tests/openvino/test_modeling.py @@ -483,7 +483,7 @@ class OVModelForCausalLMIntegrationTest(unittest.TestCase): "gpt_neo", "gpt_neox", "llama", - "llama_gptq", + # "llama_gptq", "marian", "mistral", "mpt", @@ -504,7 +504,7 @@ def test_compare_to_transformers(self, model_arch): ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True, ov_config=F32_CONFIG) self.assertIsInstance(ov_model.config, PretrainedConfig) self.assertTrue(ov_model.use_cache) - self.assertEqual(ov_model.stateful, self.IS_SUPPORT_STATEFUL and model_arch != "gpt_bigcode") + transformers_model = AutoModelForCausalLM.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) tokens = tokenizer( @@ -520,10 +520,15 @@ def test_compare_to_transformers(self, model_arch): self.assertIsInstance(ov_outputs.logits, torch.Tensor) self.assertTrue("past_key_values" in ov_outputs) self.assertIsInstance(ov_outputs.past_key_values, tuple) - if self.IS_SUPPORT_STATEFUL and model_arch != "gpt_bigcode": + + is_stateful = ov_model.config.model_type not in {"gpt_bigcode", "llama"} and self.IS_SUPPORT_STATEFUL + self.assertEqual(ov_model.stateful, is_stateful) + if is_stateful: self.assertTrue(len(ov_outputs.past_key_values) == 1 and len(ov_outputs.past_key_values[0]) == 0) + with torch.no_grad(): transformers_outputs = transformers_model(**tokens) + # Compare tensor outputs self.assertTrue(torch.allclose(ov_outputs.logits, transformers_outputs.logits, atol=1e-4)) del transformers_model @@ -540,7 +545,7 @@ def test_pipeline(self, model_arch): model.half() model.compile() pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) - outputs = pipe("This is a sample", max_length=10) + outputs = pipe("This is a sample", max_length=20) 
self.assertEqual(pipe.device, model.device) self.assertTrue(all("This is a sample" in item["generated_text"] for item in outputs)) del pipe From 1437b1bdcba2caf27e78ffe1ca05e3d9f25565a6 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Tue, 27 Feb 2024 14:05:00 +0100 Subject: [PATCH 15/64] Deprecate compression options (#565) * deprecate compression options * style * fix configuration * Update CLI argument * update documentation * deprecate torch nn modules for ov quantizer * fix ov config for fp32 models * fix format * update documentation * Add check for configuration * fix ratio default value for SD models * add quantization_config argument for OVModel * remove commented line * Update docs/source/inference.mdx Co-authored-by: Alexander Kozlov * add default config for causal LM * fix warning message --------- Co-authored-by: Alexander Kozlov --- README.md | 2 +- docs/source/inference.mdx | 26 ++- docs/source/optimization_ov.mdx | 6 +- optimum/commands/export/openvino.py | 53 ++++++- optimum/exporters/openvino/__init__.py | 14 ++ optimum/exporters/openvino/__main__.py | 41 ++++- optimum/exporters/openvino/convert.py | 148 ++++++------------ optimum/intel/__init__.py | 14 +- optimum/intel/openvino/__init__.py | 3 +- optimum/intel/openvino/configuration.py | 37 +++-- optimum/intel/openvino/modeling.py | 22 ++- optimum/intel/openvino/modeling_base.py | 38 +++-- .../intel/openvino/modeling_base_seq2seq.py | 41 +++-- optimum/intel/openvino/modeling_decoder.py | 45 ++++-- optimum/intel/openvino/modeling_diffusion.py | 26 ++- optimum/intel/openvino/quantization.py | 29 ++-- .../utils/dummy_openvino_and_nncf_objects.py | 22 --- optimum/intel/utils/dummy_openvino_objects.py | 22 +++ 18 files changed, 362 insertions(+), 227 deletions(-) diff --git a/README.md b/README.md index 85f50d24e7..7b762cce26 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,7 @@ from optimum.intel import OVQuantizer, OVModelForSequenceClassification from transformers import AutoTokenizer, AutoModelForSequenceClassification model_id = "distilbert-base-uncased-finetuned-sst-2-english" -model = AutoModelForSequenceClassification.from_pretrained(model_id) +model = OVModelForSequenceClassification.from_pretrained(model_id, export=True) tokenizer = AutoTokenizer.from_pretrained(model_id) def preprocess_fn(examples, tokenizer): return tokenizer( diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx index f05edfaee2..a9ee5529da 100644 --- a/docs/source/inference.mdx +++ b/docs/source/inference.mdx @@ -47,6 +47,8 @@ Here we set the `task` to `text-generation-with-past`, with the `-with-past` suf optimum-cli export openvino --model local_path --task text-generation-with-past ov_model ``` +To export your model in fp16, you can add `--weight-format fp16` when exporting your model. 
+ Once the model is exported, you can load the OpenVINO model using : ```python @@ -96,7 +98,7 @@ tokenizer.save_pretrained(save_directory) ### Weight-only quantization -You can also apply 8-bit or 4-bit weight quantization when exporting your model with the CLI: +You can also apply 8-bit or 4-bit weight quantization when exporting your model with the CLI by setting the `weight-format` argument to `int8` or `int4` respectively: ```bash optimum-cli export openvino --model gpt2 --weight-format int8 ov_model @@ -104,7 +106,15 @@ optimum-cli export openvino --model gpt2 --weight-format int8 ov_model This will result in the exported model linear and embedding layers to be quantized to INT8 or INT4, the activations will be kept in floating point precision. This type of optimization allows reducing the footprint and latency of LLMs. -This can also be done when loading your model by setting the `load_in_8bit` argument when calling the `from_pretrained()` method. +By default the quantization scheme will be [asymmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#asymmetric-quantization), to make it [symmetric](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md#symmetric-quantization) you can add `--sym`. + +For INT4 quantization you can also specify the following arguments: +* The `--group-size` parameter defines the group size to use for quantization; setting it to `-1` results in per-column quantization. +* The `--ratio` CLI parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`. + +Smaller `group_size` and `ratio` values usually improve accuracy, at the cost of model size and inference latency. + +You can also apply 8-bit quantization on your model's weights when loading your model by setting the `load_in_8bit=True` argument when calling the `from_pretrained()` method. ```python from optimum.intel import OVModelForCausalLM model_id = "helenai/gpt2-ov" model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) > **NOTE:** `load_in_8bit` is enabled by default for the models larger than 1 billion parameters. -There are also alternative compression options for a different performance-accuracy trade-off: - -| Option | Description | -|---------------------------------------------------------------------|-------------------| -| `fp16` | Float16 weights | -| `int8` | INT8 weights | -| `int4_sym_g128`, `int4_asym_g128`, `int4_sym_g64`, `int4_asym_g64`* | INT4 weights | - -*`sym` and `asym` stand for symmetric and asymmetric quantization, `g128` and `g64` means the group size `128` and `64` respectively. - -`--ratio` CLI parameter controls the ratio between 4-bit and 8-bit quantized layers and can also change performance-accuracy trade-off for the optimized model. It is valid only for INT4 quantization options. - To apply quantization on both weights and activations, you can use the `OVQuantizer`, more information in the [documentation](https://huggingface.co/docs/optimum/main/en/intel/optimization_ov#optimization).
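For reference, the CLI flags documented above map onto the `OVWeightQuantizationConfig` options refined in this patch; a minimal sketch of the equivalent Python API (assuming nncf is installed, reusing the `gpt2` model id from the CLI example; the 4-bit values are illustrative, not a recommendation):

```python
from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

# 4-bit asymmetric weights, group size 128, ~80% of layers in int4 (the rest kept in int8)
quantization_config = OVWeightQuantizationConfig(bits=4, sym=False, group_size=128, ratio=0.8)

# export=True converts the Transformers checkpoint to OpenVINO; the weight compression is applied while loading
model = OVModelForCausalLM.from_pretrained("gpt2", export=True, quantization_config=quantization_config)
```

This follows the same path as `load_in_8bit`: the config is forwarded to `_from_pretrained` and applied through `_weight_only_quantization`, as shown in the `modeling_decoder.py` and `quantization.py` hunks later in this patch.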
diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 0b653cf726..77dab40159 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -26,11 +26,11 @@ Here is how to apply static quantization on a fine-tuned DistilBERT: ```python from functools import partial -from transformers import AutoModelForSequenceClassification, AutoTokenizer -from optimum.intel import OVConfig, OVQuantizer +from transformers import AutoTokenizer +from optimum.intel import OVConfig, OVQuantizer, OVModelForSequenceClassification model_id = "distilbert-base-uncased-finetuned-sst-2-english" -model = AutoModelForSequenceClassification.from_pretrained(model_id) +model = OVModelForSequenceClassification.from_pretrained(model_id, export=True) tokenizer = AutoTokenizer.from_pretrained(model_id) # The directory where the quantized model will be saved save_dir = "ptq_model" diff --git a/optimum/commands/export/openvino.py b/optimum/commands/export/openvino.py index 0f1f71d252..255e2a7e13 100644 --- a/optimum/commands/export/openvino.py +++ b/optimum/commands/export/openvino.py @@ -77,7 +77,7 @@ def parse_args_openvino(parser: "ArgumentParser"): optional_group.add_argument( "--weight-format", type=str, - choices=["fp32", "fp16", "int8", "int4_sym_g128", "int4_asym_g128", "int4_sym_g64", "int4_asym_g64"], + choices=["fp32", "fp16", "int8", "int4", "int4_sym_g128", "int4_asym_g128", "int4_sym_g64", "int4_asym_g64"], default=None, help=( "The weight format of the exporting model, e.g. f32 stands for float32 weights, f16 - for float16 weights, i8 - INT8 weights, int4_* - for INT4 compressed weights." ), ) optional_group.add_argument( "--ratio", type=float, - default=0.8, + default=None, help=( "Compression ratio between primary and backup precision. In the case of INT4, NNCF evaluates layer sensitivity and keeps the most impactful layers in INT8" "precision (by default 20%% in INT8). This helps to achieve better accuracy after weight compression." ), ) + optional_group.add_argument( + "--sym", + action="store_true", + default=None, + help=("Whether to apply symmetric quantization"), + ) + optional_group.add_argument( + "--group-size", + type=int, + default=None, + help=("The group size to use for quantization.
Recommended value is 128 and -1 uses per-column quantization."), + ) optional_group.add_argument( "--disable-stateful", action="store_true", @@ -132,6 +144,7 @@ def parse_args(parser: "ArgumentParser"): def run(self): from ...exporters.openvino.__main__ import main_export + from ...intel.openvino.configuration import _DEFAULT_4BIT_CONFIGS, OVConfig if self.args.fp16: logger.warning( @@ -144,6 +157,39 @@ def run(self): ) self.args.weight_format = "int8" + weight_format = self.args.weight_format or "fp32" + + ov_config = None + if weight_format in {"fp16", "fp32"}: + ov_config = OVConfig(dtype=weight_format) + else: + is_int8 = weight_format == "int8" + + # For int4 quantization if not parameter is provided, then use the default config if exist + if ( + not is_int8 + and self.args.ratio is None + and self.args.group_size is None + and self.args.sym is None + and self.args.model in _DEFAULT_4BIT_CONFIGS + ): + quantization_config = _DEFAULT_4BIT_CONFIGS[self.args.model] + else: + quantization_config = { + "bits": 8 if is_int8 else 4, + "ratio": 1 if is_int8 else (self.args.ratio or 0.8), + "sym": self.args.sym or False, + "group_size": -1 if is_int8 else self.args.group_size, + } + + if weight_format in {"int4_sym_g128", "int4_asym_g128", "int4_sym_g64", "int4_asym_g64"}: + logger.warning( + f"--weight-format {weight_format} is deprecated, possible choices are fp32, fp16, int8, int4" + ) + quantization_config["sym"] = "asym" not in weight_format + quantization_config["group_size"] = 128 if "128" in weight_format else 64 + ov_config = OVConfig(quantization_config=quantization_config) + # TODO : add input shapes main_export( model_name_or_path=self.args.model, @@ -153,8 +199,7 @@ def run(self): cache_dir=self.args.cache_dir, trust_remote_code=self.args.trust_remote_code, pad_token_id=self.args.pad_token_id, - compression_option=self.args.weight_format, - compression_ratio=self.args.ratio, + ov_config=ov_config, stateful=not self.args.disable_stateful, convert_tokenizer=self.args.convert_tokenizer, # **input_shapes, diff --git a/optimum/exporters/openvino/__init__.py b/optimum/exporters/openvino/__init__.py index 41b456abce..9664f6ae6d 100644 --- a/optimum/exporters/openvino/__init__.py +++ b/optimum/exporters/openvino/__init__.py @@ -1,3 +1,17 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from .__main__ import main_export from .convert import export, export_from_model, export_models, export_pytorch_via_onnx from .stateful import ensure_stateful_is_available, patch_stateful diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index 18f650c2ad..24b65f9032 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -14,7 +14,7 @@ import logging from pathlib import Path -from typing import Any, Callable, Dict, Optional, Union +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union from requests.exceptions import ConnectionError as RequestsConnectionError from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizerBase @@ -41,6 +41,18 @@ ] +if TYPE_CHECKING: + from optimum.intel.openvino.configuration import OVConfig + +_COMPRESSION_OPTIONS = { + "int8": {"bits": 8}, + "int4_sym_g128": {"bits": 4, "sym": True, "group_size": 128}, + "int4_asym_g128": {"bits": 4, "sym": False, "group_size": 128}, + "int4_sym_g64": {"bits": 4, "sym": True, "group_size": 64}, + "int4_asym_g64": {"bits": 4, "sym": False, "group_size": 64}, +} + + logger = logging.getLogger(__name__) @@ -63,6 +75,7 @@ def main_export( fn_get_submodels: Optional[Callable] = None, compression_option: Optional[str] = None, compression_ratio: Optional[float] = None, + ov_config: "OVConfig" = None, stateful: bool = True, convert_tokenizer: bool = False, library_name: Optional[str] = None, @@ -137,6 +150,29 @@ def main_export( >>> main_export("gpt2", output="gpt2_onnx/") ``` """ + + if compression_option is not None: + logger.warning( + "The `compression_option` argument is deprecated and will be removed in optimum-intel v1.17.0. " + "Please, pass an `ov_config` argument instead `OVConfig(..., quantization_config=quantization_config)`." + ) + + if compression_ratio is not None: + logger.warning( + "The `compression_ratio` argument is deprecated and will be removed in optimum-intel v1.17.0. " + "Please, pass an `ov_config` argument instead `OVConfig(quantization_config={ratio=compression_ratio})`." 
+ ) + + if ov_config is None and compression_option is not None: + from ...intel.openvino.configuration import OVConfig + + if compression_option == "fp16": + ov_config = OVConfig(dtype="fp16") + elif compression_option != "fp32": + q_config = _COMPRESSION_OPTIONS[compression_option] if compression_option in _COMPRESSION_OPTIONS else {} + q_config["ratio"] = compression_ratio or 1.0 + ov_config = OVConfig(quantization_config=q_config) + original_task = task task = TasksManager.map_from_synonym(task) framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, framework=framework) @@ -293,8 +329,7 @@ class StoreAttr(object): model=model, output=output, task=task, - compression_option=compression_option, - compression_ratio=compression_ratio, + ov_config=ov_config, stateful=stateful, model_kwargs=model_kwargs, custom_onnx_configs=custom_onnx_configs, diff --git a/optimum/exporters/openvino/convert.py b/optimum/exporters/openvino/convert.py index 876a20746f..5353912d48 100644 --- a/optimum/exporters/openvino/convert.py +++ b/optimum/exporters/openvino/convert.py @@ -18,7 +18,7 @@ import logging import os from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union from transformers import T5Tokenizer, T5TokenizerFast from transformers.utils import is_tf_available, is_torch_available @@ -71,42 +71,26 @@ from transformers.modeling_tf_utils import TFPreTrainedModel -def _save_model(model, path: str, compression_option: Optional[str] = None, compression_ratio: Optional[float] = None): - if compression_option is not None and compression_option != "fp16" and compression_option != "fp32": - if not is_nncf_available(): - raise ImportError( - "Quantization of the weights to int8 requires nncf, please install it with `pip install nncf`" - ) +if TYPE_CHECKING: + from optimum.intel.openvino.configuration import OVConfig + + +def _save_model(model, path: str, ov_config: Optional["OVConfig"] = None): + compress_to_fp16 = False + + if ov_config is not None: + if ov_config.quantization_config: + if not is_nncf_available(): + raise ImportError( + "Quantization of the weights to int8 requires nncf, please install it with `pip install nncf`" + ) + + from optimum.intel.openvino.quantization import _weight_only_quantization + + _weight_only_quantization(model, ov_config.quantization_config) + + compress_to_fp16 = ov_config.dtype == "fp16" - import nncf - - COMPRESSION_OPTIONS = { - "int8": {"mode": nncf.CompressWeightsMode.INT8}, - "int4_sym_g128": { - "mode": nncf.CompressWeightsMode.INT4_SYM, - "group_size": 128, - "ratio": compression_ratio, - }, - "int4_asym_g128": { - "mode": nncf.CompressWeightsMode.INT4_ASYM, - "group_size": 128, - "ratio": compression_ratio, - }, - "int4_sym_g64": { - "mode": nncf.CompressWeightsMode.INT4_SYM, - "group_size": 64, - "ratio": compression_ratio, - }, - "int4_asym_g64": { - "mode": nncf.CompressWeightsMode.INT4_ASYM, - "group_size": 64, - "ratio": compression_ratio, - }, - } - - model = nncf.compress_weights(model, **COMPRESSION_OPTIONS[compression_option]) - - compress_to_fp16 = compression_option == "fp16" save_model(model, path, compress_to_fp16) @@ -118,8 +102,7 @@ def export( device: str = "cpu", input_shapes: Optional[Dict] = None, model_kwargs: Optional[Dict[str, Any]] = None, - compression_option: Optional[str] = None, - compression_ratio: Optional[float] = None, + ov_config: Optional["OVConfig"] = None, stateful: bool = True, 
) -> Tuple[List[str], List[str]]: """ @@ -137,11 +120,8 @@ def export( device (`str`, *optional*, defaults to `cpu`): The device on which the model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for export on CUDA devices. - compression_option (`Optional[str]`, defaults to `None`): - The weight compression option, e.g. `f16` stands for float16 weights, `i8` - INT8 weights, `int4_sym_g128` - INT4 symmetric weights w/ group size 128, `int4_asym_g128` - as previous but asymmetric w/ zero-point, - `int4_sym_g64` - INT4 symmetric weights w/ group size 64, "int4_asym_g64" - as previous but asymmetric w/ zero-point. - compression_ratio (`Optional[float]`, defaults to `None`): - Compression ratio between primary and backup precision (only relevant to INT4). + ov_config (`OVConfig`, *optional*): + The configuration containing the parameters related to quantization. input_shapes (`Optional[Dict]`, defaults to `None`): If specified, allows to use specific shapes for the example input provided to the exporter. stateful (`bool`, defaults to `True`): @@ -172,8 +152,7 @@ def export( output, device=device, input_shapes=input_shapes, - compression_option=compression_option, - compression_ratio=compression_ratio, + ov_config=ov_config, model_kwargs=model_kwargs, stateful=stateful, ) @@ -186,9 +165,7 @@ def export( raise RuntimeError("`tf2onnx` does not support export on CUDA device.") if input_shapes is not None: logger.info("`input_shapes` argument is not supported by the Tensorflow ONNX export and will be ignored.") - return export_tensorflow( - model, config, opset, output, compression_option=compression_option, compression_ratio=compression_ratio - ) + return export_tensorflow(model, config, opset, output, ov_config=ov_config) else: raise RuntimeError( @@ -201,8 +178,7 @@ def export_tensorflow( config: OnnxConfig, opset: int, output: Path, - compression_option: Optional[str] = None, - compression_ratio: Optional[float] = None, + ov_config: Optional["OVConfig"] = None, ): """ Export the TensorFlow model to OpenVINO format. @@ -221,9 +197,7 @@ def export_tensorflow( onnx_path = Path(output).with_suffix(".onnx") input_names, output_names = export_tensorflow_onnx(model, config, opset, onnx_path) ov_model = convert_model(str(onnx_path)) - _save_model( - ov_model, output.parent / output, compression_option=compression_option, compression_ratio=compression_ratio - ) + _save_model(ov_model, output.parent / output, ov_config=ov_config) return input_names, output_names, True @@ -235,8 +209,7 @@ def export_pytorch_via_onnx( device: str = "cpu", input_shapes: Optional[Dict] = None, model_kwargs: Optional[Dict[str, Any]] = None, - compression_option: Optional[str] = None, - compression_ratio: Optional[float] = None, + ov_config: Optional["OVConfig"] = None, ): """ Exports a PyTorch model to an OpenVINO Intermediate Representation via ONNX export. @@ -257,11 +230,8 @@ def export_pytorch_via_onnx( If specified, allows to use specific shapes for the example input provided to the exporter. model_kwargs (optional[Dict[str, Any]], defaults to `None`): Additional kwargs for model export. - compression_option (`Optional[str]`, defaults to `None`): - The weight compression option, e.g. `f16` stands for float16 weights, `i8` - INT8 weights, `int4_sym_g128` - INT4 symmetric weights w/ group size 128, `int4_asym_g128` - as previous but asymmetric w/ zero-point, - `int4_sym_g64` - INT4 symmetric weights w/ group size 64, "int4_asym_g64" - as previous but asymmetric w/ zero-point. 
- compression_ratio (`Optional[float]`, defaults to `None`): - Compression ratio between primary and backup precision (only relevant to INT4). + ov_config (`OVConfig`, *optional*): + The configuration containing the parameters related to quantization. Returns: `Tuple[List[str], List[str], bool]`: A tuple with an ordered list of the model's inputs, and the named inputs from @@ -280,12 +250,7 @@ def export_pytorch_via_onnx( ) torch.onnx.export = orig_torch_onnx_export ov_model = convert_model(str(onnx_output)) - _save_model( - ov_model, - output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output, - compression_option=compression_option, - compression_ratio=compression_ratio, - ) + _save_model(ov_model, output.parent / OV_XML_FILE_NAME if output.suffix != ".xml" else output, ov_config=ov_config) return input_names, output_names, True @@ -297,8 +262,7 @@ def export_pytorch( device: str = "cpu", input_shapes: Optional[Dict] = None, model_kwargs: Optional[Dict[str, Any]] = None, - compression_option: Optional[str] = None, - compression_ratio: Optional[float] = None, + ov_config: Optional["OVConfig"] = None, stateful: bool = False, ) -> Tuple[List[str], List[str]]: """ @@ -320,11 +284,8 @@ def export_pytorch( If specified, allows to use specific shapes for the example input provided to the exporter. model_kwargs (optional[Dict[str, Any]], defaults to `None`): Additional kwargs for model export - compression_option (`Optional[str]`, defaults to `None`): - The weight compression option, e.g. `f16` stands for float16 weights, `i8` - INT8 weights, `int4_sym_g128` - INT4 symmetric weights w/ group size 128, `int4_asym_g128` - as previous but asymmetric w/ zero-point, - `int4_sym_g64` - INT4 symmetric weights w/ group size 64, "int4_asym_g64" - as previous but asymmetric w/ zero-point. - compression_ratio (`Optional[float]`, defaults to `None`): - Compression ratio between primary and backup precision (only relevant to INT4). + ov_config (`OVConfig`, *optional*): + The configuration containing the parameters related to quantization. stateful (`bool`, defaults to `False`): Produce stateful model where all kv-cache inputs and outputs are hidden in the model and are not exposed as model inputs and outputs. Applicable only for decoder models. @@ -422,8 +383,7 @@ def ts_patched_forward(*args, **kwargs): device, input_shapes, model_kwargs, - compression_option=compression_option, - compression_ratio=compression_ratio, + ov_config=ov_config, ) sig = inspect.signature(model.forward) if hasattr(model, "forward") else inspect.signature(model.call) @@ -450,7 +410,7 @@ def ts_patched_forward(*args, **kwargs): if stateful: patch_stateful(model.config, ov_model) - _save_model(ov_model, output, compression_option=compression_option, compression_ratio=compression_ratio) + _save_model(ov_model, output, ov_config=ov_config) clear_class_registry() del model gc.collect() @@ -467,8 +427,7 @@ def export_models( device: str = "cpu", input_shapes: Optional[Dict] = None, model_kwargs: Optional[Dict[str, Any]] = None, - compression_option: Optional[str] = None, - compression_ratio: Optional[int] = None, + ov_config: Optional["OVConfig"] = None, stateful: bool = True, ) -> Tuple[List[List[str]], List[List[str]]]: """ @@ -484,11 +443,8 @@ def export_models( export on CUDA devices. input_shapes (Optional[Dict], optional, Defaults to None): If specified, allows to use specific shapes for the example input provided to the exporter. 
- compression_option (`Optional[str]`, defaults to `None`): - The weight compression option, e.g. `f16` stands for float16 weights, `i8` - INT8 weights, `int4_sym_g128` - INT4 symmetric weights w/ group size 128, `int4_asym_g128` - as previous but asymmetric w/ zero-point, - `int4_sym_g64` - INT4 symmetric weights w/ group size 64, "int4_asym_g64" - as previous but asymmetric w/ zero-point. - compression_ratio (`Optional[int]`, defaults to `None`): - Compression ratio between primary and backup precision (only relevant to INT4). + ov_config (`OVConfig`, *optional*): + The configuration containing the parameters related to quantization. model_kwargs (Optional[Dict[str, Any]], optional): Additional kwargs for model export. stateful (`bool`, defaults to `True`) @@ -501,7 +457,6 @@ def export_models( list of input_names and output_names from ONNX configuration """ - # TODO : modify compression_option to quantization_config outputs = [] if output_names is not None and len(output_names) != len(models_and_onnx_configs): @@ -523,8 +478,7 @@ def export_models( device=device, input_shapes=input_shapes, model_kwargs=model_kwargs, - compression_option=compression_option, - compression_ratio=compression_ratio, + ov_config=ov_config, stateful=stateful, ) ) @@ -537,8 +491,7 @@ def export_from_model( model: Union["PreTrainedModel", "TFPreTrainedModel"], output: Union[str, Path], task: Optional[str] = None, - compression_option: Optional[str] = None, - compression_ratio: Optional[float] = None, + ov_config: Optional["OVConfig"] = None, stateful: bool = True, opset: Optional[int] = None, model_kwargs: Optional[Dict[str, Any]] = None, @@ -548,14 +501,9 @@ def export_from_model( device: str = "cpu", **kwargs_shapes, ): - if ( - compression_option is not None - and compression_option != "fp16" - and compression_option != "fp32" - and not is_nncf_available() - ): + if ov_config is not None and ov_config.quantization_config and not is_nncf_available(): raise ImportError( - f"Compression of the weights to {compression_option} requires nncf, please install it with `pip install nncf`" + f"Compression of the weights to {ov_config.quantization_config} requires nncf, please install it with `pip install nncf`" ) model_kwargs = model_kwargs or {} @@ -635,7 +583,7 @@ def export_from_model( legacy=False, ) - if compression_option is None: + if ov_config is None: if library_name == "diffusers": num_parameters = model.unet.num_parameters() else: @@ -643,7 +591,10 @@ def export_from_model( if num_parameters >= _MAX_UNCOMPRESSED_SIZE: if is_nncf_available(): - compression_option = "int8" + from ...intel.openvino.configuration import OVConfig + + ov_config = OVConfig(quantization_config={"bits": 8}) + logger.info("The model weights will be quantized to int8.") else: logger.warning( @@ -697,8 +648,7 @@ def export_from_model( output_names=files_subpaths, input_shapes=input_shapes, device=device, - compression_option=compression_option, - compression_ratio=compression_ratio, + ov_config=ov_config, stateful=stateful, opset=opset, model_kwargs=model_kwargs, diff --git a/optimum/intel/__init__.py b/optimum/intel/__init__.py index 478e3257bd..93a4417bfc 100644 --- a/optimum/intel/__init__.py +++ b/optimum/intel/__init__.py @@ -58,16 +58,12 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: _import_structure["utils.dummy_openvino_and_nncf_objects"] = [ - "OVConfig", "OVQuantizer", "OVTrainer", "OVTrainingArguments", - "OVWeightQuantizationConfig", ] else: - _import_structure["openvino"].extend( - 
["OVConfig", "OVQuantizer", "OVTrainer", "OVTrainingArguments", "OVWeightQuantizationConfig"] - ) + _import_structure["openvino"].extend(["OVQuantizer", "OVTrainer", "OVTrainingArguments"]) try: if not (is_openvino_available() and is_diffusers_available()): @@ -119,6 +115,8 @@ "OVModelForSpeechSeq2Seq", "OVModelForSequenceClassification", "OVModelForTokenClassification", + "OVWeightQuantizationConfig", + "OVConfig", ] ) @@ -180,14 +178,12 @@ raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_openvino_and_nncf_objects import ( - OVConfig, OVQuantizer, OVTrainer, OVTrainingArguments, - OVWeightQuantizationConfig, ) else: - from .openvino import OVConfig, OVQuantizer, OVTrainer, OVTrainingArguments, OVWeightQuantizationConfig + from .openvino import OVQuantizer, OVTrainer, OVTrainingArguments try: if not (is_openvino_available() and is_diffusers_available()): @@ -218,6 +214,7 @@ from .utils.dummy_openvino_objects import * else: from .openvino import ( + OVConfig, OVModelForAudioClassification, OVModelForAudioFrameClassification, OVModelForAudioXVector, @@ -231,6 +228,7 @@ OVModelForSequenceClassification, OVModelForSpeechSeq2Seq, OVModelForTokenClassification, + OVWeightQuantizationConfig, ) try: diff --git a/optimum/intel/openvino/__init__.py b/optimum/intel/openvino/__init__.py index abb1f43775..a6227615a2 100644 --- a/optimum/intel/openvino/__init__.py +++ b/optimum/intel/openvino/__init__.py @@ -36,11 +36,12 @@ patch_torch_operators() - from .configuration import OVConfig, OVWeightQuantizationConfig from .quantization import OVQuantizer from .trainer import OVTrainer from .training_args import OVTrainingArguments + +from .configuration import OVConfig, OVWeightQuantizationConfig from .modeling import ( OVModelForAudioClassification, OVModelForAudioFrameClassification, diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index f8479dbbe4..6611e5d0dd 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -77,7 +77,7 @@ } -DEFAULT_4BIT_CONFIGS = { +_DEFAULT_4BIT_CONFIGS = { "databricks/dolly-v2-3b": {"bits": 4, "sym": False, "group_size": 32, "ratio": 0.5}, "EleutherAI/gpt-j-6b": {"bits": 4, "sym": False, "group_size": 64}, "facebook/opt-6.7b": {"bits": 4, "sym": False, "group_size": 64, "ratio": 0.8}, @@ -108,7 +108,8 @@ def __init__( compression: Union[List[Dict], Dict, None] = None, input_info: Optional[List] = None, save_onnx_model: bool = False, - quantization_config: Optional[QuantizationConfigMixin] = None, + quantization_config: Optional[Union[QuantizationConfigMixin, Dict]] = None, + dtype: Optional[str] = None, **kwargs, ): super().__init__() @@ -117,7 +118,13 @@ def __init__( self.save_onnx_model = save_onnx_model self._enable_standard_onnx_export_option() self.optimum_version = kwargs.pop("optimum_version", None) - self.quantization_config = quantization_config + self.quantization_config = quantization_config or {} + + if isinstance(quantization_config, QuantizationConfigMixin): + bits = self.quantization_config.bits + else: + bits = self.quantization_config.get("bits", None) + self.dtype = "int" + str(bits) if isinstance(bits, int) else dtype def add_input_info(self, model_inputs: Dict, force_batch_one: bool = False): self.input_info = [ @@ -130,8 +137,6 @@ def add_input_info(self, model_inputs: Dict, force_batch_one: bool = False): ] def save_pretrained(self, *args, **kwargs): - if self.quantization_config is None: - self.quantization_config = 
OVWeightQuantizationConfig() super().save_pretrained(*args, **kwargs) def _enable_standard_onnx_export_option(self): @@ -195,9 +200,9 @@ def __init__( self, bits: int = 8, sym: bool = False, - tokenizer: Any = None, + tokenizer: Optional[Any] = None, dataset: Optional[str] = None, - ratio: Optional[float] = None, + ratio: float = 1.0, group_size: Optional[int] = None, all_layers: Optional[bool] = None, sensitivity_metric: Optional[str] = None, @@ -208,7 +213,7 @@ def __init__( self.sym = sym self.tokenizer = tokenizer self.dataset = dataset - self.group_size = group_size + self.group_size = group_size or (-1 if bits == 8 else 128) self.ratio = ratio self.all_layers = all_layers self.sensitivity_metric = sensitivity_metric @@ -221,9 +226,9 @@ def post_init(self): Safety checker that arguments are correct """ if self.ratio is not None and not (0 <= self.ratio <= 1): - raise ValueError("damp_percent must between 0 and 1.") + raise ValueError("`ratio` must between 0 and 1.") if self.group_size is not None and self.group_size != -1 and self.group_size <= 0: - raise ValueError("group_size must be greater than 0 or equal to -1") + raise ValueError("`group_size` must be greater than 0 or equal to -1") if self.dataset is not None and isinstance(self.dataset, str): if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: raise ValueError( @@ -234,6 +239,16 @@ def post_init(self): if self.bits not in [4, 8]: raise ValueError(f"Only support quantization to [4,8] bits but found {self.bits}") + if self.bits == 8: + if self.ratio != 1: + raise ValueError( + f"For 8-bit quantization, `ratio` is expected to be set to 1.0, but was set to {self.ratio}" + ) + if self.group_size != -1: + raise ValueError( + f"For 8-bit quantization, `group_size` is expected to be set to -1, but was set to {self.group_size}" + ) + def _check_default_4bit_configs(config: PretrainedConfig): - return DEFAULT_4BIT_CONFIGS.get(config.name_or_path, None) + return _DEFAULT_4BIT_CONFIGS.get(config.name_or_path, None) diff --git a/optimum/intel/openvino/modeling.py b/optimum/intel/openvino/modeling.py index 558cc3b904..7831305d5f 100644 --- a/optimum/intel/openvino/modeling.py +++ b/optimum/intel/openvino/modeling.py @@ -16,7 +16,7 @@ import os from pathlib import Path from tempfile import TemporaryDirectory -from typing import Optional, Union +from typing import Dict, Optional, Union import numpy as np import openvino @@ -53,6 +53,7 @@ from ...exporters.openvino import main_export from ..utils.import_utils import is_timm_available, is_timm_version +from .configuration import OVConfig, OVWeightQuantizationConfig from .modeling_base import OVBaseModel from .utils import _is_timm_ov_dir @@ -427,14 +428,17 @@ def _from_transformers( task: Optional[str] = None, trust_remote_code: bool = False, load_in_8bit: Optional[bool] = None, - load_in_4bit: Optional[bool] = None, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) - # If load_in_8bit is not specified then compression_option should be set to None and will be set by default in main_export depending on the model size - compression_option = "fp32" if load_in_8bit is not None else None + # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None or not quantization_config: + ov_config = None + else: + ov_config = OVConfig(dtype="fp32") # 
OVModelForFeatureExtraction works with Transformers type of models, thus even sentence-transformers models are loaded as such. main_export( @@ -448,12 +452,18 @@ def _from_transformers( local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, - compression_option=compression_option, + ov_config=ov_config, library_name="transformers", ) config.save_pretrained(save_dir_path) - return cls._from_pretrained(model_id=save_dir_path, config=config, load_in_8bit=load_in_8bit, **kwargs) + return cls._from_pretrained( + model_id=save_dir_path, + config=config, + load_in_8bit=load_in_8bit, + quantization_config=quantization_config, + **kwargs, + ) MASKED_LM_EXAMPLE = r""" diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index 094840c297..51633b0210 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -31,6 +31,7 @@ from ...exporters.openvino import export, main_export from ..utils.import_utils import is_nncf_available +from .configuration import OVConfig, OVWeightQuantizationConfig from .utils import ONNX_WEIGHTS_NAME, OV_XML_FILE_NAME, _print_compiled_model_properties @@ -91,7 +92,7 @@ def __init__( self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None @staticmethod - def load_model(file_name: Union[str, Path], load_in_8bit: bool = False): + def load_model(file_name: Union[str, Path], quantization_config: Union[OVWeightQuantizationConfig, Dict] = None): """ Loads the model. @@ -118,14 +119,15 @@ def fix_op_names_duplicates(model: openvino.runtime.Model): if file_name.suffix == ".onnx": model = fix_op_names_duplicates(model) # should be called during model conversion to IR - if load_in_8bit: + if quantization_config: if not is_nncf_available(): raise ImportError( "Quantization of the weights to int8 requires nncf, please install it with `pip install nncf`" ) - import nncf - model = nncf.compress_weights(model) + from optimum.intel.openvino.quantization import _weight_only_quantization + + model = _weight_only_quantization(model, quantization_config) return model @@ -155,6 +157,7 @@ def _from_pretrained( from_onnx: bool = False, local_files_only: bool = False, load_in_8bit: bool = False, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): """ @@ -199,7 +202,12 @@ def _from_pretrained( subfolder=subfolder, local_files_only=local_files_only, ) - model = cls.load_model(model_cache_path, load_in_8bit=load_in_8bit) + + # Give default quantization config if not provided and load_in_8bit=True + if load_in_8bit: + quantization_config = quantization_config or {"bits": 8} + + model = cls.load_model(model_cache_path, quantization_config=quantization_config) return cls(model, config=config, model_save_dir=model_cache_path.parent, **kwargs) @staticmethod @@ -252,6 +260,7 @@ def _from_transformers( task: Optional[str] = None, trust_remote_code: bool = False, load_in_8bit: Optional[bool] = None, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): """ @@ -275,10 +284,11 @@ def _from_transformers( save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) - # If load_in_8bit is not specified then compression_option should be set to None and will be set by default in main_export depending on the model size - compression_option = None - if load_in_8bit is not None: - compression_option = "fp32" + # If load_in_8bit or quantization_config not specified then ov_config 
is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None or not quantization_config: + ov_config = None + else: + ov_config = OVConfig(dtype="fp32") main_export( model_name_or_path=model_id, @@ -291,11 +301,17 @@ def _from_transformers( local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, - compression_option=compression_option, + ov_config=ov_config, ) config.save_pretrained(save_dir_path) - return cls._from_pretrained(model_id=save_dir_path, config=config, load_in_8bit=load_in_8bit, **kwargs) + return cls._from_pretrained( + model_id=save_dir_path, + config=config, + load_in_8bit=load_in_8bit, + quantization_config=quantization_config, + **kwargs, + ) @classmethod def _to_load( diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index b6fc14e081..df9449b0b5 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -25,6 +25,7 @@ from transformers.file_utils import add_start_docstrings from ...exporters.openvino import main_export +from .configuration import OVConfig, OVWeightQuantizationConfig from .modeling_base import OVBaseModel from .utils import ( ONNX_DECODER_NAME, @@ -111,6 +112,7 @@ def _from_pretrained( use_cache: bool = True, from_onnx: bool = False, load_in_8bit: bool = False, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): """ @@ -152,12 +154,19 @@ def _from_pretrained( decoder_file_name = decoder_file_name or default_decoder_file_name decoder_with_past_file_name = decoder_with_past_file_name or default_decoder_with_past_file_name decoder_with_past = None + + # Give default quantization config if not provided and load_in_8bit=True + if load_in_8bit: + quantization_config = quantization_config or {"bits": 8} + # Load model from a local directory if os.path.isdir(model_id): - encoder = cls.load_model(os.path.join(model_id, encoder_file_name), load_in_8bit) - decoder = cls.load_model(os.path.join(model_id, decoder_file_name), load_in_8bit) + encoder = cls.load_model(os.path.join(model_id, encoder_file_name), quantization_config) + decoder = cls.load_model(os.path.join(model_id, decoder_file_name), quantization_config) if use_cache: - decoder_with_past = cls.load_model(os.path.join(model_id, decoder_with_past_file_name), load_in_8bit) + decoder_with_past = cls.load_model( + os.path.join(model_id, decoder_with_past_file_name), quantization_config + ) model_save_dir = Path(model_id) @@ -185,10 +194,10 @@ def _from_pretrained( file_names[name] = model_cache_path model_save_dir = Path(model_cache_path).parent - encoder = cls.load_model(file_names["encoder"], load_in_8bit) - decoder = cls.load_model(file_names["decoder"], load_in_8bit) + encoder = cls.load_model(file_names["encoder"], quantization_config) + decoder = cls.load_model(file_names["decoder"], quantization_config) if use_cache: - decoder_with_past = cls.load_model(file_names["decoder_with_past"], load_in_8bit) + decoder_with_past = cls.load_model(file_names["decoder_with_past"], quantization_config) return cls( encoder=encoder, @@ -214,6 +223,7 @@ def _from_transformers( use_cache: bool = True, trust_remote_code: bool = False, load_in_8bit: Optional[bool] = None, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): """ @@ -240,13 +250,15 @@ def _from_transformers( if task is None: task = cls.export_feature - if use_cache: task = task + 
"-with-past" - compression_option = None - if load_in_8bit is not None: - compression_option = "fp32" + # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None or not quantization_config: + ov_config = None + else: + ov_config = OVConfig(dtype="fp32") + main_export( model_name_or_path=model_id, output=save_dir_path, @@ -258,12 +270,17 @@ def _from_transformers( local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, - compression_option=compression_option, + ov_config=ov_config, ) config.save_pretrained(save_dir_path) return cls._from_pretrained( - model_id=save_dir_path, config=config, use_cache=use_cache, load_in_8bit=load_in_8bit, **kwargs + model_id=save_dir_path, + config=config, + use_cache=use_cache, + load_in_8bit=load_in_8bit, + quantization_config=quantization_config, + **kwargs, ) def _reshape(self, model: openvino.runtime.Model, batch_size: int, sequence_length: int, is_decoder=True): diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 8bcf877bff..c0274d3f5b 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -23,7 +23,7 @@ import torch from openvino.preprocess import PrePostProcessor from openvino.runtime import Core, Tensor, Type -from transformers import AutoModelForCausalLM, PretrainedConfig +from transformers import AutoModelForCausalLM, AutoTokenizer, PretrainedConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.generation import GenerationMixin from transformers.modeling_outputs import CausalLMOutputWithPast @@ -34,7 +34,7 @@ from ...exporters.openvino.stateful import model_has_state from ..utils.import_utils import is_nncf_available from ..utils.modeling_utils import MULTI_QUERY_ATTN_MODELS -from .configuration import OVWeightQuantizationConfig, _check_default_4bit_configs +from .configuration import _DEFAULT_4BIT_CONFIGS, OVConfig, OVWeightQuantizationConfig, _check_default_4bit_configs from .modeling import _TOKENIZER_FOR_DOC, INPUTS_DOCSTRING, MODEL_START_DOCSTRING, OVModel from .utils import ONNX_WEIGHTS_NAME, OV_XML_FILE_NAME, STR_TO_OV_TYPE @@ -252,16 +252,17 @@ def _from_transformers( if task is None: task = cls.export_feature - if use_cache: task = task + "-with-past" - # If load_in_8bit is not specified then compression_option should be set to None and will be set by default in main_export depending on the model size - compression_option = None - if load_in_8bit is not None or quantization_config is not None: - compression_option = "fp32" + # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None or not quantization_config: + ov_config = None + else: + ov_config = OVConfig(dtype="fp32") stateful = kwargs.pop("stateful", ensure_stateful_is_available(warn=False) and use_cache) + main_export( model_name_or_path=model_id, output=save_dir_path, @@ -273,7 +274,7 @@ def _from_transformers( local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, - compression_option=compression_option, + ov_config=ov_config, stateful=stateful, ) @@ -284,8 +285,8 @@ def _from_transformers( model_id=save_dir_path, config=config, use_cache=use_cache, - load_in_8bit=load_in_8bit, 
stateful=None, + load_in_8bit=load_in_8bit, quantization_config=quantization_config, **kwargs, ) @@ -575,11 +576,18 @@ def _from_pretrained( local_files_only=local_files_only, ) + # Give default quantization config if not provided and load_in_8bit=True + if load_in_8bit: + quantization_config = quantization_config or {"bits": 8} + if isinstance(quantization_config, dict): + if quantization_config == {"bits": 4} and config.name_or_path in _DEFAULT_4BIT_CONFIGS: + quantization_config = _DEFAULT_4BIT_CONFIGS[config.name_or_path] + quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config) load_in_4bit = quantization_config.bits == 4 if quantization_config else False - model = cls.load_model(model_cache_path, load_in_8bit=False if load_in_4bit else load_in_8bit) + model = cls.load_model(model_cache_path, quantization_config=None if load_in_4bit else quantization_config) model_type = config.model_type.replace("_", "-") if model_type == "bloom": @@ -603,6 +611,8 @@ def _from_pretrained( raise ImportError( "Quantization of the weights requires nncf, please install it with `pip install nncf`" ) + import nncf + from .quantization import _weight_only_quantization default_config = _check_default_4bit_configs(config) @@ -612,7 +622,20 @@ def _from_pretrained( f"For the given model, we recommend the following `quantization_config` : {default_config}" ) - _weight_only_quantization(causal_model, quantization_config) + if isinstance(quantization_config.dataset, str): + tokenizer = quantization_config.tokenizer or AutoTokenizer.from_pretrained(model_id) + + from optimum.gptq.data import get_dataset, prepare_dataset + + # from optimum.gptq.utils import get_seqlen + + # seqlen = get_seqlen(causal_model) + dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32) + dataset = prepare_dataset(dataset) + quantization_config.dataset = nncf.Dataset(dataset, lambda x: causal_model.prepare_inputs(**x)) + + _weight_only_quantization(model, quantization_config) + return causal_model diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 867354a543..5633f852a8 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -54,6 +54,7 @@ ) from ...exporters.openvino import main_export +from .configuration import OVConfig, OVWeightQuantizationConfig from .loaders import OVTextualInversionLoaderMixin from .modeling_base import OVBaseModel from .utils import ONNX_WEIGHTS_NAME, OV_TO_NP_TYPE, OV_XML_FILE_NAME, _print_compiled_model_properties @@ -193,6 +194,7 @@ def _from_pretrained( from_onnx: bool = False, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, load_in_8bit: bool = False, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): default_file_name = ONNX_WEIGHTS_NAME if from_onnx else OV_XML_FILE_NAME @@ -255,8 +257,12 @@ def _from_pretrained( else: kwargs[name] = load_method(new_model_save_dir) + # Give default quantization config if not provided and load_in_8bit=True + if load_in_8bit: + quantization_config = quantization_config or {"bits": 8} + unet = cls.load_model( - new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, load_in_8bit=load_in_8bit + new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config ) components = { @@ -267,7 +273,7 @@ def _from_pretrained( } for key, value in components.items(): - components[key] = cls.load_model(value, load_in_8bit=load_in_8bit) if 
value.is_file() else None + components[key] = cls.load_model(value, quantization_config) if value.is_file() else None if model_save_dir is None: model_save_dir = new_model_save_dir @@ -287,16 +293,19 @@ def _from_transformers( tokenizer: Optional["CLIPTokenizer"] = None, scheduler: Union["DDIMScheduler", "PNDMScheduler", "LMSDiscreteScheduler"] = None, feature_extractor: Optional["CLIPFeatureExtractor"] = None, - load_in_8bit: Optional[bool] = None, tokenizer_2: Optional["CLIPTokenizer"] = None, + load_in_8bit: Optional[bool] = None, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) - compression_option = None - if load_in_8bit is not None: - compression_option = "int8" if load_in_8bit else "fp32" + # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None or not quantization_config: + ov_config = None + else: + ov_config = OVConfig(dtype="fp32") main_export( model_name_or_path=model_id, @@ -309,7 +318,7 @@ def _from_transformers( use_auth_token=use_auth_token, local_files_only=local_files_only, force_download=force_download, - compression_option=compression_option, + ov_config=ov_config, ) return cls._from_pretrained( @@ -326,7 +335,8 @@ def _from_transformers( tokenizer_2=tokenizer_2, scheduler=scheduler, feature_extractor=feature_extractor, - load_in_8bit=False, + load_in_8bit=load_in_8bit, + quantization_config=quantization_config, **kwargs, ) diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index 5fd743c5b4..5ec4eac556 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -231,6 +231,9 @@ def quantize( ) ov_config = ov_config or quantization_config + if ov_config is not None and not isinstance(ov_config, OVConfig): + raise TypeError(f"`ov_config` should be an `OVConfig`, but got: {type(ov_config)} instead.") + if isinstance(self.model, OVBaseModel): self._quantize_ovbasemodel( calibration_dataset, @@ -244,6 +247,10 @@ def quantize( ) elif isinstance(self.model, torch.nn.Module): + logger.warning( + "The support of `torch.nn.Module` will be deprecated in a future release of optimum-intel, please use the corresponding `OVModelForXxx` class to load you model." 
+ "To convert a PyTorch model to OpenVINO, you can set `export=True` when loading your model as `OVModelForXxx.from_pretrained(..., export=True)`" + ) self._quantize_torchmodel( calibration_dataset, save_directory, @@ -272,11 +279,10 @@ def _quantize_ovbasemodel( save_directory.mkdir(parents=True, exist_ok=True) if weights_only: + q_config = getattr(ov_config, "quantization_config", None) # Use default 8-bit compression if not provided - q_config = ( - OVWeightQuantizationConfig(bits=8, sym=True) if ov_config is None else ov_config.quantization_config - ) - _weight_only_quantization(self.model, q_config) + q_config = q_config or OVWeightQuantizationConfig(bits=8, sym=True) + _weight_only_quantization(self.model.model, q_config) self.model.save_pretrained(save_directory) return @@ -529,9 +535,9 @@ def _remove_unused_columns(self, dataset: "Dataset"): return dataset.remove_columns(ignored_columns) -def _weight_only_quantization(model: OVBaseModel, quantization_config: Union[OVWeightQuantizationConfig, Dict]): - ov_model = model.model - +def _weight_only_quantization( + model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict] +): config = quantization_config if isinstance(config, dict): config = OVWeightQuantizationConfig.from_dict(quantization_config) @@ -540,16 +546,13 @@ def _weight_only_quantization(model: OVBaseModel, quantization_config: Union[OVW if config.dataset is not None and isinstance(config.dataset, str): tokenizer = config.tokenizer - if tokenizer is None: - tokenizer = AutoTokenizer.from_pretrained(model.config.name_or_path) - elif isinstance(tokenizer, str): + if isinstance(tokenizer, str): tokenizer = AutoTokenizer.from_pretrained(tokenizer) from optimum.gptq.data import get_dataset, prepare_dataset dataset = get_dataset(config.dataset, tokenizer, seqlen=32) dataset = prepare_dataset(dataset) - dataset = nncf.Dataset(dataset, lambda x: model.prepare_inputs(**x)) sensitivity_metric = None if isinstance(config.sensitivity_metric, str): @@ -564,8 +567,8 @@ def _weight_only_quantization(model: OVBaseModel, quantization_config: Union[OVW else: mode = CompressWeightsMode.INT4_SYM if config.sym else CompressWeightsMode.INT4_ASYM - model.model = nncf.compress_weights( - ov_model, + return nncf.compress_weights( + model, mode=mode, ratio=config.ratio, group_size=config.group_size, diff --git a/optimum/intel/utils/dummy_openvino_and_nncf_objects.py b/optimum/intel/utils/dummy_openvino_and_nncf_objects.py index b940772207..45c390aff2 100644 --- a/optimum/intel/utils/dummy_openvino_and_nncf_objects.py +++ b/optimum/intel/utils/dummy_openvino_and_nncf_objects.py @@ -46,25 +46,3 @@ def __init__(self, *args, **kwargs): @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["openvino", "nncf"]) - - -class OVConfig(metaclass=DummyObject): - _backends = ["openvino", "nncf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["openvino", "nncf"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["openvino", "nncf"]) - - -class OVWeightQuantizationConfig(metaclass=DummyObject): - _backends = ["openvino", "nncf"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["openvino", "nncf"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["openvino", "nncf"]) diff --git a/optimum/intel/utils/dummy_openvino_objects.py b/optimum/intel/utils/dummy_openvino_objects.py index 9e17035d70..81a05d1eca 100644 --- 
a/optimum/intel/utils/dummy_openvino_objects.py +++ b/optimum/intel/utils/dummy_openvino_objects.py @@ -167,3 +167,25 @@ def __init__(self, *args, **kwargs): @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["openvino"]) + + +class OVConfig(metaclass=DummyObject): + _backends = ["openvino"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["openvino"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["openvino"]) + + +class OVWeightQuantizationConfig(metaclass=DummyObject): + _backends = ["openvino"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["openvino"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["openvino"]) From 32e8fa2971fdea8a1887ca9373f08ab8a9381019 Mon Sep 17 00:00:00 2001 From: Lyalyushkin Nikolay Date: Wed, 28 Feb 2024 17:51:39 +0100 Subject: [PATCH 16/64] Add default quantization int4 config for Mixtral-8x7B (#576) --- optimum/intel/openvino/configuration.py | 1 + 1 file changed, 1 insertion(+) diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 6611e5d0dd..9f3e3a06ca 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -96,6 +96,7 @@ "openlm-research/open_llama_3b": {"bits": 4, "sym": True, "group_size": 64, "all_layers": True}, "tiiuae/falcon-7b": {"bits": 4, "sym": True, "group_size": 64, "all_layers": True}, "psmathur/orca_mini_3b": {"bits": 4, "sym": True, "group_size": 64, "all_layers": True}, + "mistralai/Mixtral-8x7B-v0.1": {"bits": 4, "sym": True, "group_size": 128, "ratio": 0.8}, } From e3f009b016fc509aac4e9f40151f9724f6e33320 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Thu, 29 Feb 2024 09:24:39 +0100 Subject: [PATCH 17/64] Update stable diffusion example requirements (#579) --- examples/openvino/stable-diffusion/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/openvino/stable-diffusion/requirements.txt b/examples/openvino/stable-diffusion/requirements.txt index 8dd237913a..014b0645c8 100644 --- a/examples/openvino/stable-diffusion/requirements.txt +++ b/examples/openvino/stable-diffusion/requirements.txt @@ -1,5 +1,6 @@ accelerate diffusers torch~=1.13 -nncf @ git+https://github.com/openvinotoolkit/nncf.git +torchvision~=0.14 +nncf tomesd @ git+https://github.com/AlexKoff88/tomesd.git@openvino From 6621611e7e69d2e91a27bc18738d698dedb41c9f Mon Sep 17 00:00:00 2001 From: Nikita Savelyev Date: Fri, 1 Mar 2024 10:44:30 +0100 Subject: [PATCH 18/64] Fix collecting duplicate tensors in quantization calibration dataset (#577) * Added deepcopying of inputs collected by InferRequestWrapper. Added a test covering the fixed issue. * Phrasing tweaks * Add soundfile to test requirements * Added librosa to test requirements * Added copying to other data cache appends * Remove the need for real test data * Process __call__ call properly * Addressed suggested changes --- optimum/intel/openvino/quantization.py | 10 +++++-- tests/openvino/test_quantization.py | 40 ++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 3 deletions(-) diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index 5ec4eac556..331248e023 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import copy import inspect import logging import os @@ -87,11 +88,14 @@ def __init__(self, request, data_cache=None): self.data_cache = data_cache def __call__(self, *args, **kwargs): - self.data_cache.append(*args) + # If __call__ is invoked then self.request must be an instance of CompiledModel + signature = inspect.signature(self.request) + bound_args = signature.bind(*args, **kwargs).arguments + self.data_cache.append(copy.deepcopy(bound_args["inputs"])) return self.request(*args, **kwargs) def infer(self, inputs: Any = None, share_inputs: bool = False): - self.data_cache.append(inputs) + self.data_cache.append(copy.deepcopy(inputs)) return self.request.infer(inputs, share_inputs) def start_async( @@ -102,7 +106,7 @@ def start_async( *, shared_memory: Any = None, ): - self.data_cache.append(inputs) + self.data_cache.append(copy.deepcopy(inputs)) self.request.infer(inputs, share_inputs, share_outputs=True) def wait(self): diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 07a9f14774..16f848da9e 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -16,10 +16,12 @@ import tempfile import unittest +from collections import defaultdict from functools import partial import evaluate import numpy as np +import torch from datasets import load_dataset from parameterized import parameterized import openvino.runtime as ov @@ -30,6 +32,7 @@ AutoModelForCausalLM, AutoModelForTokenClassification, AutoTokenizer, + AutoProcessor, TrainingArguments, default_data_collator, ) @@ -45,6 +48,7 @@ OVModelForSeq2SeqLM, OVModelForSequenceClassification, OVModelForTokenClassification, + OVModelForSpeechSeq2Seq, OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVQuantizer, @@ -54,6 +58,7 @@ from optimum.intel.openvino.configuration import INT8_WEIGHT_COMPRESSION_CONFIG +from optimum.intel.openvino.quantization import InferRequestWrapper from optimum.intel.utils.import_utils import is_openvino_version from utils_tests import MODEL_NAMES, get_num_quantized_nodes, _ARCHITECTURES_TO_EXPECTED_INT8 @@ -589,3 +594,38 @@ def compute_metrics(p): tokens = tokenizer("This is a sample input", return_tensors="pt") outputs = model(**tokens) self.assertTrue("logits" in outputs) + + +class InferRequestWrapperTest(unittest.TestCase): + MODEL_ID = ("openai/whisper-tiny.en",) + + @staticmethod + def _generate_random_audio_data(processor): + t = np.linspace(0, 1.0, int(1000), endpoint=False) + audio_data = 0.5 * np.sin((2 + np.random.random()) * np.pi * t) + input_features = processor( + audio_data, + sampling_rate=16000, + return_tensors="pt", + ).input_features + return input_features + + @parameterized.expand(MODEL_ID) + def test_calibration_data_uniqueness(self, model_id): + ov_model = OVModelForSpeechSeq2Seq.from_pretrained(model_id, export=True, compile=True) + processor = AutoProcessor.from_pretrained(model_id) + + calibration_data = [] + ov_model.decoder_with_past.request = InferRequestWrapper(ov_model.decoder_with_past.request, calibration_data) + for _ in range(2): + input_features = self._generate_random_audio_data(processor) + ov_model.generate(input_features) + + data_hashes_per_key = defaultdict(list) + for inputs_dict in calibration_data: + for k, v in inputs_dict.items(): + x = (v.numpy() if isinstance(v, torch.Tensor) else v).copy() + data_hashes_per_key[k].append(hash(x.tobytes())) + for k, data_hashes in data_hashes_per_key.items(): + # All hashes can not be equal because calibration dataset contains at least 2 different 
samples + self.assertTrue(any(data_hashes[0] != it for it in data_hashes)) From ca33bed5fdd49c40d9db4fc2600926d750a4d55d Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Fri, 1 Mar 2024 15:00:40 +0100 Subject: [PATCH 19/64] Save an openvino config summarizing all information related to quantization when saving model (#578) * fix doc * remove default compression value * set default compression config when not provided * save openvino config to include quantization configuration * fix style * add test * update setup * style * remove from quantization_config key from ov_config * add test * update setup * modify method name --- docs/source/optimization_ov.mdx | 3 - optimum/intel/openvino/configuration.py | 2 +- optimum/intel/openvino/modeling_base.py | 38 +++++++++-- .../intel/openvino/modeling_base_seq2seq.py | 11 +++- optimum/intel/openvino/modeling_decoder.py | 24 ++++--- optimum/intel/openvino/modeling_diffusion.py | 21 ++++-- optimum/intel/openvino/quantization.py | 11 ++-- optimum/intel/openvino/trainer.py | 66 ++++++++++--------- setup.py | 12 +--- tests/openvino/test_quantization.py | 31 ++++++--- 10 files changed, 139 insertions(+), 80 deletions(-) diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 77dab40159..5686af4bf3 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -38,8 +38,6 @@ save_dir = "ptq_model" def preprocess_function(examples, tokenizer): return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True) -# Load the default quantization configuration detailing the quantization we wish to apply -quantization_config = OVConfig() # Instantiate our OVQuantizer using the desired configuration quantizer = OVQuantizer.from_pretrained(model) # Create the calibration dataset used to perform static quantization @@ -52,7 +50,6 @@ calibration_dataset = quantizer.get_calibration_dataset( ) # Apply static quantization and export the resulting quantized model to OpenVINO IR format quantizer.quantize( - quantization_config=quantization_config, calibration_dataset=calibration_dataset, save_directory=save_dir, ) diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 9f3e3a06ca..8ddd005279 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -114,7 +114,7 @@ def __init__( **kwargs, ): super().__init__() - self.compression = compression or DEFAULT_QUANTIZATION_CONFIG + self.compression = compression self.input_info = input_info self.save_onnx_model = save_onnx_model self._enable_standard_onnx_export_option() diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index 51633b0210..af00f7a06e 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -57,6 +57,7 @@ def __init__( dynamic_shapes: bool = True, ov_config: Optional[Dict[str, str]] = None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, + quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, **kwargs, ): self.config = config @@ -91,6 +92,10 @@ def __init__( self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None + self._openvino_config = None + if quantization_config: + self._openvino_config = OVConfig(quantization_config=quantization_config) + @staticmethod def load_model(file_name: Union[str, Path], quantization_config: 
Union[OVWeightQuantizationConfig, Dict] = None): """ @@ -143,6 +148,15 @@ def _save_pretrained(self, save_directory: Union[str, Path]): dst_path = os.path.join(save_directory, OV_XML_FILE_NAME) openvino.save_model(self.model, dst_path, compress_to_fp16=False) + self._save_openvino_config(save_directory) + + def _save_openvino_config(self, save_directory: Union[str, Path]): + if self._openvino_config is not None: + if not isinstance(self._openvino_config.quantization_config.dataset, (str, type(None))): + self._openvino_config.quantization_config.dataset = None + + self._openvino_config.save_pretrained(save_directory) + @classmethod def _from_pretrained( cls, @@ -203,12 +217,28 @@ def _from_pretrained( local_files_only=local_files_only, ) - # Give default quantization config if not provided and load_in_8bit=True - if load_in_8bit: - quantization_config = quantization_config or {"bits": 8} + quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) model = cls.load_model(model_cache_path, quantization_config=quantization_config) - return cls(model, config=config, model_save_dir=model_cache_path.parent, **kwargs) + return cls( + model, + config=config, + model_save_dir=model_cache_path.parent, + quantization_config=quantization_config, + **kwargs, + ) + + @staticmethod + def _prepare_weight_quantization_config( + quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, load_in_8bit: bool = False + ): + # Give default quantization config if not provided and load_in_8bit=True + if not quantization_config and load_in_8bit: + quantization_config = OVWeightQuantizationConfig(bits=8) + elif isinstance(quantization_config, dict): + quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config) + + return quantization_config @staticmethod def _cached_file( diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index df9449b0b5..3cb43e61b8 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -58,6 +58,7 @@ def __init__( dynamic_shapes: bool = True, ov_config: Optional[Dict[str, str]] = None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, + quantization_config: Union[OVWeightQuantizationConfig, Dict] = None, **kwargs, ): self.config = config @@ -76,6 +77,9 @@ def __init__( self.decoder_model = decoder self.decoder_with_past_model = decoder_with_past self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None + self._openvino_config = None + if quantization_config: + self._openvino_config = OVConfig(quantization_config=quantization_config) def _save_pretrained(self, save_directory: Union[str, Path]): """ @@ -96,6 +100,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]): dst_path = os.path.join(save_directory, dst_file_name) openvino.save_model(src_file, dst_path, compress_to_fp16=False) + self._save_openvino_config(save_directory) + @classmethod def _from_pretrained( cls, @@ -155,9 +161,7 @@ def _from_pretrained( decoder_with_past_file_name = decoder_with_past_file_name or default_decoder_with_past_file_name decoder_with_past = None - # Give default quantization config if not provided and load_in_8bit=True - if load_in_8bit: - quantization_config = quantization_config or {"bits": 8} + quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) # Load model from a local directory if 
os.path.isdir(model_id): @@ -205,6 +209,7 @@ def _from_pretrained( decoder_with_past=decoder_with_past, config=config, model_save_dir=model_save_dir, + quantization_config=quantization_config, **kwargs, ) diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index c0274d3f5b..92a2ce436d 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import logging import os from pathlib import Path @@ -100,6 +101,7 @@ def __init__( dynamic_shapes: bool = True, ov_config: Optional[Dict[str, str]] = None, model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None, + quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, **kwargs, ): if not dynamic_shapes: @@ -117,6 +119,7 @@ def __init__( dynamic_shapes=False, ov_config=ov_config, model_save_dir=model_save_dir, + quantization_config=quantization_config, **kwargs, ) @@ -224,6 +227,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]): dst_path = os.path.join(save_directory, OV_XML_FILE_NAME) openvino.save_model(model_to_save, dst_path, compress_to_fp16=False) + self._save_openvino_config(save_directory) + @classmethod def _from_transformers( cls, @@ -576,15 +581,10 @@ def _from_pretrained( local_files_only=local_files_only, ) - # Give default quantization config if not provided and load_in_8bit=True - if load_in_8bit: - quantization_config = quantization_config or {"bits": 8} - - if isinstance(quantization_config, dict): - if quantization_config == {"bits": 4} and config.name_or_path in _DEFAULT_4BIT_CONFIGS: - quantization_config = _DEFAULT_4BIT_CONFIGS[config.name_or_path] + if isinstance(quantization_config, dict) and quantization_config == {"bits": 4}: + quantization_config = _DEFAULT_4BIT_CONFIGS.get(config.name_or_path, quantization_config) - quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config) + quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) load_in_4bit = quantization_config.bits == 4 if quantization_config else False model = cls.load_model(model_cache_path, quantization_config=None if load_in_4bit else quantization_config) @@ -603,7 +603,12 @@ def _from_pretrained( enable_compilation = kwargs.pop("compile", True) and not load_in_4bit causal_model = init_cls( - model=model, config=config, model_save_dir=model_cache_path.parent, compile=enable_compilation, **kwargs + model=model, + config=config, + model_save_dir=model_cache_path.parent, + compile=enable_compilation, + quantization_config=quantization_config, + **kwargs, ) if load_in_4bit: @@ -632,6 +637,7 @@ def _from_pretrained( # seqlen = get_seqlen(causal_model) dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32) dataset = prepare_dataset(dataset) + quantization_config = copy.deepcopy(quantization_config) quantization_config.dataset = nncf.Dataset(dataset, lambda x: causal_model.prepare_inputs(**x)) _weight_only_quantization(model, quantization_config) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 5633f852a8..1570a22457 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -87,6 +87,7 @@ def __init__( compile: bool = True, ov_config: Optional[Dict[str, str]] = None, model_save_dir: Optional[Union[str, 
Path, TemporaryDirectory]] = None, + quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, **kwargs, ): self._internal_dict = config @@ -140,6 +141,10 @@ def __init__( self._internal_dict.pop("vae", None) + self._openvino_config = None + if quantization_config: + self._openvino_config = OVConfig(quantization_config=quantization_config) + def _save_pretrained(self, save_directory: Union[str, Path]): """ Saves the model to the OpenVINO IR format so that it can be re-loaded using the @@ -177,6 +182,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]): if self.tokenizer_2 is not None: self.tokenizer_2.save_pretrained(save_directory / "tokenizer_2") + self._save_openvino_config(save_directory) + @classmethod def _from_pretrained( cls, @@ -257,10 +264,7 @@ def _from_pretrained( else: kwargs[name] = load_method(new_model_save_dir) - # Give default quantization config if not provided and load_in_8bit=True - if load_in_8bit: - quantization_config = quantization_config or {"bits": 8} - + quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) unet = cls.load_model( new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config ) @@ -278,7 +282,14 @@ def _from_pretrained( if model_save_dir is None: model_save_dir = new_model_save_dir - return cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs) + return cls( + unet=unet, + config=config, + model_save_dir=model_save_dir, + quantization_config=quantization_config, + **components, + **kwargs, + ) @classmethod def _from_transformers( diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index 331248e023..d7b88f2be3 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -45,7 +45,7 @@ from ..utils.constant import _TASK_ALIASES from ..utils.import_utils import DATASETS_IMPORT_ERROR, is_datasets_available from ..utils.modeling_utils import get_model_device -from .configuration import OVConfig, OVWeightQuantizationConfig +from .configuration import DEFAULT_QUANTIZATION_CONFIG, OVConfig, OVWeightQuantizationConfig from .modeling_base import OVBaseModel from .utils import ( MAX_ONNX_OPSET, @@ -235,8 +235,11 @@ def quantize( ) ov_config = ov_config or quantization_config - if ov_config is not None and not isinstance(ov_config, OVConfig): - raise TypeError(f"`ov_config` should be an `OVConfig`, but got: {type(ov_config)} instead.") + if ov_config is not None: + if not isinstance(ov_config, OVConfig): + raise TypeError(f"`ov_config` should be an `OVConfig`, but got: {type(ov_config)} instead.") + elif ov_config.compression is None: + ov_config.compression = DEFAULT_QUANTIZATION_CONFIG if isinstance(self.model, OVBaseModel): self._quantize_ovbasemodel( @@ -355,7 +358,7 @@ def _quantize_torchmodel( logger.info( "No configuration describing the quantization process was provided, a default OVConfig will be generated." 
) - ov_config = OVConfig() + ov_config = OVConfig(compression=DEFAULT_QUANTIZATION_CONFIG) onnx_file_name = ( ONNX_WEIGHTS_NAME if file_name is None and ov_config.save_onnx_model diff --git a/optimum/intel/openvino/trainer.py b/optimum/intel/openvino/trainer.py index 5c7d392292..b7d110c96a 100644 --- a/optimum/intel/openvino/trainer.py +++ b/optimum/intel/openvino/trainer.py @@ -89,7 +89,7 @@ from ..utils.constant import _TASK_ALIASES from ..utils.import_utils import is_transformers_version -from .configuration import OVConfig +from .configuration import DEFAULT_QUANTIZATION_CONFIG, OVConfig from .quantization import OVDataLoader from .training_args import OVTrainingArguments from .utils import ( @@ -225,37 +225,41 @@ def __init__( self.teacher.eval() self.compression_controller = None - if self.ov_config is not None and self.args.do_train: - self._set_task() - train_dataloader = self.get_train_dataloader() - model_inputs = next(iter(train_dataloader)) - for label_name in self.label_names: - model_inputs.pop(label_name) - force_batch_one = self._is_pruning_enabled() - self.ov_config.add_input_info(model_inputs, force_batch_one) - nncf_config = NNCFConfig.from_dict(self.ov_config.__dict__) - nncf_config.register_extra_structs( - [ - QuantizationRangeInitArgs(OVDataLoader(train_dataloader)), - BNAdaptationInitArgs(OVDataLoader(train_dataloader)), - ] - ) + if self.ov_config is not None: + if self.ov_config.compression is None: + self.ov_config.compression = DEFAULT_QUANTIZATION_CONFIG + + if self.args.do_train: + self._set_task() + train_dataloader = self.get_train_dataloader() + model_inputs = next(iter(train_dataloader)) + for label_name in self.label_names: + model_inputs.pop(label_name) + force_batch_one = self._is_pruning_enabled() + self.ov_config.add_input_info(model_inputs, force_batch_one) + nncf_config = NNCFConfig.from_dict(self.ov_config.__dict__) + nncf_config.register_extra_structs( + [ + QuantizationRangeInitArgs(OVDataLoader(train_dataloader)), + BNAdaptationInitArgs(OVDataLoader(train_dataloader)), + ] + ) - # Configure NNCF logging - # Disable nncf logging to stdout except error - # but to file nncf_output.log - nncf_config["log_dir"] = args.output_dir - nncf_log_file_handler = logging.logging.FileHandler(os.path.join(args.output_dir, NNCF_LOG_FILE_NAME)) - nncf_log_file_handler.setFormatter(logging.logging.Formatter("%(levelname)s:%(name)s:%(message)s")) - nncf_logger.addHandler(nncf_log_file_handler) - set_log_level(logging.ERROR) - nncf_logger.setLevel(logging.INFO) - nncf_log_file_handler.setLevel(logging.INFO) - - self.compression_controller, self.model = create_compressed_model(self.model, nncf_config) - self.model_wrapped = self.model - # TODO : To deprecate once support transformers > 4.30.0 - self.deepspeed = None + # Configure NNCF logging + # Disable nncf logging to stdout except error + # but to file nncf_output.log + nncf_config["log_dir"] = args.output_dir + nncf_log_file_handler = logging.logging.FileHandler(os.path.join(args.output_dir, NNCF_LOG_FILE_NAME)) + nncf_log_file_handler.setFormatter(logging.logging.Formatter("%(levelname)s:%(name)s:%(message)s")) + nncf_logger.addHandler(nncf_log_file_handler) + set_log_level(logging.ERROR) + nncf_logger.setLevel(logging.INFO) + nncf_log_file_handler.setLevel(logging.INFO) + + self.compression_controller, self.model = create_compressed_model(self.model, nncf_config) + self.model_wrapped = self.model + # TODO : To deprecate once support transformers > 4.30.0 + self.deepspeed = None def 
_set_signature_columns_if_needed(self): if self._signature_columns is None: diff --git a/setup.py b/setup.py index f1a72a52a8..5806120822 100644 --- a/setup.py +++ b/setup.py @@ -41,16 +41,8 @@ QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241"] EXTRAS_REQUIRE = { - "neural-compressor": [ - "neural-compressor>=2.2.0", - "onnx", - "onnxruntime<1.15.0", - ], - "openvino": [ - "openvino>=2023.3", - "onnx", - "onnxruntime", - ], + "neural-compressor": ["neural-compressor>=2.2.0", "onnx", "onnxruntime<1.15.0"], + "openvino": ["openvino>=2023.3", "onnx", "onnxruntime"], "openvino-tokenizers": ["openvino-tokenizers[transformers]"], "nncf": ["nncf>=2.8.1"], "ipex": ["intel-extension-for-pytorch", "onnx"], diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 16f848da9e..0ef89ec8b8 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -56,8 +56,7 @@ OVWeightQuantizationConfig, ) - -from optimum.intel.openvino.configuration import INT8_WEIGHT_COMPRESSION_CONFIG +from optimum.intel.openvino.configuration import INT8_WEIGHT_COMPRESSION_CONFIG, DEFAULT_QUANTIZATION_CONFIG from optimum.intel.openvino.quantization import InferRequestWrapper from optimum.intel.utils.import_utils import is_openvino_version from utils_tests import MODEL_NAMES, get_num_quantized_nodes, _ARCHITECTURES_TO_EXPECTED_INT8 @@ -111,9 +110,8 @@ def preprocess_function(examples, tokenizer): self.assertTrue("logits" in outputs) # Verify that that the configuration is correctly saved and loaded - expected_config = OVConfig() loaded_config = OVConfig.from_pretrained(tmp_dir) - self.assertEqual(expected_config.to_dict()["compression"], loaded_config.to_dict()["compression"]) + self.assertEqual(DEFAULT_QUANTIZATION_CONFIG, loaded_config.to_dict()["compression"]) @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS) def test_ovmodel_static_quantization(self, model_cls, model_name, expected_fake_quantize, expected_int8): @@ -160,7 +158,7 @@ class OVWeightCompressionTest(unittest.TestCase): ) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 64, 365),) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 6, 379),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 388),) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTO_COMPRESSED_MATMULS = ( (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 16, 136), ) @@ -237,6 +235,8 @@ class OVWeightCompressionTest(unittest.TestCase): IS_SUPPORT_STATEFUL = is_openvino_version(">=", "2023.3") + DEFAULT_INT4_CONFIG = {"bits": 4, "sym": True, "group_size": 64, "all_layers": True} + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_8BIT_COMPRESSED_MATMULS) def test_automodel_weight_compression(self, model_cls, model_name, expected_pt_int8, expected_ov_int8): task = model_cls.export_feature @@ -336,6 +336,8 @@ def test_ovmodel_8bit_weight_compression_stateful(self, model_cls, model_id, exp @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_AUTO_COMPRESSION) def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type): model = model_cls.from_pretrained(MODEL_NAMES[model_type], export=True, load_in_8bit=True, stateful=False) + self.assertEqual(model._openvino_config.quantization_config.bits, 8) + self.assertEqual(model._openvino_config.dtype, "int8") if model.export_feature.startswith("text2text-generation"): models = 
[model.encoder, model.decoder, model.decoder_with_past] @@ -351,12 +353,13 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type): self.assertEqual(expected_ov_int8[i], num_int8) @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS) + @unittest.mock.patch.dict( + "optimum.intel.openvino.configuration._DEFAULT_4BIT_CONFIGS", {"facebook/opt-125m": DEFAULT_INT4_CONFIG} + ) def test_ovmodel_4bit_auto_compression(self, model_cls, model_type, expected_ov_int8, expected_ov_int4): with tempfile.TemporaryDirectory() as tmp_dir: model_id = MODEL_NAMES[model_type] - model = model_cls.from_pretrained( - model_id, export=True, quantization_config=OVWeightQuantizationConfig(bits=4) - ) + model = model_cls.from_pretrained(model_id, export=True, quantization_config={"bits": 4}) tokenizer = AutoTokenizer.from_pretrained(model_id) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token @@ -366,6 +369,13 @@ def test_ovmodel_4bit_auto_compression(self, model_cls, model_type, expected_ov_ self.assertEqual(expected_ov_int8, num_int8) model.save_pretrained(tmp_dir) + openvino_config = OVConfig.from_pretrained(tmp_dir) + self.assertEqual(openvino_config.quantization_config["bits"], 4) + self.assertEqual(openvino_config.dtype, "int4") + if model_id == "facebook/opt-125m": + for key, value in self.DEFAULT_INT4_CONFIG.items(): + self.assertEqual(value, openvino_config.quantization_config[key]) + @parameterized.expand(LOAD_IN_4_BITS_SCOPE) def test_ovmodel_4bit_auto_compression_with_config( self, model_cls, model_id, quantization_config, expected_ov_int4 @@ -380,8 +390,9 @@ def test_ovmodel_4bit_auto_compression_with_config( self.assertEqual(expected_ov_int4, num_int4) model.save_pretrained(tmp_dir) - ov_config = OVConfig(quantization_config=quantization_config) - ov_config.save_pretrained(tmp_dir) + openvino_config = OVConfig.from_pretrained(tmp_dir) + self.assertEqual(openvino_config.quantization_config["bits"], 4) + self.assertEqual(openvino_config.dtype, "int4") @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTO_COMPRESSED_MATMULS) def test_ovmodel_4bit_auto_compression_with_custom_dataset( From 22bc3d0439df66ab6218c08d1a5a649c7345a2b7 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:10:55 +0100 Subject: [PATCH 20/64] Fix warning (#582) * Fix warning * fix message warning --- optimum/intel/generation/modeling.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/optimum/intel/generation/modeling.py b/optimum/intel/generation/modeling.py index 0abdafe666..3d9c657626 100644 --- a/optimum/intel/generation/modeling.py +++ b/optimum/intel/generation/modeling.py @@ -105,13 +105,13 @@ def __init__( self.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.model_type)(config) self.model_dtype = kwargs.get("model_dtype", None) - logger.warning( - f"The class `{self.__class__}` has been depreciated and will be removed in optimum-intel v1.14, please use IPEXModel instead" - ) if isinstance(model, torch.jit.ScriptModule): self.input_names = { inputs.debugName().split(".")[0] for inputs in model.graph.inputs() if inputs.debugName() != "self" } + logger.warning( + f"The class `{self.__class__}` has been depreciated for TorchScript model, please use `IPEXModelForCausalLM` instead" + ) else: self.input_names = set() From b56021e5761f3fa39c75541f87c37ded7cac6658 Mon Sep 17 00:00:00 2001 From: Ella Charlaix 
<80481427+echarlaix@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:27:39 +0100 Subject: [PATCH 21/64] Add reference to the temporary directory for windows fix (#581) --- optimum/intel/openvino/modeling_diffusion.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 1570a22457..5e8a0cdc59 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -94,9 +94,18 @@ def __init__( self._device = device.upper() self.is_dynamic = dynamic_shapes self.ov_config = ov_config if ov_config is not None else {} - self._model_save_dir = ( - Path(model_save_dir.name) if isinstance(model_save_dir, TemporaryDirectory) else model_save_dir - ) + + # This attribute is needed to keep one reference on the temporary directory, since garbage collecting + # would end-up removing the directory containing the underlying OpenVINO model + self._model_save_dir_tempdirectory_instance = None + if isinstance(model_save_dir, TemporaryDirectory): + self._model_save_dir_tempdirectory_instance = model_save_dir + self._model_save_dir = Path(model_save_dir.name) + elif isinstance(model_save_dir, str): + self._model_save_dir = Path(model_save_dir) + else: + self._model_save_dir = model_save_dir + self.vae_decoder = OVModelVaeDecoder(vae_decoder, self) self.unet = OVModelUnet(unet, self) self.text_encoder = OVModelTextEncoder(text_encoder, self) if text_encoder is not None else None From 8e68d38250f0eba5d694d7e40f1b91f2e644e357 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Mon, 4 Mar 2024 12:02:27 +0100 Subject: [PATCH 22/64] Fix documentation (#583) * Fix documentation * fix --- docs/source/inference.mdx | 7 +++++-- docs/source/optimization_ov.mdx | 6 +++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx index a9ee5529da..905e0aa4dd 100644 --- a/docs/source/inference.mdx +++ b/docs/source/inference.mdx @@ -110,7 +110,7 @@ By default the quantization scheme will be [assymmetric](https://github.com/open For INT4 quantization you can also specify the following arguments : * The `--group-size` parameter will define the group size to use for quantization, `-1` it will results in per-column quantization. -* The `--ratio` CLI parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`. +* The `--ratio` parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`. Smaller `group_size` and `ratio` of usually improve accuracy at the sacrifice of the model size and inference latency. @@ -122,8 +122,11 @@ from optimum.intel import OVModelForCausalLM model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) ``` -> **NOTE:** `load_in_8bit` is enabled by default for the models larger than 1 billion parameters. + +`load_in_8bit` is enabled by default for the models larger than 1 billion parameters. + + To apply quantization on both weights and activations, you can use the `OVQuantizer`, more information in the [documentation](https://huggingface.co/docs/optimum/main/en/intel/optimization_ov#optimization). 
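A minimal usage sketch of the 4-bit weight-only path that the `--group-size` / `--ratio` CLI options and the `load_in_8bit` flag above map onto, assuming the `OVWeightQuantizationConfig` API introduced earlier in this series; the model id and the concrete `ratio` / `group_size` values are illustrative placeholders, not values prescribed by the patch:

```python
from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

# 4-bit weight-only quantization applied when exporting to OpenVINO IR;
# `ratio` and `group_size` mirror the --ratio / --group-size CLI flags above.
quantization_config = OVWeightQuantizationConfig(bits=4, sym=True, group_size=128, ratio=0.8)
model = OVModelForCausalLM.from_pretrained(
    "gpt2",  # placeholder model id
    export=True,
    quantization_config=quantization_config,
)
model.save_pretrained("ov_model_int4")
```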
diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 5686af4bf3..51067b0b64 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -69,7 +69,11 @@ from optimum.intel import OVModelForCausalLM model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) ``` -> **NOTE:** `load_in_8bit` is enabled by default for models larger than 1 billion parameters. + + +`load_in_8bit` is enabled by default for the models larger than 1 billion parameters. + + For the 4-bit weight quantization you can use the `quantization_config` to specify the optimization parameters, for example: From 26977f8a54a171be1546f1d369faa7dfc8452cf4 Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Wed, 6 Mar 2024 18:08:32 +0800 Subject: [PATCH 23/64] Add llama test model to cover MQA (#585) * change llama test model to cover MQA * keep llama and llama2 in tests * fix code style --- tests/generation/test_modeling.py | 2 ++ tests/ipex/test_inference.py | 2 ++ tests/ipex/test_modeling.py | 6 +++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/generation/test_modeling.py b/tests/generation/test_modeling.py index b97fd66a83..9b637d322d 100644 --- a/tests/generation/test_modeling.py +++ b/tests/generation/test_modeling.py @@ -31,6 +31,7 @@ "gpt_neo": "hf-internal-testing/tiny-random-GPTNeoModel", "mistral": "echarlaix/tiny-random-mistral", "llama": "fxmarty/tiny-llama-fast-tokenizer", + "llama2": "Jiqing/tiny_random_llama2", "gpt_bigcode": "hf-internal-testing/tiny-random-GPTBigCodeModel", } @@ -54,6 +55,7 @@ class ModelingIntegrationTest(unittest.TestCase): "gpt_neo", "mistral", "llama", + "llama2", # "gpt_bigcode", ) diff --git a/tests/ipex/test_inference.py b/tests/ipex/test_inference.py index bc1890453d..e120514506 100644 --- a/tests/ipex/test_inference.py +++ b/tests/ipex/test_inference.py @@ -42,6 +42,7 @@ "gpt_neox": "hf-internal-testing/tiny-random-GPTNeoXForCausalLM", "gpt_bigcode": "hf-internal-testing/tiny-random-GPTBigCodeModel", "llama": "fxmarty/tiny-llama-fast-tokenizer", + "llama2": "Jiqing/tiny_random_llama2", "opt": "hf-internal-testing/tiny-random-OPTModel", "mpt": "hf-internal-testing/tiny-random-MptForCausalLM", } @@ -66,6 +67,7 @@ class IPEXIntegrationTest(unittest.TestCase): "gpt_neo", # "gpt_bigcode", "llama", + "llama2", "opt", "mpt", ) diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index ffc2ca6a89..03b7d015d1 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -67,6 +67,7 @@ "gptj": "hf-internal-testing/tiny-random-GPTJModel", "levit": "hf-internal-testing/tiny-random-LevitModel", "llama": "fxmarty/tiny-llama-fast-tokenizer", + "llama2": "Jiqing/tiny_random_llama2", "marian": "sshleifer/tiny-marian-en-de", "mbart": "hf-internal-testing/tiny-random-mbart", "mistral": "echarlaix/tiny-random-mistral", @@ -209,6 +210,7 @@ class IPEXModelForCausalLMTest(unittest.TestCase): "gpt_neo", "gpt_neox", "llama", + "llama2", "mistral", # "phi", "mpt", @@ -226,7 +228,9 @@ def test_compare_to_transformers(self, model_arch): self.assertTrue(ipex_model.use_cache) tokenizer = AutoTokenizer.from_pretrained(model_id) tokens = tokenizer( - "This is a sample", return_tensors="pt", return_token_type_ids=False if model_arch == "llama" else None + "This is a sample", + return_tensors="pt", + return_token_type_ids=False if model_arch in ("llama", "llama2") else None, ) position_ids = None if model_arch.replace("_", "-") in 
MODEL_TYPES_REQUIRING_POSITION_IDS: From 751663785c1167a669c6ef90fbfc6792c7c2c3c5 Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Wed, 6 Mar 2024 16:27:45 +0400 Subject: [PATCH 24/64] Include nncf in openvino extra (#586) --- README.md | 2 +- docs/source/installation.mdx | 2 +- optimum/exporters/openvino/model_patcher.py | 2 +- setup.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7b762cce26..ec35e602ca 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ To install the latest release of 🤗 Optimum Intel with the corresponding requi | Accelerator | Installation | |:-----------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------| | [Intel Neural Compressor](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) | `pip install --upgrade-strategy eager "optimum[neural-compressor]"` | -| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `pip install --upgrade-strategy eager "optimum[openvino,nncf]"` | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `pip install --upgrade-strategy eager "optimum[openvino]"` | | [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/#introduction) | `pip install --upgrade-strategy eager "optimum[ipex]"` | The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version. diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx index cf8688d105..87698a6514 100644 --- a/docs/source/installation.mdx +++ b/docs/source/installation.mdx @@ -21,7 +21,7 @@ To install the latest release of 🤗 Optimum Intel with the corresponding requi | Accelerator | Installation | |:-----------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------| | [Intel Neural Compressor (INC)](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) | `pip install --upgrade-strategy eager "optimum[neural-compressor]"`| -| [Intel OpenVINO](https://docs.openvino.ai/latest/index.html) | `pip install --upgrade-strategy eager "optimum[openvino,nncf]"` | +| [Intel OpenVINO](https://docs.openvino.ai/latest/index.html) | `pip install --upgrade-strategy eager "optimum[openvino]"` | The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version. diff --git a/optimum/exporters/openvino/model_patcher.py b/optimum/exporters/openvino/model_patcher.py index f953771a7a..aea57161e2 100644 --- a/optimum/exporters/openvino/model_patcher.py +++ b/optimum/exporters/openvino/model_patcher.py @@ -35,7 +35,7 @@ def patch_model_with_bettertransformer(model): + "[WARNING] For good performance with stateful models, transformers>=4.36.2 and PyTorch>=2.1.1 are required. " f"This Python environment has Transformers {_transformers_version} and PyTorch {_torch_version}. 
" "Consider upgrading PyTorch and Transformers, for example by running " - "`pip install --upgrade --upgrade-strategy eager optimum[openvino,nncf]`, and export the model again" + "`pip install --upgrade --upgrade-strategy eager optimum[openvino]`, and export the model again" + COLOR_RESET ) diff --git a/setup.py b/setup.py index 5806120822..a4e67d46c2 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ EXTRAS_REQUIRE = { "neural-compressor": ["neural-compressor>=2.2.0", "onnx", "onnxruntime<1.15.0"], - "openvino": ["openvino>=2023.3", "onnx", "onnxruntime"], + "openvino": ["openvino>=2023.3", "onnx", "onnxruntime", "nncf>=2.8.1"], "openvino-tokenizers": ["openvino-tokenizers[transformers]"], "nncf": ["nncf>=2.8.1"], "ipex": ["intel-extension-for-pytorch", "onnx"], From 246c8297e7865578352fba40d8db9c4df9a31c9a Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:16:08 +0100 Subject: [PATCH 25/64] Fix title documentation (#588) --- docs/source/inference.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx index 905e0aa4dd..82e2315454 100644 --- a/docs/source/inference.mdx +++ b/docs/source/inference.mdx @@ -471,7 +471,7 @@ image = refiner(prompt=prompt, image=image[None, :]).images[0] ``` -## Latent Consistency Models +### Latent Consistency Models | Task | Auto Class | @@ -479,7 +479,7 @@ image = refiner(prompt=prompt, image=image[None, :]).images[0] | `text-to-image` | `OVLatentConsistencyModelPipeline` | -### Text-to-Image +#### Text-to-Image Here is an example of how you can load a Latent Consistency Models (LCMs) from [SimianLuo/LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) and run inference using OpenVINO : From 4c481e6c7449317829d188788489282788315f5c Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 7 Mar 2024 10:30:25 +0100 Subject: [PATCH 26/64] Update OpenVINO documentation links in README.md (#587) * Update OpenVINO documentation links in README.md The links are now aligned with OpenVINO 2024.0 documentation, and include permalinks instead of direct links, when possible. * Update inference.mdx * Update index.mdx * Update installation.mdx * Update README.md --- README.md | 8 ++++---- docs/source/index.mdx | 4 ++-- docs/source/inference.mdx | 7 ++++--- docs/source/installation.mdx | 4 ++-- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index ec35e602ca..7905cefded 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Intel [Neural Compressor](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) is an open-source library enabling the usage of the most popular compression techniques such as quantization, pruning and knowledge distillation. It supports automatic accuracy-driven tuning strategies in order for users to easily generate quantized model. The users can easily apply static, dynamic and aware-training quantization approaches while giving an expected accuracy criteria. It also supports different weight pruning techniques enabling the creation of pruned model giving a predefined sparsity target. -[OpenVINO](https://docs.openvino.ai/latest/index.html) is an open-source toolkit that enables high performance inference capabilities for Intel CPUs, GPUs, and special DL inference accelerators ([see](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) the full list of supported devices). 
It is supplied with a set of tools to optimize your models with compression techniques such as quantization, pruning and knowledge distillation. Optimum Intel provides a simple interface to optimize your Transformers and Diffusers models, convert them to the OpenVINO Intermediate Representation (IR) format and run inference using OpenVINO Runtime. +[OpenVINO](https://docs.openvino.ai) is an open-source toolkit that enables high performance inference capabilities for Intel CPUs, GPUs, and special DL inference accelerators ([see](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) the full list of supported devices). It is supplied with a set of tools to optimize your models with compression techniques such as quantization, pruning and knowledge distillation. Optimum Intel provides a simple interface to optimize your Transformers and Diffusers models, convert them to the OpenVINO Intermediate Representation (IR) format and run inference using OpenVINO Runtime. ## Installation @@ -20,7 +20,7 @@ To install the latest release of 🤗 Optimum Intel with the corresponding requi | Accelerator | Installation | |:-----------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------| | [Intel Neural Compressor](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) | `pip install --upgrade-strategy eager "optimum[neural-compressor]"` | -| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `pip install --upgrade-strategy eager "optimum[openvino]"` | +| [OpenVINO](https://docs.openvino.ai) | `pip install --upgrade-strategy eager "optimum[openvino]"` | | [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/#introduction) | `pip install --upgrade-strategy eager "optimum[ipex]"` | The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version. @@ -68,11 +68,11 @@ For more details on the supported compression techniques, please refer to the [d ## OpenVINO -Below are the examples of how to use OpenVINO and its [NNCF](https://docs.openvino.ai/latest/tmo_introduction.html) framework to accelerate inference. +Below are examples of how to use OpenVINO and its [NNCF](https://docs.openvino.ai/2024/openvino-workflow/model-optimization-guide/compressing-models-during-training.html) framework to accelerate inference. #### Export: -It is possible to export your model to the [OpenVINO](https://docs.openvino.ai/2023.1/openvino_ir.html) IR format with the CLI : +It is possible to export your model to the [OpenVINO IR](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html) format with the CLI : ```plain optimum-cli export openvino --model gpt2 ov_model diff --git a/docs/source/index.mdx b/docs/source/index.mdx index cbec79baa9..643b9be044 100644 --- a/docs/source/index.mdx +++ b/docs/source/index.mdx @@ -21,7 +21,7 @@ limitations under the License. [Intel Neural Compressor](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) is an open-source library enabling the usage of the most popular compression techniques such as quantization, pruning and knowledge distillation. It supports automatic accuracy-driven tuning strategies in order for users to easily generate quantized model. The users can easily apply static, dynamic and aware-training quantization approaches while giving an expected accuracy criteria. 
It also supports different weight pruning techniques enabling the creation of pruned model giving a predefined sparsity target. -[OpenVINO](https://docs.openvino.ai/latest/index.html) is an open-source toolkit that enables high performance inference capabilities for Intel CPUs, GPUs, and special DL inference accelerators ([see](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) the full list of supported devices). It is supplied with a set of tools to optimize your models with compression techniques such as quantization, pruning and knowledge distillation. Optimum Intel provides a simple interface to optimize your Transformers and Diffusers models, convert them to the OpenVINO Intermediate Representation (IR) format and run inference using OpenVINO Runtime. +[OpenVINO](https://docs.openvino.ai) is an open-source toolkit that enables high performance inference capabilities for Intel CPUs, GPUs, and special DL inference accelerators ([see](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) the full list of supported devices). It is supplied with a set of tools to optimize your models with compression techniques such as quantization, pruning and knowledge distillation. Optimum Intel provides a simple interface to optimize your Transformers and Diffusers models, convert them to the OpenVINO Intermediate Representation (IR) format and run inference using OpenVINO Runtime.
@@ -34,4 +34,4 @@ limitations under the License.

Learn how to run inference with OpenVINO Runtime and to apply quantization, pruning and knowledge distillation on your model to further speed up inference.

-
\ No newline at end of file + diff --git a/docs/source/inference.mdx b/docs/source/inference.mdx index 82e2315454..65480c1d2f 100644 --- a/docs/source/inference.mdx +++ b/docs/source/inference.mdx @@ -13,7 +13,8 @@ Optimum Intel can be used to load optimized models from the [Hugging Face Hub](h ## Transformers models -You can now easily perform inference with OpenVINO Runtime on a variety of Intel processors ([see](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) the full list of supported devices). +You can now easily perform inference with OpenVINO Runtime on a variety of Intel processors +([see](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) the full list of supported devices). For that, just replace the `AutoModelForXxx` class with the corresponding `OVModelForXxx` class. As shown in the table below, each task is associated with a class enabling to automatically load your model. @@ -33,7 +34,7 @@ As shown in the table below, each task is associated with a class enabling to au ### Export -It is possible to export your model to the [OpenVINO](https://docs.openvino.ai/2023.1/openvino_ir.html) IR format with the CLI : +It is possible to export your model to the [OpenVINO IR](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html) format with the CLI : ```bash optimum-cli export openvino --model gpt2 ov_model @@ -182,7 +183,7 @@ model.reshape(1,128) model.compile() ``` -To run inference on Intel integrated or discrete GPU, use `.to("gpu")`. On GPU, models run in FP16 precision by default. (See [OpenVINO documentation](https://docs.openvino.ai/nightly/openvino_docs_install_guides_configurations_for_intel_gpu.html) about installing drivers for GPU inference). +To run inference on Intel integrated or discrete GPU, use `.to("gpu")`. On GPU, models run in FP16 precision by default. (See [OpenVINO documentation](https://docs.openvino.ai/2024/get-started/configurations/configurations-intel-gpu.html) about installing drivers for GPU inference). ```python # Static shapes speed up inference diff --git a/docs/source/installation.mdx b/docs/source/installation.mdx index 87698a6514..c29f5ceb95 100644 --- a/docs/source/installation.mdx +++ b/docs/source/installation.mdx @@ -21,7 +21,7 @@ To install the latest release of 🤗 Optimum Intel with the corresponding requi | Accelerator | Installation | |:-----------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------| | [Intel Neural Compressor (INC)](https://www.intel.com/content/www/us/en/developer/tools/oneapi/neural-compressor.html) | `pip install --upgrade-strategy eager "optimum[neural-compressor]"`| -| [Intel OpenVINO](https://docs.openvino.ai/latest/index.html) | `pip install --upgrade-strategy eager "optimum[openvino]"` | +| [Intel OpenVINO](https://docs.openvino.ai ) | `pip install --upgrade-strategy eager "optimum[openvino]"` | The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version. @@ -42,4 +42,4 @@ or to install from source including dependencies: python -m pip install "optimum-intel[extras]"@git+https://github.com/huggingface/optimum-intel.git ``` -where `extras` can be one or more of `neural-compressor`, `openvino`, `nncf`. \ No newline at end of file +where `extras` can be one or more of `neural-compressor`, `openvino`, `nncf`. 
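A short usage sketch of the static-reshape and GPU flow referenced in the `inference.mdx` hunk above; it assumes an Intel GPU with the drivers from the linked OpenVINO configuration guide, and the model id and the `(1, 128)` shape are illustrative placeholders:

```python
from optimum.intel import OVModelForSequenceClassification

model_id = "distilbert-base-uncased-finetuned-sst-2-english"  # placeholder checkpoint
model = OVModelForSequenceClassification.from_pretrained(model_id, export=True)

# Static shapes speed up inference: fix batch size and sequence length before compiling.
model.reshape(1, 128)
# Run on an Intel GPU (FP16 by default); requires the GPU drivers mentioned above.
model.to("gpu")
model.compile()
```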
From 126a581144beb1a84294a3399e7d1dd2c949ecb2 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Thu, 7 Mar 2024 16:50:54 +0100 Subject: [PATCH 27/64] Fix default int8 quantization for CLI (#592) --- optimum/commands/export/openvino.py | 19 +++++++++---------- optimum/exporters/openvino/__main__.py | 17 ++--------------- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/optimum/commands/export/openvino.py b/optimum/commands/export/openvino.py index 255e2a7e13..997ec44aa5 100644 --- a/optimum/commands/export/openvino.py +++ b/optimum/commands/export/openvino.py @@ -157,13 +157,12 @@ def run(self): ) self.args.weight_format = "int8" - weight_format = self.args.weight_format or "fp32" - - ov_config = None - if weight_format in {"fp16", "fp32"}: - ov_config = OVConfig(dtype=weight_format) + if self.args.weight_format is None: + ov_config = None + elif self.args.weight_format in {"fp16", "fp32"}: + ov_config = OVConfig(dtype=self.args.weight_format) else: - is_int8 = weight_format == "int8" + is_int8 = self.args.weight_format == "int8" # For int4 quantization if not parameter is provided, then use the default config if exist if ( @@ -182,12 +181,12 @@ def run(self): "group_size": -1 if is_int8 else self.args.group_size, } - if weight_format in {"int4_sym_g128", "int4_asym_g128", "int4_sym_g64", "int4_asym_g64"}: + if self.args.weight_format in {"int4_sym_g128", "int4_asym_g128", "int4_sym_g64", "int4_asym_g64"}: logger.warning( - f"--weight-format {weight_format} is deprecated, possible choices are fp32, fp16, int8, int4" + f"--weight-format {self.args.weight_format} is deprecated, possible choices are fp32, fp16, int8, int4" ) - quantization_config["sym"] = "asym" not in weight_format - quantization_config["group_size"] = 128 if "128" in weight_format else 64 + quantization_config["sym"] = "asym" not in self.args.weight_format + quantization_config["group_size"] = 128 if "128" in self.args.weight_format else 64 ov_config = OVConfig(quantization_config=quantization_config) # TODO : add input shapes diff --git a/optimum/exporters/openvino/__main__.py b/optimum/exporters/openvino/__main__.py index 24b65f9032..1c695e2f19 100644 --- a/optimum/exporters/openvino/__main__.py +++ b/optimum/exporters/openvino/__main__.py @@ -21,26 +21,13 @@ from optimum.exporters import TasksManager from optimum.exporters.onnx.base import OnnxConfig +from optimum.exporters.onnx.constants import SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED from optimum.utils.save_utils import maybe_load_preprocessors -from ...intel.utils.import_utils import ( - is_openvino_tokenizers_available, - is_optimum_version, - is_transformers_version, -) +from ...intel.utils.import_utils import is_openvino_tokenizers_available, is_transformers_version from .convert import export_from_model, export_tokenizer -if is_optimum_version(">=", "1.16.0"): - from optimum.exporters.onnx.constants import SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED -else: - # Copied from https://github.com/huggingface/optimum/blob/main/optimum/exporters/onnx/constants.py - SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED = [ - "bart", - "whisper", - ] - - if TYPE_CHECKING: from optimum.intel.openvino.configuration import OVConfig From 67fad65933e0b8a4d8a03cc74b26888507465ffb Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Fri, 8 Mar 2024 17:16:35 +0800 Subject: [PATCH 28/64] Change model output parameter to last_hidden_states for IPEXModel (#589) * change model output parameter to 
last_hidden_states * update ipex model testing * update testing * add output name to ipex model --- optimum/intel/ipex/modeling_base.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 2b6b569343..9928977ead 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -58,6 +58,7 @@ class IPEXModel(OptimizedModel): export_feature = "feature-extraction" base_model_prefix = "ipex_model" main_input_name = "input_ids" + output_name = "last_hidden_state" def __init__( self, @@ -193,7 +194,12 @@ def forward( inputs["token_type_ids"] = token_type_ids outputs = self._call_model(**inputs) - return ModelOutput(**outputs) if isinstance(outputs, dict) else ModelOutput(logits=outputs[0]) + if isinstance(outputs, dict): + model_output = ModelOutput(**outputs) + else: + model_output = ModelOutput() + model_output[self.output_name] = outputs[0] + return model_output def eval(self): self.model.eval() @@ -235,16 +241,19 @@ def _init_warmup(self): class IPEXModelForSequenceClassification(IPEXModel): auto_model_class = AutoModelForSequenceClassification export_feature = "text-classification" + output_name = "logits" class IPEXModelForTokenClassification(IPEXModel): auto_model_class = AutoModelForTokenClassification export_feature = "token-classification" + output_name = "logits" class IPEXModelForMaskedLM(IPEXModel): auto_model_class = AutoModelForMaskedLM export_feature = "fill-mask" + output_name = "logits" class IPEXModelForImageClassification(IPEXModel): From 0bcffeb023366b6984eeb6da3f297134f5543821 Mon Sep 17 00:00:00 2001 From: jiqing-feng <107918818+jiqing-feng@users.noreply.github.com> Date: Fri, 8 Mar 2024 18:48:02 +0800 Subject: [PATCH 29/64] Add IPEX model patcher (#567) * llama model patcher * fix jit model * fix jit model * rm autocast in model * add llama model patcher * support assisted decoding and add reorder cache function * add comment for _prepare_past_key_values * rebase main * fix model_dtype * rm useless comments * fix llama * add comments for ipex_rope and ipex_scale_dot_product * fix comments * add enable_tpp comments * fix import * fix review round 2 * add torch.no_grad to avoid auto_kernel_selection issue * use torch.no_grad in jit trace * fix ipex model testing * add tests for ipex model generation with multi inputs * fix code style * remove __get__(self) as _reorder_cache is static method for the class * fix reorder_cache * use model_type * check if reorder_cache is a static method * fix _reorder_cache * fix raise import error * test ipex patching * fix comments * update API name and testing * disable until ipex version 2.5.0 * update testing name * Update optimum/intel/ipex/modeling_base.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> * Update tests/ipex/test_modeling.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> * fix tests --------- Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/exporters/ipex/__init__.py | 0 optimum/exporters/ipex/model_patcher.py | 91 +++++++ optimum/exporters/ipex/modeling_utils.py | 307 +++++++++++++++++++++++ optimum/intel/ipex/modeling_base.py | 135 ++++++++-- tests/ipex/test_modeling.py | 39 +++ 5 files changed, 558 insertions(+), 14 deletions(-) create mode 100644 optimum/exporters/ipex/__init__.py create mode 100644 optimum/exporters/ipex/model_patcher.py create mode 100644
optimum/exporters/ipex/modeling_utils.py diff --git a/optimum/exporters/ipex/__init__.py b/optimum/exporters/ipex/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/optimum/exporters/ipex/model_patcher.py b/optimum/exporters/ipex/model_patcher.py new file mode 100644 index 0000000000..60ff3b721b --- /dev/null +++ b/optimum/exporters/ipex/model_patcher.py @@ -0,0 +1,91 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from transformers.models.llama.modeling_llama import ( + LlamaAttention, + LlamaDecoderLayer, + LlamaForCausalLM, + LlamaModel, + LlamaRMSNorm, +) + +from optimum.intel.utils.import_utils import is_ipex_version + +from .modeling_utils import ( + _IPEXLlamaDecoderLayerRef, + _llama_attn_forward, + _llama_layer_norm_forward, + _llama_model_forward, +) + + +_IPEX_EXPORTED_ARCH = ("LlamaForCausalLM",) +_IPEX_EXPORTED_TASK = ("text-generation",) + + +def convert_func(m, func_name, new_function): + bound_method = new_function.__get__(m, m.__class__) + setattr(m, func_name, bound_method) + + +def convert_functions(m, target_m, new_function_name, new_function): + for _, sub_m in m.named_children(): + if isinstance(sub_m, target_m): + convert_func(sub_m, new_function_name, new_function) + convert_functions(sub_m, target_m, new_function_name, new_function) + + +def convert_class(m, target_m, new_class, config, distributed=False): + for name, sub_m in m.named_children(): + if isinstance(sub_m, target_m): + new_m = new_class(sub_m, config, distributed) + setattr(m, name, new_m) + convert_class(sub_m, target_m, new_class, config, distributed) + + +def patch_op(m, target_m, new_op_name, new_op): + for name, sub_m in m.named_children(): + if isinstance(sub_m, target_m): + setattr(sub_m, new_op_name, new_op) + patch_op(sub_m, target_m, new_op_name, new_op) + + +def _patch_llama_model(model): + if is_ipex_version("<", "2.5.0"): + raise ImportError("Only ipex version > 2.3.0 supports RotaryEmbedding and IndirectAccessKVCache") + + from intel_extension_for_pytorch.llm.modules import IndirectAccessKVCache, RotaryEmbedding + + ipex_rope = RotaryEmbedding( + model.config.max_position_embeddings, + model.config.hidden_size // model.config.num_attention_heads, + model.config.rope_theta, + model.config.architectures[0], + ) + ipex_scale_dot_product = IndirectAccessKVCache(text_max_length=model.config.max_position_embeddings) + patch_op(model, LlamaAttention, "ipex_rope", ipex_rope) + patch_op(model, LlamaAttention, "ipex_scale_dot_product", ipex_scale_dot_product) + + convert_functions(model, LlamaModel, "forward", _llama_model_forward) + convert_functions(model, LlamaAttention, "forward", _llama_attn_forward) + convert_functions(model, LlamaRMSNorm, "forward", _llama_layer_norm_forward) + + convert_class(model, LlamaDecoderLayer, _IPEXLlamaDecoderLayerRef, model.config) + return model + + +def _patch_model(model): + if isinstance(model, LlamaForCausalLM): + model = _patch_llama_model(model) + return model diff --git 
a/optimum/exporters/ipex/modeling_utils.py b/optimum/exporters/ipex/modeling_utils.py new file mode 100644 index 0000000000..f75e559eaf --- /dev/null +++ b/optimum/exporters/ipex/modeling_utils.py @@ -0,0 +1,307 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple, Union + +import torch +from torch import nn +from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask +from transformers.modeling_outputs import BaseModelOutputWithPast +from transformers.models.llama.modeling_llama import repeat_kv + +from optimum.intel.utils.import_utils import is_ipex_version + + +# Adapted from https://github.com/huggingface/transformers/blob/v4.38.2/src/transformers/models/llama/modeling_llama.py#L83 +def _llama_layer_norm_forward(self, hidden_states): + return torch.ops.torch_ipex.rmsnorm(hidden_states, self.weight, self.variance_epsilon) + + +# Adapted from https://github.com/huggingface/transformers/blob/v4.38.2/src/transformers/models/llama/modeling_llama.py#L321 +def _llama_attn_forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + **kwargs, +) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + kv_seq_len = q_len + past_key_value[0].size(-2) if past_key_value is not None else q_len + + query = query.view(bsz, q_len, self.num_heads, self.head_dim) + key = key.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + value = value.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + # Use the ipex op to apply the rotary position embedding more efficiently. + key = self.ipex_rope( + key, + position_ids, + self.num_key_value_heads, + self.head_dim, + self.head_dim // 2, + self.head_dim, + kv_seq_len, + ) + query = self.ipex_rope( + query, + position_ids, + self.num_heads, + self.head_dim, + self.head_dim // 2, + self.head_dim, + kv_seq_len, + ) + + if use_cache: + # This ipex op pre-allocates buffers for past_key_values and uses the beam index history + # to decide which beam should be used, making the attention scaled dot-product more efficient.
+ (attn_output, attn_weights, past_key_value) = self.ipex_scale_dot_product( + query, + key, + value, + math.sqrt(self.head_dim), + past_key_value, + None, + attention_mask, + ) + else: + value_states = value.transpose(1, 2) + query_states = query.transpose(1, 2) + key_states = key.transpose(1, 2) + kv_seq_len = key_states.shape[-2] + + past_key_value = None + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: + attn_weights = torch.tensor(attn_weights) + torch.tensor(attention_mask) + attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +# Adapted from https://github.com/huggingface/transformers/blob/v4.38.2/src/transformers/models/llama/modeling_llama.py#L1130 +def _llama_model_forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, +) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape[:2] + elif inputs_embeds is not None: + batch_size, seq_length = inputs_embeds.shape[:2] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + past_key_values_length = 0 + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0) + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if getattr(self.config, "_flash_attn_2_enabled", False): + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask( + attention_mask, (batch_size, 
seq_length), inputs_embeds, past_key_values_length + ) + + # embed positions + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +# Adapted from https://github.com/huggingface/transformers/blob/v4.38.2/src/transformers/models/llama/modeling_llama.py#L694 +class _IPEXLlamaDecoderLayerRef(nn.Module): + def __init__(self, module, config, distributed=False): + if is_ipex_version("<", "2.5.0"): + raise ImportError("Only ipex version > 2.3.0 supports Linear2SiluMul and LinearAdd") + + from intel_extension_for_pytorch.llm.modules import Linear2SiluMul, LinearAdd + + super().__init__() + for k, v in module.__dict__.items(): + setattr(self, k, v) + for k, v in module.__class__.__dict__.items(): + if k.startswith("__") or k.startswith("forward"): + continue + setattr(self.__class__, k, getattr(module.__class__, k)) + self.distributed = distributed + if not self.distributed: + self.mha_linear_add = LinearAdd(module.self_attn.o_proj) + self.mlp_linear_add = LinearAdd(module.mlp.down_proj) + del self.__dict__["_modules"]["self_attn"].o_proj + del self.__dict__["_modules"]["mlp"].down_proj + self.linear_silu_mul = Linear2SiluMul(module.mlp.gate_proj, module.mlp.up_proj) + del self.__dict__["_modules"]["mlp"].gate_proj + del self.__dict__["_modules"]["mlp"].up_proj + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
+ use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + if not self.distributed: + hidden_states = self.mha_linear_add(hidden_states, residual) + else: + hidden_states = self.self_attn.o_proj(hidden_states) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + + mlp_gate = self.linear_silu_mul(hidden_states) + + if not self.distributed: + hidden_states = self.mlp_linear_add(mlp_gate, residual) + else: + hidden_states = self.mlp.down_proj(mlp_gate) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs diff --git a/optimum/intel/ipex/modeling_base.py b/optimum/intel/ipex/modeling_base.py index 9928977ead..00fe3de115 100644 --- a/optimum/intel/ipex/modeling_base.py +++ b/optimum/intel/ipex/modeling_base.py @@ -22,6 +22,8 @@ import intel_extension_for_pytorch as ipex import torch from huggingface_hub import hf_hub_download +from intel_extension_for_pytorch.cpu._auto_kernel_selection import _enable_tpp +from intel_extension_for_pytorch.transformers.optimize import get_dummy_input from transformers import ( AutoConfig, AutoModel, @@ -45,14 +47,63 @@ from optimum.modeling_base import OptimizedModel from optimum.utils import NormalizedConfigManager -from ..generation.modeling import jit_trace, prepare_jit_inputs -from ..utils.import_utils import is_torch_version, is_transformers_version +from ...exporters.ipex.model_patcher import _IPEX_EXPORTED_TASK, _patch_model +from ..generation.modeling import prepare_jit_inputs +from ..utils.import_utils import is_ipex_version, is_torch_version, is_transformers_version from ..utils.modeling_utils import MULTI_QUERY_ATTN_MODELS, patch_decoder_attention_mask logger = logging.getLogger(__name__) +_IPEX_SUPPORT_MODEL_TYPES = ("llama",) + + +def _is_patched_with_ipex(model, task): + if is_ipex_version("<", "2.5.0"): + return False + + if isinstance(model, torch.jit.ScriptModule): + for node in model.graph.nodes(): + # Jit will record the codes position so we can check if the node use ipex exporter. + if "torch_ipex::rotary_position_embedding" in node.__str__(): + return True + return False + else: + return model.config.model_type in _IPEX_SUPPORT_MODEL_TYPES and task in _IPEX_EXPORTED_TASK + + +def ipex_jit_trace(model, task, use_cache): + # Only support torch version >= 2.1.0 to support example_kwarg_inputs in jit.trace + if is_torch_version("<", "2.1.0"): + raise ImportError("`torch>=2.1.0` is needed to trace your model") + + if _is_patched_with_ipex(model, task): + model = _patch_model(model) + sample_inputs = get_dummy_input(model, return_dict=True) + # Use Tensor Processing Primitives to accelerate linear, see https://arxiv.org/abs/2104.05755. 
+ _enable_tpp() + else: + model = patch_decoder_attention_mask(model) + sample_inputs = prepare_jit_inputs(model, task, use_cache) + + model.config.return_dict = False + + model = ipex.optimize(model.eval(), dtype=model.dtype, inplace=True) + with torch.no_grad(): + trace_model = torch.jit.trace( + model, + example_kwarg_inputs=sample_inputs, + strict=False, + check_trace=False, + ) + trace_model = torch.jit.freeze(trace_model) + trace_model(**sample_inputs) + trace_model(**sample_inputs) + + return trace_model + + class IPEXModel(OptimizedModel): auto_model_class = AutoModel export_feature = "feature-extraction" @@ -74,6 +125,7 @@ def __init__( self._dtype = self.config.torch_dtype if self.config.torch_dtype is not None else torch.float32 self.model.to(self._device) self.model_save_dir = model_save_dir + self._is_ipex_exported = _is_patched_with_ipex(model, self.export_feature) self.input_names = { inputs.debugName().split(".")[0] for inputs in model.graph.inputs() if inputs.debugName() != "self" @@ -91,13 +143,13 @@ def _from_transformers( cls, model_id: str, config: PretrainedConfig, + use_cache: bool = True, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, subfolder: str = "", local_files_only: bool = False, - use_cache: bool = True, torch_dtype: Optional[Union[str, "torch.dtype"]] = None, trust_remote_code: bool = False, ): @@ -117,14 +169,13 @@ def _from_transformers( } model = TasksManager.get_model_from_task(task, model_id, **model_kwargs) - model = patch_decoder_attention_mask(model) - model = ipex.optimize(model, dtype=torch_dtype, level="O1", auto_kernel_selection=True) - traced_model = jit_trace(model, task, use_cache) + traced_model = ipex_jit_trace(model, task, use_cache) save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) torch.jit.save(traced_model, save_dir_path / WEIGHTS_NAME) config.torchscript = True + config.torch_dtype = torch_dtype return cls._from_pretrained( model_id=save_dir_path, @@ -135,6 +186,7 @@ def _from_transformers( cache_dir=cache_dir, local_files_only=local_files_only, use_cache=use_cache, + model_dtype=torch_dtype, ) @classmethod @@ -213,6 +265,13 @@ def device(self) -> torch.device: def dtype(self) -> torch.dtype: return self._dtype + @property + def model_dtype(self): + logger.warning( + "access to the `model_dtype` attribute is deprecated and will be removed after v1.18.0, please use `_dtype` instead." 
+ ) + return self._dtype + def to(self, device: Union[torch.device, str]): self._device = device if isinstance(device, torch.device) else torch.device(device) self.model.to(self._device) @@ -223,7 +282,7 @@ def can_generate(self): def _call_model(self, *args, **kwargs): try: - with torch.autocast(self.device.type, self.dtype): + with torch.autocast(self.device.type, self.dtype), torch.no_grad(): out = self.model(*args, **kwargs) except RuntimeError: out = self.model(*args, **kwargs) @@ -232,10 +291,12 @@ def _call_model(self, *args, **kwargs): def _init_warmup(self): # warmup, the first 2 forwards of an IPEX model include some preprocessing steps and # the results of the compute are unpredictable - use_cache = "past_key_values" in self.input_names - dummy_inputs = prepare_jit_inputs(self, self.export_feature, use_cache) - for _ in range(2): - self(**dummy_inputs) + # TODO : add warmup for IPEX exported model + if not self._is_ipex_exported: + use_cache = "past_key_values" in self.input_names + dummy_inputs = prepare_jit_inputs(self, self.export_feature, use_cache) + for _ in range(2): + self(**dummy_inputs) class IPEXModelForSequenceClassification(IPEXModel): @@ -334,10 +395,10 @@ def __init__( ): # Perform the initial warmup at the end of __init__ super().__init__(model, config, model_save_dir=model_save_dir, warmup=False) + GenerationMixin.__init__(self) model_type = config.model_type.replace("_", "-") self.normalized_config = NormalizedConfigManager.get_normalized_config_class(model_type)(config) - self.model_dtype = kwargs.get("model_dtype", self.dtype) self.use_cache = "past_key_values" in self.input_names if use_cache ^ self.use_cache: @@ -357,7 +418,15 @@ def __init__( ) except AttributeError: self.model_cls = get_model_class(self.config, AutoModelForCausalLM._model_mapping) - self._reorder_cache = self.model_cls._reorder_cache.__get__(self) + + if self._is_ipex_exported: + self._reorder_cache = _ipex_reorder_cache + else: + # Check if _reorder_cache is a static method + if isinstance(self.model_cls.__dict__["_reorder_cache"], staticmethod): + self._reorder_cache = self.model_cls._reorder_cache + else: + self._reorder_cache = self.model_cls._reorder_cache.__get__(self) if is_transformers_version(">=", "4.38.0") and model_type in {"llama", "phi", "persimmon"}: self.prepare_inputs_for_generation = _prepare_inputs_for_generation_for_llama @@ -383,7 +452,25 @@ def _prepare_past_key_values(self, input_ids): else: num_attention_heads = self.normalized_config.num_attention_heads - if model_type == "bloom": + if self._is_ipex_exported: + # Indirect access kv cache has a different data layout compared with most transformers model, + # see https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/llm.html#indirect-access-kv-cache + beam_idx_tmp = torch.zeros( + (self.config.max_position_embeddings, input_ids.shape[0]), dtype=torch.long + ).contiguous() + past_key_values = tuple( + [ + ( + torch.zeros(1, 0, 0, 1, dtype=torch.long).contiguous(), + torch.zeros([1, 1, 1, 1]).contiguous(), + torch.zeros([1, 1, 1, 1]).contiguous(), + beam_idx_tmp, + ) + for i in range(num_layers) + ] + ) + return past_key_values + elif model_type == "bloom": shape_key = (batch_size * num_attention_heads, d_k, 0) shape_value = (batch_size * num_attention_heads, 0, d_k) key = torch.empty(size=shape_key, dtype=self.model_dtype, device=self._device) @@ -505,3 +592,23 @@ def _prepare_inputs_for_generation_for_llama( } ) return model_inputs + + +def _ipex_reorder_cache( + past_key_values: 
Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor +) -> Tuple[Tuple[torch.Tensor]]: + # Ipex patched model uses indirect access kv cache which has a different shape with other transformers models + if len(past_key_values[0]) == 4 and past_key_values[0][0].shape[-1] == 1: + for layer_past in past_key_values: + layer_past[3][layer_past[0].size(-2) - 1] = beam_idx + return past_key_values + elif len(past_key_values[0]) == 8: + for layer_past in past_key_values: + layer_past[3][layer_past[0].size(-2) - 1] = beam_idx + layer_past[7][layer_past[0].size(-2) - 1] = beam_idx + return past_key_values + else: + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) diff --git a/tests/ipex/test_modeling.py b/tests/ipex/test_modeling.py index 03b7d015d1..68119287d8 100644 --- a/tests/ipex/test_modeling.py +++ b/tests/ipex/test_modeling.py @@ -26,6 +26,7 @@ AutoModelForCausalLM, AutoModelForQuestionAnswering, AutoTokenizer, + GenerationConfig, PretrainedConfig, pipeline, set_seed, @@ -42,6 +43,8 @@ IPEXModelForSequenceClassification, IPEXModelForTokenClassification, ) +from optimum.intel.utils.import_utils import is_ipex_version +from optimum.utils.testing_utils import grid_parameters SEED = 42 @@ -216,6 +219,7 @@ class IPEXModelForCausalLMTest(unittest.TestCase): "mpt", "opt", ) + IPEX_PATCHED_SUPPORTED_ARCHITECTURES = ("llama",) GENERATION_LENGTH = 100 SPEEDUP_CACHE = 1.0 @@ -259,6 +263,41 @@ def test_pipeline(self, model_arch): self.assertEqual(pipe.device, model.device) self.assertTrue(all("This is a sample" in item["generated_text"] for item in outputs)) + @parameterized.expand( + grid_parameters( + { + "model_arch": IPEX_PATCHED_SUPPORTED_ARCHITECTURES, + "use_cache": [True, False], + } + ) + ) + @unittest.skipIf(is_ipex_version("<", "2.5.0"), reason="Only ipex version > 2.3.0 supports ipex model patching") + def test_ipex_patching_beam_search(self, test_name, model_arch, use_cache): + model_id = MODEL_NAMES[model_arch] + set_seed(SEED) + model = IPEXModelForCausalLM.from_pretrained(model_id, export=True, use_cache=use_cache) + self.assertEqual(model.use_cache, use_cache) + trasnformers_model = AutoModelForCausalLM.from_pretrained(model_id) + tokenizer = AutoTokenizer.from_pretrained(model_id) + tokenizer.pad_token = tokenizer.eos_token + # Test with batch_size is 1 and 2. 
+ texts = ["This is a sample", ["This is the first input", "This is the second input"]] + generation_configs = ( + GenerationConfig(max_new_tokens=4, num_beams=2, do_sample=True), + GenerationConfig(max_new_tokens=4, num_beams=4, do_sample=True), + GenerationConfig(max_new_tokens=4, num_beams=8, do_sample=True), + GenerationConfig(max_new_tokens=4, num_beams=32, do_sample=True), + GenerationConfig(max_new_tokens=4, do_sample=not use_cache, top_p=1.0, top_k=5, penalty_alpha=0.6), + GenerationConfig(max_new_tokens=4, do_sample=True, top_p=0.9, top_k=0), + ) + for text in texts: + tokens = tokenizer(text, padding=True, return_tensors="pt") + for generation_config in generation_configs: + outputs = model.generate(**tokens, generation_config=generation_config) + transformers_outputs = trasnformers_model.generate(**tokens, generation_config=generation_config) + self.assertIsInstance(outputs, torch.Tensor) + self.assertEqual(outputs, transformers_outputs) + def test_compare_with_and_without_past_key_values(self): model_id = "echarlaix/tiny-random-gpt2-torchscript" tokenizer = AutoTokenizer.from_pretrained(model_id) From 52ae0a364e597cea458482ef0e3360f08f2bd999 Mon Sep 17 00:00:00 2001 From: Alexander Kozlov Date: Fri, 8 Mar 2024 16:07:15 +0400 Subject: [PATCH 30/64] Updates weight quantization section in the docs (#593) --- docs/source/optimization_ov.mdx | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 51067b0b64..088b78f0d3 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -82,7 +82,17 @@ from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig model = OVModelForCausalLM.from_pretrained( model_id, - export=True, + quantization_config=OVWeightQuantizationConfig(bits=4), +) +``` + +You can tune quantization parameters to achieve a better performance accuracy trade-off as follows: + +```python +from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig + +model = OVModelForCausalLM.from_pretrained( + model_id, quantization_config=OVWeightQuantizationConfig(bits=4, sym=False, ratio=0.8, dataset="ptb"), ) ``` From b7517662e7806ddd52757322b552e9006d89cbda Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Fri, 8 Mar 2024 14:20:28 +0100 Subject: [PATCH 31/64] Remove accelerate and onnxruntime from required dependencies (#590) * Remove accelerate dependency * Add accelerate to import backend mapping * Add eval method to OVModels * add onnxruntime install for OV test * fix test expected int8 --- .github/workflows/test_openvino.yml | 2 +- optimum/intel/__init__.py | 37 +++++++++++++------ optimum/intel/openvino/__init__.py | 6 ++- optimum/intel/openvino/quantization.py | 14 +++++-- optimum/intel/utils/__init__.py | 1 + .../utils/dummy_openvino_and_nncf_objects.py | 6 +-- optimum/intel/utils/import_utils.py | 22 ++++++++++- setup.py | 9 +++-- tests/openvino/test_quantization.py | 20 +++++----- tests/openvino/test_stable_diffusion.py | 23 ++++-------- tests/openvino/test_training.py | 12 +++--- tests/openvino/utils_tests.py | 6 +-- 12 files changed, 97 insertions(+), 61 deletions(-) diff --git a/.github/workflows/test_openvino.yml b/.github/workflows/test_openvino.yml index bf9460c75a..6d709eecfd 100644 --- a/.github/workflows/test_openvino.yml +++ b/.github/workflows/test_openvino.yml @@ -32,7 +32,7 @@ jobs: python -m pip install --upgrade pip # install PyTorch CPU version to avoid installing 
CUDA packages on GitHub runner without GPU pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu - pip install .[openvino,openvino-tokenizers,nncf,tests,diffusers] + pip install .[openvino,openvino-tokenizers,tests,diffusers] onnxruntime - name: Test with Pytest run: | pytest tests/openvino/ --ignore test_modeling_basic diff --git a/optimum/intel/__init__.py b/optimum/intel/__init__.py index 93a4417bfc..59059d688d 100644 --- a/optimum/intel/__init__.py +++ b/optimum/intel/__init__.py @@ -18,6 +18,7 @@ from transformers.utils import OptionalDependencyNotAvailable, _LazyModule from .utils import ( + is_accelerate_available, is_diffusers_available, is_ipex_available, is_neural_compressor_available, @@ -29,6 +30,7 @@ _import_structure = { "openvino": [], + "utils.dummy_openvino_and_nncf_objects": [], } try: @@ -57,13 +59,19 @@ if not (is_openvino_available() and is_nncf_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - _import_structure["utils.dummy_openvino_and_nncf_objects"] = [ - "OVQuantizer", - "OVTrainer", - "OVTrainingArguments", - ] + _import_structure["utils.dummy_openvino_and_nncf_objects"].extend(["OVQuantizer", "OVTrainingArguments"]) +else: + _import_structure["openvino"].extend(["OVQuantizer", "OVTrainingArguments"]) + + +try: + if not (is_openvino_available() and is_nncf_available() and is_accelerate_available()): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + _import_structure["utils.dummy_openvino_and_nncf_objects"].extend(["OVTrainer"]) else: - _import_structure["openvino"].extend(["OVQuantizer", "OVTrainer", "OVTrainingArguments"]) + _import_structure["openvino"].extend(["OVTrainer"]) + try: if not (is_openvino_available() and is_diffusers_available()): @@ -145,6 +153,7 @@ "INCSeq2SeqTrainer", "INCTrainer", ] + try: if not (is_neural_compressor_available() and is_diffusers_available()): raise OptionalDependencyNotAvailable() @@ -177,13 +186,17 @@ if not (is_openvino_available() and is_nncf_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from .utils.dummy_openvino_and_nncf_objects import ( - OVQuantizer, - OVTrainer, - OVTrainingArguments, - ) + from .utils.dummy_openvino_and_nncf_objects import OVQuantizer, OVTrainingArguments + else: + from .openvino import OVQuantizer, OVTrainingArguments + + try: + if not (is_openvino_available() and is_nncf_available() and is_accelerate_available()): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + from .utils.dummy_openvino_and_nncf_objects import OVTrainer else: - from .openvino import OVQuantizer, OVTrainer, OVTrainingArguments + from .openvino import OVTrainer try: if not (is_openvino_available() and is_diffusers_available()): diff --git a/optimum/intel/openvino/__init__.py b/optimum/intel/openvino/__init__.py index a6227615a2..1df932771a 100644 --- a/optimum/intel/openvino/__init__.py +++ b/optimum/intel/openvino/__init__.py @@ -14,7 +14,7 @@ import logging -from ..utils.import_utils import is_diffusers_available, is_nncf_available +from ..utils.import_utils import is_accelerate_available, is_diffusers_available, is_nncf_available from .utils import ( OV_DECODER_NAME, OV_DECODER_WITH_PAST_NAME, @@ -37,9 +37,11 @@ patch_torch_operators() from .quantization import OVQuantizer - from .trainer import OVTrainer from .training_args import OVTrainingArguments + if is_accelerate_available(): + from .trainer import OVTrainer + from .configuration 
import OVConfig, OVWeightQuantizationConfig from .modeling import ( diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index d7b88f2be3..cd26f91f22 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -17,7 +17,7 @@ import logging import os from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union +from typing import Any, Callable, Dict, Optional, Tuple, Union import nncf import openvino @@ -56,8 +56,7 @@ if is_datasets_available(): - if TYPE_CHECKING: - from datasets import Dataset + from datasets import Dataset register_module(ignored_algorithms=[])(Conv1D) @@ -147,6 +146,7 @@ def __init__(self, model: transformers.PreTrainedModel, task: Optional[str] = No ) self.task = task or feature self.seed = seed + # TODO : deprecate input_names self.input_names = None signature = inspect.signature(self.model.forward) self._signature_columns = list(signature.parameters.keys()) @@ -526,9 +526,15 @@ def _get_calibration_dataloader( data_collator: Optional[DataCollator] = None, ) -> OVDataLoader: data_collator = data_collator if data_collator is not None else default_data_collator + + if not is_datasets_available() or not isinstance(calibration_dataset, Dataset): + logger.warning( + "`remove_unused_columns` set to `False` as calibration_dataset is not an instance of `datasets.Dataset`" + ) + remove_unused_columns = False + if remove_unused_columns: calibration_dataset = self._remove_unused_columns(calibration_dataset) - self.input_names = calibration_dataset.column_names generator = torch.Generator() generator.manual_seed(self.seed) sampler = RandomSampler(calibration_dataset, generator=generator) diff --git a/optimum/intel/utils/__init__.py b/optimum/intel/utils/__init__.py index 4e7522ee77..d77588f896 100644 --- a/optimum/intel/utils/__init__.py +++ b/optimum/intel/utils/__init__.py @@ -16,6 +16,7 @@ _neural_compressor_version, _torch_version, compare_versions, + is_accelerate_available, is_diffusers_available, is_ipex_available, is_neural_compressor_available, diff --git a/optimum/intel/utils/dummy_openvino_and_nncf_objects.py b/optimum/intel/utils/dummy_openvino_and_nncf_objects.py index 45c390aff2..8ae3135667 100644 --- a/optimum/intel/utils/dummy_openvino_and_nncf_objects.py +++ b/optimum/intel/utils/dummy_openvino_and_nncf_objects.py @@ -27,14 +27,14 @@ def from_pretrained(cls, *args, **kwargs): class OVTrainer(metaclass=DummyObject): - _backends = ["openvino", "nncf"] + _backends = ["openvino", "nncf", "accelerate"] def __init__(self, *args, **kwargs): - requires_backends(self, ["openvino", "nncf"]) + requires_backends(self, ["openvino", "nncf", "accelerate"]) @classmethod def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["openvino", "nncf"]) + requires_backends(cls, ["openvino", "nncf", "accelerate"]) class OVQuantizer(metaclass=DummyObject): diff --git a/optimum/intel/utils/import_utils.py b/optimum/intel/utils/import_utils.py index b4cdce9729..4d6f84aa35 100644 --- a/optimum/intel/utils/import_utils.py +++ b/optimum/intel/utils/import_utils.py @@ -165,6 +165,16 @@ _datasets_available = False +_accelerate_available = importlib.util.find_spec("accelerate") is not None +_accelerate_version = "N/A" + +if _accelerate_available: + try: + _accelerate_version = importlib_metadata.version("accelerate") + except importlib_metadata.PackageNotFoundError: + _accelerate_available = False + + def is_transformers_available(): return 
_transformers_available @@ -209,6 +219,10 @@ def is_datasets_available(): return _datasets_available +def is_accelerate_available(): + return _accelerate_available + + # This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319 def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): """ @@ -344,6 +358,11 @@ def is_timm_version(operation: str, version: str): `pip install datasets`. Please note that you may need to restart your runtime after installation. """ +ACCELERATE_IMPORT_ERROR = """ +{0} requires the accelerate library but it was not found in your environment. You can install it with pip: +`pip install accelerate`. Please note that you may need to restart your runtime after installation. +""" + BACKENDS_MAPPING = OrderedDict( [ ("diffusers", (is_diffusers_available, DIFFUSERS_IMPORT_ERROR)), @@ -353,8 +372,9 @@ def is_timm_version(operation: str, version: str): ("neural_compressor", (is_neural_compressor_available, NEURAL_COMPRESSOR_IMPORT_ERROR)), ( "intel_extension_for_transformers", - (is_intel_extension_for_transformers_available, NEURAL_COMPRESSOR_IMPORT_ERROR), + (is_intel_extension_for_transformers_available, INTEL_EXTENSION_FOR_TRANSFORMERS_IMPORT_ERROR), ), + ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), ] ) diff --git a/setup.py b/setup.py index a4e67d46c2..d9427f3bbd 100644 --- a/setup.py +++ b/setup.py @@ -18,10 +18,11 @@ "datasets>=1.4.0", "sentencepiece", "scipy", - "accelerate", # transformers 4.29 require accelerate for PyTorch + "onnx", ] TESTS_REQUIRE = [ + "accelerate", "pytest", "parameterized", "Pillow", @@ -41,11 +42,11 @@ QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241"] EXTRAS_REQUIRE = { - "neural-compressor": ["neural-compressor>=2.2.0", "onnx", "onnxruntime<1.15.0"], - "openvino": ["openvino>=2023.3", "onnx", "onnxruntime", "nncf>=2.8.1"], + "neural-compressor": ["neural-compressor>=2.2.0", "onnxruntime<1.15.0", "accelerate"], + "openvino": ["openvino>=2023.3", "nncf>=2.8.1"], "openvino-tokenizers": ["openvino-tokenizers[transformers]"], "nncf": ["nncf>=2.8.1"], - "ipex": ["intel-extension-for-pytorch", "onnx"], + "ipex": ["intel-extension-for-pytorch"], "diffusers": ["diffusers"], "quality": QUALITY_REQUIRE, "tests": TESTS_REQUIRE, diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 0ef89ec8b8..a33e0339f3 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -154,16 +154,16 @@ class OVWeightCompressionTest(unittest.TestCase): # TODO : add models SUPPORTED_ARCHITECTURES_WITH_EXPECTED_8BIT_COMPRESSED_MATMULS = ( (OVModelForSequenceClassification, "hf-internal-testing/tiny-random-bert", 70, 70), - (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 46), + (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 44), ) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 64, 365),) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 388),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 62, 365),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 385),) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTO_COMPRESSED_MATMULS = ( - (OVModelForCausalLM, 
"hf-internal-testing/tiny-random-OPTForCausalLM", 16, 136), + (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 14, 136), ) SUPPORTED_ARCHITECTURES_STATEFUL_WITH_EXPECTED_8BIT_COMPRESSED_MATMULS = ( - (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 46), + (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 44), ) LOAD_IN_4_BITS_SCOPE = ( @@ -171,7 +171,7 @@ class OVWeightCompressionTest(unittest.TestCase): OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", dict(bits=4, sym=False, group_size=-1, ratio=0.8), - 16, + 14, ), ( OVModelForCausalLM, @@ -182,13 +182,13 @@ class OVWeightCompressionTest(unittest.TestCase): group_size=32, ignored_scope={"names": ["__module.model.transformer.h.2.mlp.c_fc/aten::addmm/MatMul"]}, ), - 6, + 4, ), ( OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", dict(bits=4, sym=False, group_size=-1, ratio=0.8, all_layers=True), - 22, + 18, ), ( OVModelForCausalLM, @@ -201,7 +201,7 @@ class OVWeightCompressionTest(unittest.TestCase): sensitivity_metric="mean_activation_magnitude", dataset="ptb", ), - 16, + 14, ), ( OVModelForCausalLM, @@ -215,7 +215,7 @@ class OVWeightCompressionTest(unittest.TestCase): dataset="ptb", awq=True, ), - 16, + 14, ), ) diff --git a/tests/openvino/test_stable_diffusion.py b/tests/openvino/test_stable_diffusion.py index d8cef2e027..ab6f6f21a6 100644 --- a/tests/openvino/test_stable_diffusion.py +++ b/tests/openvino/test_stable_diffusion.py @@ -28,7 +28,6 @@ from diffusers.utils import load_image from diffusers.utils.testing_utils import floats_tensor from openvino.runtime.ie_api import CompiledModel -from packaging.version import Version, parse from parameterized import parameterized from utils_tests import MODEL_NAMES, SEED @@ -46,13 +45,8 @@ OVModelVaeDecoder, OVModelVaeEncoder, ) -from optimum.onnxruntime import ( - ORTStableDiffusionImg2ImgPipeline, - ORTStableDiffusionInpaintPipeline, - ORTStableDiffusionXLImg2ImgPipeline, - ORTStableDiffusionXLPipeline, -) -from optimum.utils.import_utils import _diffusers_version +from optimum.intel.utils.import_utils import is_diffusers_version +from optimum.utils.import_utils import is_onnxruntime_available F32_CONFIG = {"INFERENCE_PRECISION_HINT": "f32"} @@ -167,7 +161,6 @@ def generate_inputs(self, height=128, width=128, batch_size=1): class OVStableDiffusionImg2ImgPipelineTest(OVStableDiffusionPipelineBaseTest): SUPPORTED_ARCHITECTURES = ("stable-diffusion",) MODEL_CLASS = OVStableDiffusionImg2ImgPipeline - ORT_MODEL_CLASS = ORTStableDiffusionImg2ImgPipeline TASK = "image-to-image" @parameterized.expand(SUPPORTED_ARCHITECTURES) @@ -298,11 +291,13 @@ def test_height_width_properties(self, model_arch: str): class OVStableDiffusionInpaintPipelineTest(OVStableDiffusionPipelineBaseTest): SUPPORTED_ARCHITECTURES = ("stable-diffusion",) MODEL_CLASS = OVStableDiffusionInpaintPipeline - ORT_MODEL_CLASS = ORTStableDiffusionInpaintPipeline TASK = "inpaint" @parameterized.expand(SUPPORTED_ARCHITECTURES) + @unittest.skipIf(not is_onnxruntime_available(), "this test requires onnxruntime") def test_compare_diffusers_pipeline(self, model_arch: str): + from optimum.onnxruntime import ORTStableDiffusionInpaintPipeline + model_id = MODEL_NAMES[model_arch] pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True, ov_config=F32_CONFIG) batch_size, num_images, height, width = 1, 1, 64, 64 @@ -329,7 +324,7 @@ def test_compare_diffusers_pipeline(self, model_arch: str): outputs = pipeline(**inputs, latents=latents).images 
self.assertEqual(outputs.shape, (batch_size * num_images, height, width, 3)) - ort_pipeline = self.ORT_MODEL_CLASS.from_pretrained(model_id, export=True) + ort_pipeline = ORTStableDiffusionInpaintPipeline.from_pretrained(model_id, export=True) ort_outputs = ort_pipeline(**inputs, latents=latents).images self.assertTrue(np.allclose(outputs, ort_outputs, atol=1e-1)) @@ -358,7 +353,6 @@ def generate_inputs(self, height=128, width=128, batch_size=1): class OVtableDiffusionXLPipelineTest(unittest.TestCase): SUPPORTED_ARCHITECTURES = ("stable-diffusion-xl",) MODEL_CLASS = OVStableDiffusionXLPipeline - ORT_MODEL_CLASS = ORTStableDiffusionXLPipeline PT_MODEL_CLASS = StableDiffusionXLPipeline TASK = "text-to-image" @@ -444,7 +438,6 @@ def test_num_images_per_prompt_static_model(self, model_arch: str): class OVStableDiffusionXLImg2ImgPipelineTest(unittest.TestCase): SUPPORTED_ARCHITECTURES = ("stable-diffusion-xl", "stable-diffusion-xl-refiner") MODEL_CLASS = OVStableDiffusionXLImg2ImgPipeline - ORT_MODEL_CLASS = ORTStableDiffusionXLImg2ImgPipeline PT_MODEL_CLASS = StableDiffusionXLImg2ImgPipeline TASK = "image-to-image" @@ -489,7 +482,7 @@ class OVLatentConsistencyModelPipelineTest(unittest.TestCase): TASK = "text-to-image" @parameterized.expand(SUPPORTED_ARCHITECTURES) - @unittest.skipIf(parse(_diffusers_version) <= Version("0.21.4"), "not supported with this diffusers version") + @unittest.skipIf(is_diffusers_version("<=", "0.21.4"), "not supported with this diffusers version") def test_compare_to_diffusers(self, model_arch: str): ov_pipeline = self.MODEL_CLASS.from_pretrained(MODEL_NAMES[model_arch], export=True, ov_config=F32_CONFIG) self.assertIsInstance(ov_pipeline.text_encoder, OVModelTextEncoder) @@ -532,7 +525,7 @@ def test_compare_to_diffusers(self, model_arch: str): self.assertEqual(pipeline.device.type, ov_pipeline.device) @parameterized.expand(SUPPORTED_ARCHITECTURES) - @unittest.skipIf(parse(_diffusers_version) <= Version("0.21.4"), "not supported with this diffusers version") + @unittest.skipIf(is_diffusers_version("<=", "0.21.4"), "not supported with this diffusers version") def test_num_images_per_prompt_static_model(self, model_arch: str): model_id = MODEL_NAMES[model_arch] pipeline = self.MODEL_CLASS.from_pretrained(model_id, export=True, compile=False, dynamic_shapes=False) diff --git a/tests/openvino/test_training.py b/tests/openvino/test_training.py index 937c0bf3f5..80298faf2b 100644 --- a/tests/openvino/test_training.py +++ b/tests/openvino/test_training.py @@ -365,7 +365,7 @@ def tearDown(self): "default_quantization,structured_movement_sparsity": OVTrainerTestDescriptor( model_id="hf-internal-testing/tiny-random-bert", nncf_compression_config=[DEFAULT_QUANTIZATION_CONFIG, STRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT], - expected_fake_quantize=44, + expected_fake_quantize=34, expected_int8=32, expected_binary_masks=60, compression_metrics=["compression_loss"], @@ -376,7 +376,7 @@ def tearDown(self): CUSTOMIZED_QUANTIZATION_CONFIG, STRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT, ], - expected_fake_quantize=44, + expected_fake_quantize=34, expected_int8=32, expected_binary_masks=60, compression_metrics=["compression_loss"], @@ -385,7 +385,7 @@ def tearDown(self): model_id="hf-internal-testing/tiny-random-bert", teacher_model_id="hf-internal-testing/tiny-random-bert", nncf_compression_config=[DEFAULT_QUANTIZATION_CONFIG, STRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT], - expected_fake_quantize=44, + expected_fake_quantize=34, expected_int8=32, expected_binary_masks=60, 
compression_metrics=["compression_loss", "distillation_loss", "task_loss"], @@ -397,7 +397,7 @@ def tearDown(self): CUSTOMIZED_QUANTIZATION_CONFIG, STRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_BERT, ], - expected_fake_quantize=44, + expected_fake_quantize=34, expected_int8=32, expected_binary_masks=60, compression_metrics=["compression_loss", "distillation_loss", "task_loss"], @@ -749,7 +749,7 @@ def check_ovmodel_reshaping(self, ovmodel: OVModel): "quantization,structured_movement_sparsity": OVTrainerTestDescriptor( model_id="hf-internal-testing/tiny-random-Wav2Vec2Model", nncf_compression_config=[QUANTIZATION_CONFIG_FOR_WAV2VEC2, STRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_WAV2VEC2], - expected_fake_quantize=48, + expected_fake_quantize=40, expected_int8=30, expected_binary_masks=48, compression_metrics=["compression_loss"], @@ -766,7 +766,7 @@ def check_ovmodel_reshaping(self, ovmodel: OVModel): model_id="hf-internal-testing/tiny-random-Wav2Vec2Model", teacher_model_id="hf-internal-testing/tiny-random-Wav2Vec2Model", nncf_compression_config=[QUANTIZATION_CONFIG_FOR_WAV2VEC2, STRUCTURED_MOVEMENT_SPARSITY_CONFIG_FOR_WAV2VEC2], - expected_fake_quantize=48, + expected_fake_quantize=40, expected_int8=30, expected_binary_masks=48, compression_metrics=["compression_loss", "distillation_loss", "task_loss"], diff --git a/tests/openvino/utils_tests.py b/tests/openvino/utils_tests.py index 8fabb34e38..04049172d3 100644 --- a/tests/openvino/utils_tests.py +++ b/tests/openvino/utils_tests.py @@ -102,12 +102,12 @@ SEED = 42 _ARCHITECTURES_TO_EXPECTED_INT8 = { - "bert": (70,), + "bert": (68,), "roberta": (68,), "albert": (84,), "vit": (64,), "blenderbot": (70,), - "gpt2": (46,), + "gpt2": (44,), "wav2vec2": (34,), "distilbert": (66,), "t5": (64, 104, 84), @@ -116,7 +116,7 @@ "stable-diffusion-xl-refiner": (366, 34, 42, 66), } -_ARCHITECTURES_TO_EXPECTED_INT4_INT8 = {"opt125m": (64, 477)} +_ARCHITECTURES_TO_EXPECTED_INT4_INT8 = {"opt125m": (62, 477)} def get_num_quantized_nodes(ov_model): From 7674e33ecaeb645445fbcdc7faaeef91f8e699e4 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Mon, 11 Mar 2024 11:04:56 +0100 Subject: [PATCH 32/64] Fix OpenVINO image classification examples (#598) --- .../run_image_classification.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/examples/openvino/image-classification/run_image_classification.py b/examples/openvino/image-classification/run_image_classification.py index 8a7c009e46..5f98d95cb5 100644 --- a/examples/openvino/image-classification/run_image_classification.py +++ b/examples/openvino/image-classification/run_image_classification.py @@ -151,12 +151,12 @@ class ModelArguments: metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) - use_auth_token: bool = field( - default=False, + token: str = field( + default=None, metadata={ "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." + "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " + "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." 
) }, ) @@ -239,8 +239,7 @@ def main(): data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, - task="image-classification", - use_auth_token=True if model_args.use_auth_token else None, + token=model_args.token, ) else: data_files = {} @@ -252,7 +251,6 @@ def main(): "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, - task="image-classification", ) # If we don't have a validation split, split off a percentage of train as validation. @@ -287,7 +285,7 @@ def compute_metrics(p): finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=model_args.token, ) model = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path, @@ -295,7 +293,7 @@ def compute_metrics(p): config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=model_args.token, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) @@ -311,7 +309,7 @@ def compute_metrics(p): model_args.feature_extractor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, + token=model_args.token, ) # Define torchvision transforms to be applied to each image. From 1e734508ccbb801a951725ab043af733b3f4167c Mon Sep 17 00:00:00 2001 From: Ekaterina Aidova Date: Mon, 11 Mar 2024 19:24:47 +0400 Subject: [PATCH 33/64] Fix weights compression for OPenVINO models (#596) * hot fix for weights compression * rewrite mcok tests --- optimum/intel/openvino/modeling_decoder.py | 8 +-- tests/openvino/test_quantization.py | 64 ++++++++++++++++------ 2 files changed, 50 insertions(+), 22 deletions(-) diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 92a2ce436d..3d9671caf1 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -261,10 +261,10 @@ def _from_transformers( task = task + "-with-past" # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size - if load_in_8bit is None or not quantization_config: - ov_config = None + if load_in_8bit is None and not quantization_config: + ov_export_config = None else: - ov_config = OVConfig(dtype="fp32") + ov_export_config = OVConfig(dtype="fp32") stateful = kwargs.pop("stateful", ensure_stateful_is_available(warn=False) and use_cache) @@ -279,7 +279,7 @@ def _from_transformers( local_files_only=local_files_only, force_download=force_download, trust_remote_code=trust_remote_code, - ov_config=ov_config, + ov_config=ov_export_config, stateful=stateful, ) diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index a33e0339f3..57c45df6ec 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -459,36 +459,64 @@ def test_ovmodel_load_with_uncompressed_weights(self, model_cls, model_type): self.assertEqual(0, num_int8) def test_ovmodel_load_large_model_with_default_compressed_weights(self): - with unittest.mock.patch("transformers.modeling_utils.ModuleUtilsMixin") as model_mixin_patch: - model_mixin_patch.num_parameters.return_value = 2e9 + with unittest.mock.patch("torch.nn.Module.parameters") as model_parameters: + mock_tensor = unittest.mock.Mock() + 
mock_tensor.numel = lambda: 2000000000 + mock_tensor.requires_grad = True + model_parameters.return_value = [mock_tensor] with unittest.mock.patch("openvino.runtime.ie_api.Core.read_model") as core_patch: with unittest.mock.patch("optimum.exporters.openvino.convert._save_model") as save_model_patch: _ = OVModelForCausalLM.from_pretrained( MODEL_NAMES["llama"], export=True, compile=False, use_cache=False ) - saving_params = { - "model": unittest.mock.ANY, - "path": unittest.mock.ANY, - "compression_option": "int8", - "compression_ratio": None, - } - save_model_patch.aasert_called_with(saving_params) + save_model_patch.assert_called_with( + unittest.mock.ANY, unittest.mock.ANY, ov_config=OVConfig(quantization_config={"bits": 8}) + ) def test_ovmodel_load_large_model_with_uncompressed_weights(self): - with unittest.mock.patch("transformers.modeling_utils.ModuleUtilsMixin") as model_mixin_patch: - model_mixin_patch.num_parameters.return_value = 2e9 + with unittest.mock.patch("torch.nn.Module.parameters") as model_parameters: + mock_tensor = unittest.mock.Mock() + mock_tensor.numel = lambda: 2000000000 + mock_tensor.requires_grad = True + model_parameters.return_value = [mock_tensor] with unittest.mock.patch("openvino.runtime.ie_api.Core.read_model") as core_patch: with unittest.mock.patch("optimum.exporters.openvino.convert._save_model") as save_model_patch: _ = OVModelForCausalLM.from_pretrained( MODEL_NAMES["llama"], export=True, load_in_8bit=False, compile=False, use_cache=False ) - saving_params = { - "model": unittest.mock.ANY, - "path": unittest.mock.ANY, - "compression_option": "fp32", - "compression_ratio": None, - } - save_model_patch.aasert_called_with(saving_params) + save_model_patch.assert_called_with( + unittest.mock.ANY, unittest.mock.ANY, ov_config=OVConfig(dtype="fp32") + ) + + def test_ovmodel_load_large_model_with_additional_quantization_config(self): + with unittest.mock.patch("torch.nn.Module.parameters") as model_parameters: + mock_tensor = unittest.mock.Mock() + mock_tensor.numel = lambda: 2000000000 + mock_tensor.requires_grad = True + with unittest.mock.patch("openvino.runtime.ie_api.Core.read_model") as core_patch: + with unittest.mock.patch("optimum.exporters.openvino.convert._save_model") as save_model_patch: + with unittest.mock.patch("nncf.compress_weights") as compress_weights_patch: + _ = OVModelForCausalLM.from_pretrained( + MODEL_NAMES["llama"], + export=True, + compile=False, + use_cache=False, + quantization_config=OVWeightQuantizationConfig(bits=4, sym=True, group_size=-1, ratio=0.8), + ) + # quantization will be performed later, using load_model + save_model_patch.assert_called_with( + unittest.mock.ANY, unittest.mock.ANY, ov_config=OVConfig(dtype="fp32") + ) + compression_params = { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "ratio": 0.8, + "group_size": -1, + "all_layers": None, + "sensitivity_metric": None, + "dataset": None, + "ignored_scope": None, + } + compress_weights_patch.assert_called_with(unittest.mock.ANY, **compression_params) class OVQuantizerQATest(unittest.TestCase): From dc14a2bb64b757a72b5d6944d285d9bad7ef0271 Mon Sep 17 00:00:00 2001 From: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Date: Mon, 11 Mar 2024 17:37:19 +0100 Subject: [PATCH 34/64] Fix default ov config (#600) --- optimum/intel/openvino/modeling.py | 4 ++-- optimum/intel/openvino/modeling_base.py | 4 ++-- optimum/intel/openvino/modeling_base_seq2seq.py | 4 ++-- optimum/intel/openvino/modeling_decoder.py | 2 +- optimum/intel/openvino/modeling_diffusion.py | 
4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/optimum/intel/openvino/modeling.py b/optimum/intel/openvino/modeling.py index 7831305d5f..357ca94c07 100644 --- a/optimum/intel/openvino/modeling.py +++ b/optimum/intel/openvino/modeling.py @@ -434,8 +434,8 @@ def _from_transformers( save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) - # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size - if load_in_8bit is None or not quantization_config: + # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None and not quantization_config: ov_config = None else: ov_config = OVConfig(dtype="fp32") diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index af00f7a06e..7ab99aab42 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -314,8 +314,8 @@ def _from_transformers( save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) - # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size - if load_in_8bit is None or not quantization_config: + # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None and not quantization_config: ov_config = None else: ov_config = OVConfig(dtype="fp32") diff --git a/optimum/intel/openvino/modeling_base_seq2seq.py b/optimum/intel/openvino/modeling_base_seq2seq.py index 3cb43e61b8..28e112c4d9 100644 --- a/optimum/intel/openvino/modeling_base_seq2seq.py +++ b/optimum/intel/openvino/modeling_base_seq2seq.py @@ -258,8 +258,8 @@ def _from_transformers( if use_cache: task = task + "-with-past" - # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size - if load_in_8bit is None or not quantization_config: + # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None and not quantization_config: ov_config = None else: ov_config = OVConfig(dtype="fp32") diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index 3d9671caf1..edc88d02cb 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -260,7 +260,7 @@ def _from_transformers( if use_cache: task = task + "-with-past" - # If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size if load_in_8bit is None and not quantization_config: ov_export_config = None else: diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index 5e8a0cdc59..a985f43d7c 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -321,8 +321,8 @@ def _from_transformers( save_dir = TemporaryDirectory() save_dir_path = Path(save_dir.name) - # If load_in_8bit 
or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size - if load_in_8bit is None or not quantization_config: + # If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size + if load_in_8bit is None and not quantization_config: ov_config = None else: ov_config = OVConfig(dtype="fp32") From de243f018ee4002aa26733f7388d8dda45211426 Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Tue, 12 Mar 2024 11:20:13 +0100 Subject: [PATCH 35/64] Add warning for transformers>=4.38 and OpenVINO 2024.0 (#599) * Add warning for transformers>=4.38 and OpenVINO 2024.0 * Use is_openvino_version to compare versions * Show version warning only for llama and gpt-bigcode * Fix style, show OpenVINO version * Include affected model types in warning message --- optimum/exporters/openvino/model_patcher.py | 23 +++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/optimum/exporters/openvino/model_patcher.py b/optimum/exporters/openvino/model_patcher.py index aea57161e2..91dc48df05 100644 --- a/optimum/exporters/openvino/model_patcher.py +++ b/optimum/exporters/openvino/model_patcher.py @@ -15,21 +15,24 @@ import logging as log from optimum.intel.utils.import_utils import ( + _openvino_version, _torch_version, _transformers_version, + is_openvino_version, is_torch_version, is_transformers_version, ) def patch_model_with_bettertransformer(model): + COLOR_RED = "\033[1;31m" + COLOR_RESET = "\033[0m" + # check that the model has not yet been pathced if hasattr(model, "use_bettertransformer") and model.use_bettertransformer is True: return model if is_transformers_version("<", "4.36") or is_torch_version("<", "2.1.1"): - COLOR_RED = "\033[1;31m" - COLOR_RESET = "\033[0m" log.warn( COLOR_RED + "[WARNING] For good performance with stateful models, transformers>=4.36.2 and PyTorch>=2.1.1 are required. " @@ -39,6 +42,22 @@ def patch_model_with_bettertransformer(model): + COLOR_RESET ) + if ( + getattr(model.config, "model_type") in {"gpt_bigcode", "llama"} + and is_transformers_version(">=", "4.38") + and is_openvino_version("<", "2024.1.0-14612") + ): + # display commit-id only when a nightly/prerelease of OpenVINO is installed. + display_version = ( + _openvino_version.split("-")[0] if is_openvino_version("<=", "2024.0.0-14509") else _openvino_version + ) + log.warn( + COLOR_RED + f"[WARNING] Stateful models are not supported for Llama and GPTBigCode with Transformers " + f"{_transformers_version} and OpenVINO {display_version}. For good performance, consider using a nightly OpenVINO build: " + "https://docs.openvino.ai/2024/get-started/install-openvino.html. 
For models that do not need transformers " + "4.38+, it is also an option to downgrade transformers: `pip install transformers==4.37.2`" + COLOR_RESET + ) + # model already has required SDPA implementation if getattr(model, "_supports_sdpa", False) and getattr(model.config, "_attn_implementation", "eager") == "sdpa": return model From 345f9e51eddb589ecfd053075e94b41c0d2d413a Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 12 Mar 2024 10:40:33 +0000 Subject: [PATCH 36/64] Add hybrid quantization for StableDiffusion pipelines (#584) * Add hybrid quantization for StableDiffusion pipelines * apply black * fix tests * fix ruff * fix lcm bug * apply review comments * rework dataset processing * Add doc * remove SDXL test * Apply comments * reformat --- docs/source/optimization_ov.mdx | 17 ++++ optimum/intel/openvino/configuration.py | 37 ++++--- optimum/intel/openvino/modeling_decoder.py | 3 +- optimum/intel/openvino/modeling_diffusion.py | 101 ++++++++++++++++++- optimum/intel/openvino/quantization.py | 95 ++++++++++++++++- optimum/intel/openvino/utils.py | 7 ++ tests/openvino/test_quantization.py | 45 ++++++++- tests/openvino/utils_tests.py | 6 +- 8 files changed, 283 insertions(+), 28 deletions(-) diff --git a/docs/source/optimization_ov.mdx b/docs/source/optimization_ov.mdx index 088b78f0d3..70c98f14f7 100644 --- a/docs/source/optimization_ov.mdx +++ b/docs/source/optimization_ov.mdx @@ -69,6 +69,23 @@ from optimum.intel import OVModelForCausalLM model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True) ``` +## Hybrid quantization + +Traditional optimization methods like post-training 8-bit quantization do not work well for Stable Diffusion models and can lead to poor generation results. On the other hand, weight compression does not improve performance significantly when applied to Stable Diffusion models, as the size of activations is comparable to weights. +The UNet model takes up most of the overall execution time of the pipeline. Thus, optimizing just one model brings substantial benefits in terms of inference speed while keeping acceptable accuracy without fine-tuning. Quantizing the rest of the diffusion pipeline does not significantly improve inference performance but could potentially lead to substantial degradation of accuracy. +Therefore, the proposal is to apply quantization in *hybrid mode* for the UNet model and weight-only quantization for the rest of the pipeline components. The hybrid mode involves the quantization of weights in MatMul and Embedding layers, and activations of other layers, facilitating accuracy preservation post-optimization while reducing the model size. +The `quantization_config` defines the optimization parameters for the Stable Diffusion pipeline. To enable hybrid quantization, specify the quantization dataset in the `quantization_config`. Otherwise, weight-only quantization to a specified data type (8 or 4 bits) is applied to the UNet model. + +```python +from optimum.intel import OVStableDiffusionPipeline, OVWeightQuantizationConfig + +model = OVStableDiffusionPipeline.from_pretrained( + model_id, + export=True, + quantization_config=OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions"), +) +``` + `load_in_8bit` is enabled by default for the models larger than 1 billion parameters.
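The hybrid path introduced here also covers the Stable Diffusion XL and Latent Consistency Model pipelines, and the new `num_samples` field of `OVWeightQuantizationConfig` caps how many calibration samples are collected. A minimal sketch for an SDXL pipeline follows; the model id and output directory are illustrative and not taken from the patch:

```python
from optimum.intel import OVStableDiffusionXLPipeline, OVWeightQuantizationConfig

# Passing a dataset switches the UNet to hybrid quantization; the other pipeline
# components are weight-only compressed to 8 bits.
pipeline = OVStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # illustrative checkpoint
    export=True,
    quantization_config=OVWeightQuantizationConfig(
        bits=8,
        dataset="laion/filtered-wit",  # one of the predefined diffusion calibration datasets
        num_samples=200,               # matches the default used when no value is given
    ),
)
pipeline.save_pretrained("sdxl-hybrid-int8")  # illustrative output directory
```

A plain Python list of prompts can also be passed as `dataset` instead of a dataset name, which the new tests exercise and which is convenient when a few representative prompts are enough.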
diff --git a/optimum/intel/openvino/configuration.py b/optimum/intel/openvino/configuration.py index 8ddd005279..40a60bb58e 100644 --- a/optimum/intel/openvino/configuration.py +++ b/optimum/intel/openvino/configuration.py @@ -167,7 +167,7 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): bits (`int`, defaults to 8): The number of bits to quantize to. - sym (`bool`, *optional*, defaults to `False`): + sym (`bool`, defaults to `False`): Whether to use symetric quantization. tokenizer (`str` or `PreTrainedTokenizerBase`, *optional*): The tokenizer used to process the dataset. You can pass either: @@ -177,23 +177,24 @@ class OVWeightQuantizationConfig(QuantizationConfigMixin): user or organization name, like `dbmdz/bert-base-german-cased`. - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. - dataset (`Union[List[str]]`, *optional*): - The dataset used for data-aware compression. You can provide your own dataset in a list of string or just use the - the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] - group_size (`int`, *optional*, defaults to 128): - The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. - ratio (`float`, *optional*, defaults to 1.0): + dataset (`str or List[str]`, *optional*): + The dataset used for data-aware compression or quantization with NNCF. You can provide your own dataset + in a list of strings or just use the one from the list ['wikitext2','c4','c4-new','ptb','ptb-new'] for LLLMs + or ['conceptual_captions','laion/220k-GPT4Vision-captions-from-LIVIS','laion/filtered-wit'] for diffusion models. + ratio (`float`, defaults to 1.0): The ratio between baseline and backup precisions (e.g. 0.9 means 90% of layers quantized to INT4_ASYM and the rest to INT8_ASYM). + group_size (`int`, *optional*): + The group size to use for quantization. Recommended value is 128 and -1 uses per-column quantization. all_layers (`bool`, *optional*): Defines how many layers are compressed to 4-bits while the rest are kept in 8-bit presicion. - sensitivity_metric (`nncf.SensitivityMetric`, *optional*): + sensitivity_metric (`str`, *optional*): The sensitivity metric for assigning quantization precision to layers. In order to preserve the accuracy of the model, the more sensitive layers receives a higher precision. - awq (`bool`, *optional*): - Enables AWQ method to unify weight ranges and improve overall model accuracy. - ignored_scope (`nncf.IgnoredScope`, *optional*): + ignored_scope (`dict`, *optional*): An ignored scope that defined the list of model control flow graph nodes to be ignored during quantization. + num_samples (`int`, *optional*): + The maximum number of samples composing the calibration dataset. 
""" @@ -202,12 +203,13 @@ def __init__( bits: int = 8, sym: bool = False, tokenizer: Optional[Any] = None, - dataset: Optional[str] = None, + dataset: Optional[Union[str, List[str]]] = None, ratio: float = 1.0, group_size: Optional[int] = None, all_layers: Optional[bool] = None, sensitivity_metric: Optional[str] = None, ignored_scope: Optional[dict] = None, + num_samples: Optional[int] = None, **kwargs, ): self.bits = bits @@ -219,6 +221,7 @@ def __init__( self.all_layers = all_layers self.sensitivity_metric = sensitivity_metric self.ignored_scope = ignored_scope + self.num_samples = num_samples self.quant_method = "default" # TODO : enable AWQ after nncf v2.9.0 release self.post_init() @@ -231,10 +234,16 @@ def post_init(self): if self.group_size is not None and self.group_size != -1 and self.group_size <= 0: raise ValueError("`group_size` must be greater than 0 or equal to -1") if self.dataset is not None and isinstance(self.dataset, str): - if self.dataset not in ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"]: + llm_datasets = ["wikitext2", "c4", "c4-new", "ptb", "ptb-new"] + stable_diffusion_datasets = [ + "conceptual_captions", + "laion/220k-GPT4Vision-captions-from-LIVIS", + "laion/filtered-wit", + ] + if self.dataset not in llm_datasets + stable_diffusion_datasets: raise ValueError( f"""You have entered a string value for dataset. You can only choose between - ['wikitext2','c4','c4-new','ptb','ptb-new'], but we found {self.dataset}""" + {llm_datasets} for LLLMs or {stable_diffusion_datasets} for diffusion models, but we found {self.dataset}""" ) if self.bits not in [4, 8]: diff --git a/optimum/intel/openvino/modeling_decoder.py b/optimum/intel/openvino/modeling_decoder.py index edc88d02cb..53aa05bc5a 100644 --- a/optimum/intel/openvino/modeling_decoder.py +++ b/optimum/intel/openvino/modeling_decoder.py @@ -635,7 +635,8 @@ def _from_pretrained( # from optimum.gptq.utils import get_seqlen # seqlen = get_seqlen(causal_model) - dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32) + nsamples = quantization_config.num_samples if quantization_config.num_samples else 128 + dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32, nsamples=nsamples) dataset = prepare_dataset(dataset) quantization_config = copy.deepcopy(quantization_config) quantization_config.dataset = nncf.Dataset(dataset, lambda x: causal_model.prepare_inputs(**x)) diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index a985f43d7c..c0588a6f11 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -16,6 +16,7 @@ import logging import os import shutil +from copy import deepcopy from pathlib import Path from tempfile import TemporaryDirectory, gettempdir from typing import Any, Dict, List, Optional, Union @@ -57,7 +58,13 @@ from .configuration import OVConfig, OVWeightQuantizationConfig from .loaders import OVTextualInversionLoaderMixin from .modeling_base import OVBaseModel -from .utils import ONNX_WEIGHTS_NAME, OV_TO_NP_TYPE, OV_XML_FILE_NAME, _print_compiled_model_properties +from .utils import ( + ONNX_WEIGHTS_NAME, + OV_TO_NP_TYPE, + OV_XML_FILE_NAME, + PREDEFINED_SD_DATASETS, + _print_compiled_model_properties, +) core = Core() @@ -274,9 +281,19 @@ def _from_pretrained( kwargs[name] = load_method(new_model_save_dir) quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit) - unet = cls.load_model( - new_model_save_dir / 
DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config - ) + + unet_path = new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name + if quantization_config is not None and quantization_config.dataset is not None: + # load the UNet model uncompressed to apply hybrid quantization further + unet = cls.load_model(unet_path) + # Apply weights compression to other `components` without dataset + weight_quantization_params = { + param: value for param, value in quantization_config.__dict__.items() if param != "dataset" + } + weight_quantization_config = OVWeightQuantizationConfig.from_dict(weight_quantization_params) + else: + weight_quantization_config = quantization_config + unet = cls.load_model(unet_path, weight_quantization_config) components = { "vae_encoder": new_model_save_dir / DIFFUSION_MODEL_VAE_ENCODER_SUBFOLDER / vae_encoder_file_name, @@ -286,11 +303,29 @@ def _from_pretrained( } for key, value in components.items(): - components[key] = cls.load_model(value, quantization_config) if value.is_file() else None + components[key] = cls.load_model(value, weight_quantization_config) if value.is_file() else None if model_save_dir is None: model_save_dir = new_model_save_dir + if quantization_config is not None and quantization_config.dataset is not None: + sd_model = cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs) + + supported_pipelines = ( + OVStableDiffusionPipeline, + OVStableDiffusionXLPipeline, + OVLatentConsistencyModelPipeline, + ) + if not isinstance(sd_model, supported_pipelines): + raise NotImplementedError(f"Quantization in hybrid mode is not supported for {cls.__name__}") + + nsamples = quantization_config.num_samples if quantization_config.num_samples else 200 + unet_inputs = sd_model._prepare_unet_inputs(quantization_config.dataset, nsamples) + + from .quantization import _hybrid_quantization + + unet = _hybrid_quantization(sd_model.unet.model, weight_quantization_config, dataset=unet_inputs) + return cls( unet=unet, config=config, @@ -300,6 +335,62 @@ def _from_pretrained( **kwargs, ) + def _prepare_unet_inputs( + self, + dataset: Union[str, List[Any]], + num_samples: int, + height: Optional[int] = None, + width: Optional[int] = None, + seed: Optional[int] = 42, + **kwargs, + ) -> Dict[str, Any]: + self.compile() + + size = self.unet.config.get("sample_size", 64) * self.vae_scale_factor + height = height or min(size, 512) + width = width or min(size, 512) + + if isinstance(dataset, str): + dataset = deepcopy(dataset) + available_datasets = PREDEFINED_SD_DATASETS.keys() + if dataset not in available_datasets: + raise ValueError( + f"""You have entered a string value for dataset. 
You can only choose between + {list(available_datasets)}, but the {dataset} was found""" + ) + + from datasets import load_dataset + + dataset_metadata = PREDEFINED_SD_DATASETS[dataset] + dataset = load_dataset(dataset, split=dataset_metadata["split"], streaming=True).shuffle(seed=seed) + input_names = dataset_metadata["inputs"] + dataset = dataset.select_columns(list(input_names.values())) + + def transform_fn(data_item): + return {inp_name: data_item[column] for inp_name, column in input_names.items()} + + else: + + def transform_fn(data_item): + return data_item if isinstance(data_item, (list, dict)) else [data_item] + + from .quantization import InferRequestWrapper + + calibration_data = [] + self.unet.request = InferRequestWrapper(self.unet.request, calibration_data) + + for inputs in dataset: + inputs = transform_fn(inputs) + if isinstance(inputs, dict): + self.__call__(**inputs, height=height, width=width) + else: + self.__call__(*inputs, height=height, width=width) + if len(calibration_data) > num_samples: + break + + self.unet.request = self.unet.request.request + return calibration_data[:num_samples] + @classmethod def _from_transformers( cls, diff --git a/optimum/intel/openvino/quantization.py b/optimum/intel/openvino/quantization.py index cd26f91f22..c46f29092b 100644 --- a/optimum/intel/openvino/quantization.py +++ b/optimum/intel/openvino/quantization.py @@ -16,6 +16,7 @@ import inspect import logging import os +from collections import deque from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple, Union @@ -24,6 +25,7 @@ import torch import transformers from nncf import CompressWeightsMode, IgnoredScope, NNCFConfig, SensitivityMetric +from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters from nncf.torch import create_compressed_model, register_default_init_args, register_module from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk from nncf.torch.initialization import PTInitializingDataLoader @@ -550,7 +552,7 @@ def _remove_unused_columns(self, dataset: "Dataset"): def _weight_only_quantization( model: openvino.runtime.Model, quantization_config: Union[OVWeightQuantizationConfig, Dict] -): +) -> openvino.runtime.Model: config = quantization_config if isinstance(config, dict): config = OVWeightQuantizationConfig.from_dict(quantization_config) @@ -564,7 +566,8 @@ def _weight_only_quantization( from optimum.gptq.data import get_dataset, prepare_dataset - dataset = get_dataset(config.dataset, tokenizer, seqlen=32) + nsamples = config.num_samples if config.num_samples else 128 + dataset = get_dataset(config.dataset, tokenizer, seqlen=32, nsamples=nsamples) dataset = prepare_dataset(dataset) sensitivity_metric = None @@ -590,4 +593,92 @@ def _weight_only_quantization( # awq=config.quant_method == "awq", # TODO : remove and add it back once nncf v2.9.0 ignored_scope=ignored_scope, dataset=dataset, + # subset_size=config.num_samples if config.num_samples else 128, # TODO : enable from nncf v2.9.0 ) + + +def _get_operation_const_op(operation, const_port_id: int): + node = operation.input_value(const_port_id).get_node() + queue = deque([node]) + constant_node = None + allowed_propagation_types_list = ["Convert", "FakeQuantize", "Reshape"] + + while len(queue) != 0: + curr_node = queue.popleft() + if curr_node.get_type_name() == "Constant": + constant_node = curr_node + break + if len(curr_node.inputs()) == 0: + break + if curr_node.get_type_name() in allowed_propagation_types_list: + 
queue.append(curr_node.input_value(0).get_node()) + + return constant_node + + +def _is_embedding(node) -> bool: + allowed_types_list = ["f16", "f32", "f64"] + const_port_id = 0 + input_tensor = node.input_value(const_port_id) + if input_tensor.get_element_type().get_type_name() in allowed_types_list: + const_node = _get_operation_const_op(node, const_port_id) + if const_node is not None: + return True + + return False + + +def _collect_ops_with_weights(model): + ops_with_weights = [] + for op in model.get_ops(): + if op.get_type_name() == "MatMul": + constant_node_0 = _get_operation_const_op(op, const_port_id=0) + constant_node_1 = _get_operation_const_op(op, const_port_id=1) + if constant_node_0 or constant_node_1: + ops_with_weights.append(op.get_friendly_name()) + if op.get_type_name() == "Gather" and _is_embedding(op): + ops_with_weights.append(op.get_friendly_name()) + + return ops_with_weights + + +def _hybrid_quantization( + model: openvino.runtime.Model, quantization_config: OVWeightQuantizationConfig, dataset: Dict[str, Any] +) -> openvino.runtime.Model: + """ + Quantize a model in hybrid mode with NNCF which means that we quantize: + weights of MatMul and Embedding layers and activations of other layers. + The optimization specifications defined in `quantization_config`. + + Args: + model (`openvino.runtime.Model`): + The OpenVINO Runtime model for applying hybrid quantization. + quantization_config (`OVWeightQuantizationConfig`): + The configuration containing the parameters related to quantization. + dataset (`Dict[str, Any]`): + The dataset used for hybrid quantization. + Returns: + The OpenVINO Runtime model with applied hybrid quantization. + """ + ops_to_compress = _collect_ops_with_weights(model) + + ignored_scope = quantization_config.ignored_scope if isinstance(quantization_config.ignored_scope, dict) else {} + ptq_ignored_scope = nncf.IgnoredScope(**ignored_scope) + ptq_ignored_scope.names += ops_to_compress + + wc_quantization_config = copy.deepcopy(quantization_config) + wc_quantization_config.ignored_scope = ignored_scope + wc_quantization_config.ignored_scope["types"] = ignored_scope.get("types", []) + ["Convolution"] + compressed_model = _weight_only_quantization(model, wc_quantization_config) + + subset_size = quantization_config.num_samples if quantization_config.num_samples else 200 + quantized_model = nncf.quantize( + model=compressed_model, + calibration_dataset=nncf.Dataset(dataset), + model_type=nncf.ModelType.TRANSFORMER, + ignored_scope=ptq_ignored_scope, + # The SQ algo should be disabled for MatMul nodes because their weights are already compressed + advanced_parameters=nncf.AdvancedQuantizationParameters(AdvancedSmoothQuantParameters(matmul=-1)), + subset_size=subset_size, + ) + return quantized_model diff --git a/optimum/intel/openvino/utils.py b/optimum/intel/openvino/utils.py index 49aec81e57..cbcc696393 100644 --- a/optimum/intel/openvino/utils.py +++ b/optimum/intel/openvino/utils.py @@ -99,6 +99,13 @@ } +PREDEFINED_SD_DATASETS = { + "conceptual_captions": {"split": "train", "inputs": {"prompt": "caption"}}, + "laion/220k-GPT4Vision-captions-from-LIVIS": {"split": "train", "inputs": {"prompt": "caption"}}, + "laion/filtered-wit": {"split": "train", "inputs": {"prompt": "caption"}}, +} + + def use_external_data_format(num_parameters: int) -> bool: """ Returns whether or not the model requires using external data format for the ONNX export diff --git a/tests/openvino/test_quantization.py b/tests/openvino/test_quantization.py index 
57c45df6ec..c7fb00e12d 100644 --- a/tests/openvino/test_quantization.py +++ b/tests/openvino/test_quantization.py @@ -39,6 +39,7 @@ from optimum.intel import ( OVConfig, + OVLatentConsistencyModelPipeline, OVModelForAudioClassification, OVModelForCausalLM, OVModelForFeatureExtraction, @@ -157,10 +158,10 @@ class OVWeightCompressionTest(unittest.TestCase): (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 44), ) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 62, 365),) - SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 385),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_COMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 62, 86),) + SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS = ((OVModelForCausalLM, "opt125m", 0, 148),) SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTO_COMPRESSED_MATMULS = ( - (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 14, 136), + (OVModelForCausalLM, "hf-internal-testing/tiny-random-OPTForCausalLM", 14, 50), ) SUPPORTED_ARCHITECTURES_STATEFUL_WITH_EXPECTED_8BIT_COMPRESSED_MATMULS = ( (OVModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 44, 44), @@ -233,6 +234,12 @@ class OVWeightCompressionTest(unittest.TestCase): (OVStableDiffusionXLPipeline, "stable-diffusion-xl"), ) + SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION = ( + (OVStableDiffusionPipeline, "stable-diffusion", 72, 195), + (OVStableDiffusionXLPipeline, "stable-diffusion-xl", 84, 331), + (OVLatentConsistencyModelPipeline, "latent-consistency", 50, 135), + ) + IS_SUPPORT_STATEFUL = is_openvino_version(">=", "2023.3") DEFAULT_INT4_CONFIG = {"bits": 4, "sym": True, "group_size": 64, "all_layers": True} @@ -352,6 +359,38 @@ def test_ovmodel_load_with_compressed_weights(self, model_cls, model_type): _, num_int8, _ = get_num_quantized_nodes(model) self.assertEqual(expected_ov_int8[i], num_int8) + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION) + def test_ovmodel_hybrid_quantization(self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8): + model_id = MODEL_NAMES[model_type] + quantization_config = OVWeightQuantizationConfig(bits=8, dataset="conceptual_captions", num_samples=2) + with tempfile.TemporaryDirectory() as tmp_dir: + model = model_cls.from_pretrained(model_id, export=True, quantization_config=quantization_config) + + num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet) + self.assertEqual(expected_num_fake_quantize, num_fake_quantize) + self.assertEqual(expected_ov_int8, num_int8) + self.assertEqual(0, num_int4) + + model.save_pretrained(tmp_dir) + + @parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_HYBRID_QUANTIZATION[-1:]) + def test_ovmodel_hybrid_quantization_with_custom_dataset( + self, model_cls, model_type, expected_num_fake_quantize, expected_ov_int8 + ): + model_id = MODEL_NAMES[model_type] + dataset = [ + "dream rose covered with clean crystal, sharp edges, transparent, beautiful, highly detailed, high render" + ] + model = model_cls.from_pretrained( + model_id, + export=True, + quantization_config=OVWeightQuantizationConfig(bits=8, dataset=dataset, num_samples=3), + ) + num_fake_quantize, num_int8, num_int4 = get_num_quantized_nodes(model.unet) + self.assertEqual(expected_num_fake_quantize, num_fake_quantize) + self.assertEqual(expected_ov_int8, num_int8) + self.assertEqual(0, num_int4) + 
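# Note on the two hybrid checks above: only the UNet is inspected because, with a calibration
# dataset and bits=8, its activations are quantized (hence the FakeQuantize nodes) while the
# weights of its MatMul and Embedding layers, and the remaining pipeline components, are
# compressed to int8, so no int4 nodes are expected. A minimal sketch of the same inspection
# outside the test suite, assuming `pipe` is a pipeline exported with a hybrid
# OVWeightQuantizationConfig and that utils_tests.py from tests/openvino is importable:
#
#     num_fq, num_int8, num_int4 = get_num_quantized_nodes(pipe.unet)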
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_4BIT_AUTOCOMPRESSED_MATMULS) @unittest.mock.patch.dict( "optimum.intel.openvino.configuration._DEFAULT_4BIT_CONFIGS", {"facebook/opt-125m": DEFAULT_INT4_CONFIG} diff --git a/tests/openvino/utils_tests.py b/tests/openvino/utils_tests.py index 04049172d3..97c8a92836 100644 --- a/tests/openvino/utils_tests.py +++ b/tests/openvino/utils_tests.py @@ -116,7 +116,7 @@ "stable-diffusion-xl-refiner": (366, 34, 42, 66), } -_ARCHITECTURES_TO_EXPECTED_INT4_INT8 = {"opt125m": (62, 477)} +_ARCHITECTURES_TO_EXPECTED_INT4_INT8 = {"opt125m": (62, 86)} def get_num_quantized_nodes(ov_model): @@ -127,8 +127,8 @@ def get_num_quantized_nodes(ov_model): if "FakeQuantize" in elem.name: num_fake_quantize += 1 for i in range(elem.get_output_size()): - if "8" in elem.get_output_element_type(i).get_type_name(): + if elem.get_output_element_type(i).get_type_name() in ["i8", "u8"]: num_int8 += 1 - if "4" in elem.get_output_element_type(i).get_type_name(): + if elem.get_output_element_type(i).get_type_name() in ["i4", "u4"]: num_int4 += 1 return num_fake_quantize, num_int8, num_int4 From f68486babb15e27c83ca0e998b747c848b3d1cab Mon Sep 17 00:00:00 2001 From: Helena Kloosterman Date: Tue, 12 Mar 2024 18:40:00 +0100 Subject: [PATCH 37/64] Show device name in _print_compiled_model_properties (#541) * Show device name in _print_compiled_model_properties Enable CACHE_DIR also for devices like "GPU:0" * Update optimum/intel/openvino/modeling_seq2seq.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> * Change check for gpu device --------- Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/intel/openvino/modeling_base.py | 2 +- optimum/intel/openvino/modeling_diffusion.py | 2 +- optimum/intel/openvino/modeling_seq2seq.py | 4 ++-- optimum/intel/openvino/utils.py | 8 +++++++- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/optimum/intel/openvino/modeling_base.py b/optimum/intel/openvino/modeling_base.py index 7ab99aab42..15f1fc4f1c 100644 --- a/optimum/intel/openvino/modeling_base.py +++ b/optimum/intel/openvino/modeling_base.py @@ -388,7 +388,7 @@ def compile(self): if ( "CACHE_DIR" not in self.ov_config.keys() and not str(self.model_save_dir).startswith(gettempdir()) - and self._device.lower() == "gpu" + and "gpu" in self._device.lower() ): # Set default CACHE_DIR only if it is not set, if the model is not in a temporary directory, and device is GPU cache_dir = Path(self.model_save_dir).joinpath("model_cache") diff --git a/optimum/intel/openvino/modeling_diffusion.py b/optimum/intel/openvino/modeling_diffusion.py index c0588a6f11..f0fea5a8ce 100644 --- a/optimum/intel/openvino/modeling_diffusion.py +++ b/optimum/intel/openvino/modeling_diffusion.py @@ -669,7 +669,7 @@ def _compile(self): if ( "CACHE_DIR" not in self.ov_config.keys() and not str(self._model_dir).startswith(gettempdir()) - and self.device.lower() == "gpu" + and self.device.lower().split(":")[0] == "gpu" ): self.ov_config["CACHE_DIR"] = os.path.join(self._model_dir, self._model_name, "model_cache") diff --git a/optimum/intel/openvino/modeling_seq2seq.py b/optimum/intel/openvino/modeling_seq2seq.py index 617d898be5..d68cbc75ed 100644 --- a/optimum/intel/openvino/modeling_seq2seq.py +++ b/optimum/intel/openvino/modeling_seq2seq.py @@ -451,7 +451,7 @@ def _compile(self): if ( "CACHE_DIR" not in ov_config.keys() and not str(self.parent_model.model_save_dir).startswith(gettempdir()) - and self._device.lower() == "gpu" + and "gpu" 
in self._device.lower() ): cache_dir = Path(self.parent_model.model_save_dir).joinpath("model_cache") ov_config["CACHE_DIR"] = str(cache_dir) @@ -563,7 +563,7 @@ def _compile(self): if ( "CACHE_DIR" not in ov_config.keys() and not str(self.parent_model.model_save_dir).startswith(gettempdir()) - and self._device.lower() == "gpu" + and "gpu" in self._device.lower() ): cache_dir = Path(self.parent_model.model_save_dir).joinpath("model_cache") ov_config["CACHE_DIR"] = str(cache_dir) diff --git a/optimum/intel/openvino/utils.py b/optimum/intel/openvino/utils.py index cbcc696393..a0439d2129 100644 --- a/optimum/intel/openvino/utils.py +++ b/optimum/intel/openvino/utils.py @@ -20,7 +20,7 @@ import numpy as np from huggingface_hub import model_info -from openvino.runtime import Type, properties +from openvino.runtime import Core, Type, properties from transformers.onnx.utils import ParameterFormat, compute_serialized_parameters_size @@ -155,3 +155,9 @@ def _print_compiled_model_properties(compiled_model): logger.info(f" {k}: {value}") except Exception: logger.error(f"[error] Get property of '{k}' failed") + try: + logger.info("EXECUTION_DEVICES:") + for device in compiled_model.get_property("EXECUTION_DEVICES"): + logger.info(f" {device}: {Core().get_property(device, 'FULL_DEVICE_NAME')}") + except Exception: + logger.error("[error] Get FULL_DEVICE_NAME failed") From 00cd9036c39cf212dbd2664cf07661db34a37230 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 16:32:23 +0800 Subject: [PATCH 38/64] Update code with comments Signed-off-by: Cheng, Penghui --- .../language-modeling/requirements.txt | 2 +- .../language-modeling/run_clm.py | 9 +++- .../intel/neural_compressor/quantization.py | 14 ++++--- optimum/intel/utils/import_utils.py | 2 +- tests/neural_compressor/test_optimization.py | 41 ++++++++++++++++++- 5 files changed, 58 insertions(+), 10 deletions(-) diff --git a/examples/neural_compressor/language-modeling/requirements.txt b/examples/neural_compressor/language-modeling/requirements.txt index 0e71b6fc46..ec38e83d2d 100644 --- a/examples/neural_compressor/language-modeling/requirements.txt +++ b/examples/neural_compressor/language-modeling/requirements.txt @@ -3,5 +3,5 @@ torch >= 1.9 datasets >= 1.8.0 sentencepiece != 0.1.92 protobuf -intel-extension-for-transformers >=1.3 +intel-extension-for-transformers >= 1.3 peft diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index f3c1b44e57..52234a1a10 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -33,7 +33,6 @@ import torch import transformers from datasets import load_dataset -from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig from neural_compressor import ( DistillationConfig, PostTrainingQuantConfig, @@ -58,7 +57,10 @@ from transformers.utils.versions import require_version from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer +from optimum.intel.utils.import_utils import is_intel_extension_for_transformers_available +if is_intel_extension_for_transformers_available(): + from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -626,6 +628,11 @@ def compute_metrics(eval_preds): else: recipes = {} if optim_args.quantization_approach == "weight_only": + if not is_intel_extension_for_transformers_available(): + 
raise ImportError( + "Could not find the intel-extension-for-transformers package. " + "Please install the required packages: pip install intel-extension-for-transformers and pip install peft." + ) if optim_args.apply_pruning or optim_args.apply_distillation: raise ValueError("Weight only quantization and pruning or distillation cannot be combined.") quantization_config = WeightOnlyQuantConfig( diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 53b269957a..d8022ce53f 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -19,7 +19,7 @@ from enum import Enum from itertools import chain from pathlib import Path -from typing import Callable, Dict, Optional, Union +from typing import Callable, Dict, Optional, TypeAlias, Union import torch from datasets import Dataset, load_dataset @@ -80,6 +80,9 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig + Config: TypeAlias = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] +else: + Config: TypeAlias = PostTrainingQuantConfig logger = logging.getLogger(__name__) @@ -149,7 +152,7 @@ def from_pretrained(cls, model: PreTrainedModel, **kwargs): def quantize( self, save_directory: Union[str, Path], - quantization_config=None, + quantization_config: Config = None, calibration_dataset: Dataset = None, batch_size: int = 8, data_collator: Optional[DataCollator] = None, @@ -162,7 +165,7 @@ def quantize( Quantize a model given the optimization specifications defined in `quantization_config`. Args: - quantization_config (`PostTrainingQuantConfig`): + quantization_config (`Union[PostTrainingQuantConfig, WeightOnlyQuantConfig]`): The configuration containing the parameters related to quantization. save_directory (`Union[str, Path]`): The directory where the quantized model should be saved. @@ -261,8 +264,7 @@ def quantize( save_onnx_model = False if ( - not weight_only - and not isinstance(quantization_config, WeightOnlyQuantConfig) + isinstance(quantization_config, PostTrainingQuantConfig) and quantization_config.backend == "ipex" and is_ipex_version("<", IPEX_MINIMUM_VERSION) and "generation" in self.task @@ -272,7 +274,7 @@ def quantize( f"but only version {IPEX_MINIMUM_VERSION} or higher is supported." ) - if isinstance(quantization_config, WeightOnlyQuantConfig): + if not isinstance(quantization_config, PostTrainingQuantConfig): self._quantized_model = convert_to_quantized_model(self._original_model, quantization_config) # Save the quantized model output_path = save_directory.joinpath(file_name or default_name) diff --git a/optimum/intel/utils/import_utils.py b/optimum/intel/utils/import_utils.py index 4d6f84aa35..4213cc2c7b 100644 --- a/optimum/intel/utils/import_utils.py +++ b/optimum/intel/utils/import_utils.py @@ -350,7 +350,7 @@ def is_timm_version(operation: str, version: str): INTEL_EXTENSION_FOR_TRANSFORMERS_IMPORT_ERROR = """ {0} requires the intel-extension-for-transformers library but it was not found in your environment. You can install it with pip: -`pip install neural-compressor`. +`pip install intel-extension-for-transformers`. Please note that you may need to restart your runtime after installation.
""" DATASETS_IMPORT_ERROR = """ diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index c98065b30d..a5bdcd328f 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -202,7 +202,6 @@ def test_ipex_static_quantization_with_smoothquant(self, task, model_name, expec def test_weight_only_quantization(self): model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM" - quantization_config = WeightOnlyQuantConfig(weight_dtype="int8") model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) @@ -210,6 +209,36 @@ def test_weight_only_quantization(self): calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) with tempfile.TemporaryDirectory() as tmp_dir: + quantization_config = WeightOnlyQuantConfig(weight_dtype="int8") + q_model = quantizer.quantize( + quantization_config=quantization_config, + save_directory=tmp_dir, + ) + inp = torch.tensor([calibration_dataset[0]["input_ids"]]) + out = model(inp)[0] + q_out = q_model(inp)[0] + self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) + + with tempfile.TemporaryDirectory() as tmp_dir: + quantization_config = WeightOnlyQuantConfig( + algorithm="GPTQ", + weight_dtype="int4_clip", + ) + q_model = quantizer.quantize( + quantization_config=quantization_config, + calibration_dataset=calibration_dataset, + save_directory=tmp_dir, + ) + inp = torch.tensor([calibration_dataset[0]["input_ids"]]) + out = model(inp)[0] + q_out = q_model(inp)[0] + self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) + + with tempfile.TemporaryDirectory() as tmp_dir: + quantization_config = WeightOnlyQuantConfig( + algorithm="AWQ", + weight_dtype="int4_clip", + ) q_model = quantizer.quantize( quantization_config=quantization_config, calibration_dataset=calibration_dataset, @@ -220,6 +249,16 @@ def test_weight_only_quantization(self): q_out = q_model(inp)[0] self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) + with tempfile.TemporaryDirectory() as tmp_dir: + q_model = quantizer.quantize( + weight_only=True, # use RTN quantization method and NF4 weight data type is default. 
+ save_directory=tmp_dir, + ) + inp = torch.tensor([calibration_dataset[0]["input_ids"]]) + out = model(inp)[0] + q_out = q_model(inp)[0] + self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) + def test_dynamic_accuracy_strategy_quantization(self): model_name = "distilbert-base-cased-distilled-squad" model = AutoModelForQuestionAnswering.from_pretrained(model_name) From 6b95933f0671f8086edb70cedd2abb574dbaffde Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 16:48:06 +0800 Subject: [PATCH 39/64] Fixed pylint error Signed-off-by: Cheng, Penghui --- examples/neural_compressor/language-modeling/run_clm.py | 1 + optimum/intel/neural_compressor/quantization.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 52234a1a10..c41d3e4b32 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -59,6 +59,7 @@ from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer from optimum.intel.utils.import_utils import is_intel_extension_for_transformers_available + if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index d8022ce53f..d7fffc85be 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -80,6 +80,7 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig + Config: TypeAlias = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] else: Config: TypeAlias = PostTrainingQuantConfig @@ -212,7 +213,9 @@ def quantize( elif isinstance(quantization_config, WeightOnlyQuantConfig): algo = quantization_config.algorithm else: - raise TypeError(f"For weight-only quantization, `quantization_config` should be an instance of `WeightOnlyQuantConfig`, but got: {type(quantization_config)} instead.") + raise TypeError( + f"For weight-only quantization, `quantization_config` should be an instance of `WeightOnlyQuantConfig`, but got: {type(quantization_config)} instead." 
+ ) if calibration_dataset is None and ("GPTQ" in algo or "AWQ" in algo): raise ValueError( From 5d90b5245650c98af818437d38a65a3cb53028b6 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 17:03:40 +0800 Subject: [PATCH 40/64] Update optimum/intel/neural_compressor/configuration.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/intel/neural_compressor/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index 9990cabda8..0abdc29cd2 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -35,7 +35,7 @@ class INCConfig(BaseConfig): def __init__( self, - quantization=None, + quantization: Optional[Union[Dict, _BaseQuantizationConfig, "WeightOnlyQuantConfig"]] = None, pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None, distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None, save_onnx_model: bool = False, From e804df350442d9c47eb2b658315476a704d2e17d Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 17:24:47 +0800 Subject: [PATCH 41/64] Fixed example and UT for weight-only quantization Signed-off-by: Cheng, Penghui --- optimum/intel/neural_compressor/quantization.py | 2 +- tests/neural_compressor/test_optimization.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index d7fffc85be..4884813209 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -230,7 +230,7 @@ def quantize( batch_size=batch_size, remove_unused_columns=remove_unused_columns, data_collator=data_collator, - use_label=False, + use_label=False if "GPTQ" in algo else True, ) quantization_config.calib_dataloader = calibration_dataloader diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index a5bdcd328f..0272892096 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -205,10 +205,10 @@ def test_weight_only_quantization(self): model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) - quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) with tempfile.TemporaryDirectory() as tmp_dir: + quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") quantization_config = WeightOnlyQuantConfig(weight_dtype="int8") q_model = quantizer.quantize( quantization_config=quantization_config, @@ -220,6 +220,7 @@ def test_weight_only_quantization(self): self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) with tempfile.TemporaryDirectory() as tmp_dir: + quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") quantization_config = WeightOnlyQuantConfig( algorithm="GPTQ", weight_dtype="int4_clip", @@ -235,6 +236,7 @@ def test_weight_only_quantization(self): self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) with tempfile.TemporaryDirectory() as tmp_dir: + quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") quantization_config = 
WeightOnlyQuantConfig( algorithm="AWQ", weight_dtype="int4_clip", @@ -250,6 +252,7 @@ def test_weight_only_quantization(self): self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) with tempfile.TemporaryDirectory() as tmp_dir: + quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") q_model = quantizer.quantize( weight_only=True, # use RTN quantization method and NF4 weight data type is default. save_directory=tmp_dir, From 82c27dd4010e658d77bc5460580f553f36737059 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 17:56:15 +0800 Subject: [PATCH 42/64] Fixed pre-ci test error Signed-off-by: Cheng, Penghui --- optimum/intel/neural_compressor/quantization.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 4884813209..3207ff43dd 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -19,7 +19,7 @@ from enum import Enum from itertools import chain from pathlib import Path -from typing import Callable, Dict, Optional, TypeAlias, Union +from typing import Callable, Dict, Optional, Union import torch from datasets import Dataset, load_dataset @@ -81,9 +81,9 @@ from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig - Config: TypeAlias = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] + Config = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] else: - Config: TypeAlias = PostTrainingQuantConfig + Config = PostTrainingQuantConfig logger = logging.getLogger(__name__) From 3ca3f608c90cde33324841b80cbfd336a638be3f Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 13 Mar 2024 18:12:24 +0800 Subject: [PATCH 43/64] Fixed pre-ci test error Signed-off-by: Cheng, Penghui --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index d9427f3bbd..49b7a92673 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,7 @@ "rjieba", "timm", "invisible-watermark>=0.2.0", + "cmake>=3.16", "intel-extension-for-transformers>=1.3", "peft", "auto-gptq", From 0cc7c0065d1bb9c901c916d93e2971f892233ef1 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sun, 17 Mar 2024 21:47:59 +0800 Subject: [PATCH 44/64] Fixed UT and examples error Signed-off-by: Cheng, Penghui --- .github/workflows/test_inc.yml | 5 +- .../language-modeling/run_clm.py | 60 +++++++++++++++---- .../text-generation/run_generation.py | 6 +- optimum/intel/neural_compressor/__init__.py | 6 +- .../intel/neural_compressor/configuration.py | 4 +- .../intel/neural_compressor/modeling_base.py | 10 +++- .../intel/neural_compressor/quantization.py | 30 ++++++++-- setup.py | 2 +- tests/neural_compressor/test_optimization.py | 37 ++++++------ tests/openvino/test_modeling_basic.py | 1 + 10 files changed, 117 insertions(+), 44 deletions(-) diff --git a/.github/workflows/test_inc.yml b/.github/workflows/test_inc.yml index 3a15214f99..f3398858a7 100644 --- a/.github/workflows/test_inc.yml +++ b/.github/workflows/test_inc.yml @@ -30,8 +30,11 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip + pip install cmake>=3.16 + pip install py-cpuinfo + pip install torch==2.1.0+cpu --extra-index-url https://download.pytorch.org/whl/cpu pip install .[neural-compressor,diffusers,tests] - pip install intel-extension-for-pytorch + pip 
install intel-extension-for-pytorch==2.1.100 - name: Test with Pytest run: | pytest tests/neural_compressor/ diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index c41d3e4b32..5a6256d6b1 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -63,6 +63,8 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig + from optimum.intel.neural_compressor import ITREXAutoModelForCausalLM + os.environ["CUDA_VISIBLE_DEVICES"] = "" # Will error if the minimal version of Transformers is not installed. Remove at your own risks. @@ -147,7 +149,9 @@ class OptimizationArguments: ) quantization_approach: str = field( default="dynamic", - metadata={"help": "Quantization approach. Supported approach are static, dynamic and aware_training."}, + metadata={ + "help": "Quantization approach. Supported approaches are static, dynamic, aware_training and weight_only." + }, ) smooth_quant: bool = field( default=False, @@ -200,8 +204,12 @@ class OptimizationArguments: default=False, metadata={"help": "Whether or not to verify the loading of the quantized model."}, ) + bits: str = field( + default="4", + metadata={"help": "Number of bits for weight-only quantization, 1-8 bits."}, + ) weight_dtype: str = field( - default="int8", + default="int4_clip", metadata={"help": "weight dtype for weight only quantization."}, ) group_size: int = field( default=-1, @@ -218,9 +226,24 @@ class OptimizationArguments: ) quantization_methodology: str = field( default="RTN", - metadata={ - "help": "Quantization methodology for weight only quantization. Choose from 'RTN', 'AWQ' and 'GPTQ'." - }, + metadata={"help": "Quantization methodology for weight only quantization. Choose from 'RTN' and 'GPTQ'."}, ) + gptq_percdamp: float = field( + default=0.01, + metadata={"help": "Percent of the average Hessian diagonal to use for dampening."}, + ) + gptq_block_size: int = field( + default=128, + metadata={"help": "Block size.
sub weight matrix size to run GPTQ."}, + ) + gptq_nsamples: int = field(default=128, metadata={"help": "Number of calibration data samples."}) + gptq_use_max_length: bool = field( + default=False, + metadata={"help": "Set all sequence length to be same length of args.gptq_pad_max_length"}, + ) + gptq_pad_max_length: int = field( + default=2048, + metadata={"help": "Calibration dataset sequence max length, this should align with your model config"}, ) @@ -636,11 +659,21 @@ def compute_metrics(eval_preds): ) if optim_args.apply_pruning or optim_args.apply_distillation: raise ValueError("Weight only quantization and pruning or distillation cannot be combined.") + if optim_args.quantization_methodology == "GPTQ": + algorithm_args = { + "act_order": False, + "percdamp": optim_args.gptq_percdamp, + "block_size": optim_args.gptq_block_size, + "nsamples": optim_args.gptq_nsamples, + "use_max_length": optim_args.gptq_use_max_length, + "pad_max_length": optim_args.gptq_pad_max_length, + } quantization_config = WeightOnlyQuantConfig( weight_dtype=optim_args.weight_dtype, group_size=optim_args.group_size, scheme=optim_args.weight_only_scheme, algorithm=optim_args.quantization_methodology, + algorithm_args=algorithm_args if optim_args.quantization_methodology == "GPTQ" else None, ) else: quantization_config = PostTrainingQuantConfig( @@ -733,17 +766,20 @@ def compute_metrics(eval_preds): quantizer.quantize( quantization_config=quantization_config, save_directory=training_args.output_dir, - calibration_dataset=train_dataset - if optim_args.quantization_approach in ["static", "weight_only"] - else None, - batch_size=1 - if optim_args.quantization_approach == "weight_only" - else training_args.per_device_train_batch_size, + calibration_dataset=( + train_dataset if optim_args.quantization_approach in ["static", "weight_only"] else None + ), + batch_size=( + 1 if optim_args.quantization_approach == "weight_only" else training_args.per_device_train_batch_size + ), ) trainer.model = quantizer._quantized_model if optim_args.apply_quantization and optim_args.verify_loading: - loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) + if optim_args.quantization_approach == "weight_only": + loaded_model = ITREXAutoModelForCausalLM.from_pretrained(training_args.output_dir) + else: + loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) tokens = tokenizer("This is a sample input", return_tensors="pt") with torch.no_grad(): original_model_outputs = trainer.model(**tokens) diff --git a/examples/neural_compressor/text-generation/run_generation.py b/examples/neural_compressor/text-generation/run_generation.py index e06bba4102..8b1adbd3f8 100755 --- a/examples/neural_compressor/text-generation/run_generation.py +++ b/examples/neural_compressor/text-generation/run_generation.py @@ -368,9 +368,9 @@ def calibration_fn(p_model): args.length = adjust_length_to_model( args.length, - max_sequence_length=model.config.max_position_embeddings - if hasattr(model.config, "max_position_embeddings") - else 0, + max_sequence_length=( + model.config.max_position_embeddings if hasattr(model.config, "max_position_embeddings") else 0 + ), ) logger.info(args) diff --git a/optimum/intel/neural_compressor/__init__.py b/optimum/intel/neural_compressor/__init__.py index a7170120b7..f3a7bffe69 100644 --- a/optimum/intel/neural_compressor/__init__.py +++ b/optimum/intel/neural_compressor/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations 
under the License. -from ..utils.import_utils import is_diffusers_available +from ..utils.import_utils import is_diffusers_available, is_intel_extension_for_transformers_available from .configuration import INCConfig from .modeling_base import ( INCModel, @@ -32,3 +32,7 @@ if is_diffusers_available(): from .modeling_diffusion import INCStableDiffusionPipeline + + +if is_intel_extension_for_transformers_available(): + from .modeling_base import ITREXAutoModelForCausalLM diff --git a/optimum/intel/neural_compressor/configuration.py b/optimum/intel/neural_compressor/configuration.py index 0abdc29cd2..7f5370e5ee 100644 --- a/optimum/intel/neural_compressor/configuration.py +++ b/optimum/intel/neural_compressor/configuration.py @@ -35,7 +35,7 @@ class INCConfig(BaseConfig): def __init__( self, - quantization: Optional[Union[Dict, _BaseQuantizationConfig, "WeightOnlyQuantConfig"]] = None, + quantization: Optional[Union[Dict, _BaseQuantizationConfig]] = None, pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None, distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None, save_onnx_model: bool = False, @@ -50,7 +50,7 @@ def __init__( self.save_onnx_model = save_onnx_model @staticmethod - def _create_quantization_config(config): + def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig]): # TODO : add activations_dtype and weights_dtype if isinstance(config, _BaseQuantizationConfig): approach = _quantization_model[config.approach] diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index 72646a9f94..0226855d64 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -43,7 +43,7 @@ from optimum.intel.generation import BaseModelForCausalLM from ...modeling_base import OptimizedModel -from ..utils.import_utils import _torch_version, is_torch_version +from ..utils.import_utils import _torch_version, is_intel_extension_for_transformers_available, is_torch_version from .configuration import INCConfig from .utils import WEIGHTS_NAME @@ -63,6 +63,14 @@ """ +if is_intel_extension_for_transformers_available(): + from intel_extension_for_transformers.transformers.modeling import AutoModelForCausalLM as ITREX_WOQ_MODEL + + class ITREXAutoModelForCausalLM(ITREX_WOQ_MODEL): + auto_model_class = AutoModelForCausalLM + export_feature = "text-generation" + + class INCModel(OptimizedModel): auto_model_class = AutoModel export_feature = "feature-extraction" diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 3207ff43dd..7b294a55ec 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -15,6 +15,7 @@ import copy import inspect import logging +import types import warnings from enum import Enum from itertools import chain @@ -79,6 +80,7 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model + from intel_extension_for_transformers.transformers.modeling.modeling_auto import save_low_bit from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig Config = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] @@ -185,6 +187,9 @@ def quantize( save_directory = Path(save_directory) save_directory.mkdir(parents=True, exist_ok=True) save_onnx_model = kwargs.pop("save_onnx_model", False) + device = 
kwargs.pop("device", "cpu") + use_cpu = True if device == torch.device("cpu") or device == "cpu" else False + use_xpu = True if (isinstance(device, torch.device) and device.type == "xpu") or device == "xpu" else False if save_onnx_model and (isinstance(self._original_model, ORTModel) or weight_only): save_onnx_model = False @@ -217,7 +222,10 @@ def quantize( f"For weight-only quantization, `quantization_config` should be an instance of `WeightOnlyQuantConfig`, but got: {type(quantization_config)} instead." ) - if calibration_dataset is None and ("GPTQ" in algo or "AWQ" in algo): + if algo not in ["RTN", "GPTQ"]: + raise ValueError("Weight-only quantization is only support RTN and GPTQ algorithm now!") + + if calibration_dataset is None and quantization_config.tokenizer is None and ("GPTQ" in algo): raise ValueError( "Weight-only quantization needs a calibration dataset for both GPTQ and AWQ methodologies." ) @@ -278,10 +286,24 @@ def quantize( ) if not isinstance(quantization_config, PostTrainingQuantConfig): - self._quantized_model = convert_to_quantized_model(self._original_model, quantization_config) + if use_cpu: + # will remove after intel-extension-for-transformers 1.3.3 released + quantization_config.device = "cpu" + quantization_config.post_init() + elif use_xpu: + # will remove after intel-extension-for-transformers 1.3.3 released + quantization_config.device = "xpu" + quantization_config.post_init_xpu() + self._quantized_model = convert_to_quantized_model( + self._original_model, quantization_config, device=quantization_config.device + ) + # will remove after intel-extension-for-transformers 1.3.3 released + if hasattr(quantization_config, "calib_dataloader"): + quantization_config.calib_dataloader = None + self._quantized_model.quantization_config = quantization_config + self._quantized_model.save_pretrained = types.MethodType(save_low_bit, self._quantized_model) # Save the quantized model - output_path = save_directory.joinpath(file_name or default_name) - self._quantized_model.save_pretrained(output_path) + self._quantized_model.save_pretrained(save_directory) else: if isinstance(self._original_model.config, PretrainedConfig): self._original_model.config.backend = quantization_config.backend diff --git a/setup.py b/setup.py index 49b7a92673..a59721450f 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ "rjieba", "timm", "invisible-watermark>=0.2.0", - "cmake>=3.16", + # Will remove after intel-extension-for-transformers 1.3.3 released. 
"intel-extension-for-transformers>=1.3", "peft", "auto-gptq", diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index 0272892096..260cb97270 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -45,7 +45,7 @@ set_seed, ) from utils_tests import SEED, INCTestMixin, _generate_dataset -from optimum.intel.utils.import_utils import is_torch_version +from optimum.intel.utils.import_utils import is_torch_version, is_intel_extension_for_transformers_available from optimum.intel import ( @@ -60,11 +60,13 @@ INCSeq2SeqTrainer, INCStableDiffusionPipeline, ) -from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig from optimum.intel.utils.constant import DIFFUSION_WEIGHTS_NAME from optimum.onnxruntime import ORTModelForCausalLM, ORTModelForSequenceClassification from optimum.pipelines import ORT_SUPPORTED_TASKS +if is_intel_extension_for_transformers_available(): + from optimum.intel.neural_compressor import ITREXAutoModelForCausalLM + from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig os.environ["CUDA_VISIBLE_DEVICES"] = "" set_seed(SEED) @@ -200,20 +202,24 @@ def test_ipex_static_quantization_with_smoothquant(self, task, model_name, expec load_ipex_model=True, ) + @unittest.skipIf( + not is_intel_extension_for_transformers_available(), reason="Intel-extension-for-transformers not available!" + ) def test_weight_only_quantization(self): model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) - calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) with tempfile.TemporaryDirectory() as tmp_dir: quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") + calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) quantization_config = WeightOnlyQuantConfig(weight_dtype="int8") q_model = quantizer.quantize( quantization_config=quantization_config, save_directory=tmp_dir, ) + q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) out = model(inp)[0] q_out = q_model(inp)[0] @@ -221,8 +227,14 @@ def test_weight_only_quantization(self): with tempfile.TemporaryDirectory() as tmp_dir: quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") + calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) quantization_config = WeightOnlyQuantConfig( algorithm="GPTQ", + algorithm_args={ + "percdamp": 0.01, + "act_order": False, + "scheme": "sym", + }, weight_dtype="int4_clip", ) q_model = quantizer.quantize( @@ -230,6 +242,7 @@ def test_weight_only_quantization(self): calibration_dataset=calibration_dataset, save_directory=tmp_dir, ) + q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) out = model(inp)[0] q_out = q_model(inp)[0] @@ -237,26 +250,12 @@ def test_weight_only_quantization(self): with tempfile.TemporaryDirectory() as tmp_dir: quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") - quantization_config = WeightOnlyQuantConfig( - algorithm="AWQ", - weight_dtype="int4_clip", - ) - q_model = quantizer.quantize( - quantization_config=quantization_config, - 
calibration_dataset=calibration_dataset, - save_directory=tmp_dir, - ) - inp = torch.tensor([calibration_dataset[0]["input_ids"]]) - out = model(inp)[0] - q_out = q_model(inp)[0] - self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) - - with tempfile.TemporaryDirectory() as tmp_dir: - quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") + calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) q_model = quantizer.quantize( weight_only=True, # use RTN quantization method and NF4 weight data type is default. save_directory=tmp_dir, ) + q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) out = model(inp)[0] q_out = q_model(inp)[0] diff --git a/tests/openvino/test_modeling_basic.py b/tests/openvino/test_modeling_basic.py index a443c5fea7..9423ce5683 100644 --- a/tests/openvino/test_modeling_basic.py +++ b/tests/openvino/test_modeling_basic.py @@ -7,6 +7,7 @@ This test is meant to run quickly with tiny test models. More extensive tests are in test_modeling.py. """ + # ruff: noqa import gc From 9ec53ce7fe5e4320bedccf06539eda678e036ef0 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sun, 17 Mar 2024 23:03:39 +0800 Subject: [PATCH 45/64] Fixed pre-CI error Signed-off-by: Cheng, Penghui --- .github/workflows/test_inc.yml | 2 +- .../intel/neural_compressor/quantization.py | 6 +- setup.py | 12 ++- tests/neural_compressor/test_optimization.py | 77 ++++++++----------- 4 files changed, 45 insertions(+), 52 deletions(-) diff --git a/.github/workflows/test_inc.yml b/.github/workflows/test_inc.yml index f3398858a7..63ceb75158 100644 --- a/.github/workflows/test_inc.yml +++ b/.github/workflows/test_inc.yml @@ -32,7 +32,7 @@ jobs: python -m pip install --upgrade pip pip install cmake>=3.16 pip install py-cpuinfo - pip install torch==2.1.0+cpu --extra-index-url https://download.pytorch.org/whl/cpu + pip install torch==2.1.0 torchaudio==2.1.0 torchvision==0.16 --extra-index-url https://download.pytorch.org/whl/cpu pip install .[neural-compressor,diffusers,tests] pip install intel-extension-for-pytorch==2.1.100 - name: Test with Pytest diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 7b294a55ec..d5ff782db3 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -287,17 +287,17 @@ def quantize( if not isinstance(quantization_config, PostTrainingQuantConfig): if use_cpu: - # will remove after intel-extension-for-transformers 1.3.3 released + # will remove after intel-extension-for-transformers 1.3.3 release. quantization_config.device = "cpu" quantization_config.post_init() elif use_xpu: - # will remove after intel-extension-for-transformers 1.3.3 released + # will remove after intel-extension-for-transformers 1.3.3 release. quantization_config.device = "xpu" quantization_config.post_init_xpu() self._quantized_model = convert_to_quantized_model( self._original_model, quantization_config, device=quantization_config.device ) - # will remove after intel-extension-for-transformers 1.3.3 released + # will remove after intel-extension-for-transformers 1.3.3 release. 
if hasattr(quantization_config, "calib_dataloader"): quantization_config.calib_dataloader = None self._quantized_model.quantization_config = quantization_config diff --git a/setup.py b/setup.py index 2a125597df..6452da43b1 100644 --- a/setup.py +++ b/setup.py @@ -49,9 +49,6 @@ "rjieba", "timm", "invisible-watermark>=0.2.0", - # Will remove after intel-extension-for-transformers 1.3.3 released. - "intel-extension-for-transformers>=1.3", - "peft", "auto-gptq", "transformers_stream_generator", "einops", @@ -60,7 +57,14 @@ QUALITY_REQUIRE = ["black~=23.1", "ruff>=0.0.241"] EXTRAS_REQUIRE = { - "neural-compressor": ["neural-compressor>=2.2.0", "onnxruntime<1.15.0", "accelerate"], + "neural-compressor": [ + "neural-compressor>=2.2.0", + "onnxruntime<1.15.0", + "accelerate", + # will remove after intel-extension-for-transformers 1.3.3 release. + "intel-extension-for-transformers>=1.3", + "peft", + ], "openvino": ["openvino>=2023.3", "nncf>=2.8.1"], "openvino-tokenizers": ["openvino-tokenizers[transformers]"], "nncf": ["nncf>=2.8.1"], diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index 260cb97270..026138553c 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -88,6 +88,13 @@ class OptimizationTest(INCTestMixin): "hf-internal-testing/tiny-random-GPTNeoForCausalLM", ) + WEIGHT_ONLY_CONFIG = ( + (False, "RTN", "int4_clip"), + (False, "GPTQ", "int4_clip"), + (False, "RTN", "int8"), + (True, "", ""), + ) + @parameterized.expand(SUPPORTED_ARCHITECTURES_DYNAMIC) def test_dynamic_quantization(self, task, model_name, expected_quantized_matmuls): quantization_config = PostTrainingQuantConfig(approach="dynamic") @@ -202,59 +209,41 @@ def test_ipex_static_quantization_with_smoothquant(self, task, model_name, expec load_ipex_model=True, ) + @parameterized.expand(WEIGHT_ONLY_CONFIG) @unittest.skipIf( not is_intel_extension_for_transformers_available(), reason="Intel-extension-for-transformers not available!" 
) - def test_weight_only_quantization(self): + def test_weight_only_quantization(self, no_config, algo, weight_dtype): model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") + calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) with tempfile.TemporaryDirectory() as tmp_dir: - quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") - calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) - quantization_config = WeightOnlyQuantConfig(weight_dtype="int8") - q_model = quantizer.quantize( - quantization_config=quantization_config, - save_directory=tmp_dir, - ) - q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) - inp = torch.tensor([calibration_dataset[0]["input_ids"]]) - out = model(inp)[0] - q_out = q_model(inp)[0] - self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) - - with tempfile.TemporaryDirectory() as tmp_dir: - quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") - calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) - quantization_config = WeightOnlyQuantConfig( - algorithm="GPTQ", - algorithm_args={ - "percdamp": 0.01, - "act_order": False, - "scheme": "sym", - }, - weight_dtype="int4_clip", - ) - q_model = quantizer.quantize( - quantization_config=quantization_config, - calibration_dataset=calibration_dataset, - save_directory=tmp_dir, - ) - q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) - inp = torch.tensor([calibration_dataset[0]["input_ids"]]) - out = model(inp)[0] - q_out = q_model(inp)[0] - self.assertTrue(torch.all(torch.isclose(out, q_out, atol=5e-1))) - - with tempfile.TemporaryDirectory() as tmp_dir: - quantizer = INCQuantizer.from_pretrained(copy.deepcopy(model), task="text-generation") - calibration_dataset = _generate_dataset(quantizer, tokenizer, num_samples=2) - q_model = quantizer.quantize( - weight_only=True, # use RTN quantization method and NF4 weight data type is default. - save_directory=tmp_dir, - ) + if not no_config: + if algo == "GPTQ": + algorithm_args = { + "percdamp": 0.01, + "act_order": False, + "scheme": "sym", + } + quantization_config = WeightOnlyQuantConfig( + algorithm=algo, + algorithm_args=algorithm_args if algo == "GPTQ" else None, + weight_dtype=weight_dtype, + ) + q_model = quantizer.quantize( + quantization_config=quantization_config, + calibration_dataset=calibration_dataset if algo == "GPTQ" else None, + save_directory=tmp_dir, + ) + else: + q_model = quantizer.quantize( + weight_only=True, # use RTN quantization method and NF4 weight data type is default. 
+ save_directory=tmp_dir, + ) q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) out = model(inp)[0] From 66d45c2f4084bb719ca29ff3de810362fc3f9703 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Mon, 18 Mar 2024 11:19:58 +0800 Subject: [PATCH 46/64] Fixed UT error Signed-off-by: Cheng, Penghui --- .github/workflows/test_inc.yml | 2 +- optimum/intel/neural_compressor/quantization.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_inc.yml b/.github/workflows/test_inc.yml index 63ceb75158..3ba52ec008 100644 --- a/.github/workflows/test_inc.yml +++ b/.github/workflows/test_inc.yml @@ -30,7 +30,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install cmake>=3.16 + pip install cmake pip install py-cpuinfo pip install torch==2.1.0 torchaudio==2.1.0 torchvision==0.16 --extra-index-url https://download.pytorch.org/whl/cpu pip install .[neural-compressor,diffusers,tests] diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index d5ff782db3..9099efcf74 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -214,7 +214,7 @@ def quantize( if quantization_config is None: quantization_config = WeightOnlyQuantConfig() - algo = ["RTN"] + algo = "RTN" elif isinstance(quantization_config, WeightOnlyQuantConfig): algo = quantization_config.algorithm else: @@ -223,7 +223,7 @@ def quantize( ) if algo not in ["RTN", "GPTQ"]: - raise ValueError("Weight-only quantization is only support RTN and GPTQ algorithm now!") + raise ValueError(f"Weight-only quantization is only support RTN and GPTQ algorithm now!But got {algo}") if calibration_dataset is None and quantization_config.tokenizer is None and ("GPTQ" in algo): raise ValueError( From 4347cee10c5a23cba297c49f617e852e1cf39efb Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:12:53 +0800 Subject: [PATCH 47/64] Update tests/openvino/test_modeling_basic.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- tests/openvino/test_modeling_basic.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/openvino/test_modeling_basic.py b/tests/openvino/test_modeling_basic.py index 9423ce5683..a443c5fea7 100644 --- a/tests/openvino/test_modeling_basic.py +++ b/tests/openvino/test_modeling_basic.py @@ -7,7 +7,6 @@ This test is meant to run quickly with tiny test models. More extensive tests are in test_modeling.py. """ - # ruff: noqa import gc From 68d6e9085e478dd3e169667a63751ff9762776fa Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:13:25 +0800 Subject: [PATCH 48/64] Update examples/neural_compressor/language-modeling/README.md Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/language-modeling/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/README.md b/examples/neural_compressor/language-modeling/README.md index 2b22688c1d..80d7a25d16 100644 --- a/examples/neural_compressor/language-modeling/README.md +++ b/examples/neural_compressor/language-modeling/README.md @@ -97,4 +97,4 @@ respectively `dynamic`, `static`, `weight_only` or `aware_training`. The flag `--verify_loading` can be passed along to verify that the resulting quantized model can be loaded correctly. 
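To make the README recipe above concrete, here is a minimal Python sketch of the weight-only flow documented there, using only APIs that appear in this patch series; the RTN/int8 settings, model name, and output directory are illustrative assumptions, and exact signatures may differ between intermediate revisions of the series.

```python
# Illustrative sketch only: mirrors the RTN weight-only path exercised by the tests
# in this series. Model name, dtype choice and output directory are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig
from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer

model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

quantizer = INCQuantizer.from_pretrained(model, task="text-generation")
# RTN (round-to-nearest) needs no calibration data; int8 keeps the comparison with
# the float model simple.
quantization_config = WeightOnlyQuantConfig(algorithm="RTN", weight_dtype="int8")
quantizer.quantize(quantization_config=quantization_config, save_directory="clm_woq")

# Reload the weight-only quantized model and run a quick sanity check, as the
# `--verify_loading` flag does in the example script.
loaded_model = INCModelForCausalLM.from_pretrained("clm_woq")
tokens = tokenizer("This is a sample input", return_tensors="pt")
with torch.no_grad():
    outputs = loaded_model(**tokens)
```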
-> **_Note:_** `weight_only` quantization_approach requires neural-compressor >= 2.3 and intel-extension-for-transformers >= 1.3. +> **_Note:_** `weight_only` quantization_approach requires `neural-compressor` >= 2.3 and `intel-extension-for-transformers` >= 1.3. From 032b0ef235ea327ac677bb35bb1c0e81455c6990 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:14:03 +0800 Subject: [PATCH 49/64] Update examples/neural_compressor/language-modeling/run_clm.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/language-modeling/run_clm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 5a6256d6b1..a12b458edb 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -236,7 +236,7 @@ class OptimizationArguments: default=128, metadata={"help": "Block size. sub weight matrix size to run GPTQ."}, ) - gptq_nsamples: int = field(default=128, metadata={"help": "Number of calibration data samples."}) + num_calibration_samples: int = field(default=128, metadata={"help": "Number of examples to use for the GPTQ calibration step."}) gptq_use_max_length: bool = field( default=False, metadata={"help": "Set all sequence length to be same length of args.gptq_pad_max_length"}, From 6a6a97c17152e05512625297329739866e2976b8 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:14:21 +0800 Subject: [PATCH 50/64] Update examples/neural_compressor/language-modeling/run_clm.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/language-modeling/run_clm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index a12b458edb..8e0f55e28c 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -241,7 +241,7 @@ class OptimizationArguments: default=False, metadata={"help": "Set all sequence length to be same length of args.gptq_pad_max_length"}, ) - gptq_pad_max_length: int = field( + pad_max_length: int = field( default=2048, metadata={"help": "Calibration dataset sequence max length, this should align with your model config"}, ) From 8e90ac8ad546c649e59d40d7eb1a7a8409db07b9 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:14:34 +0800 Subject: [PATCH 51/64] Update examples/neural_compressor/language-modeling/run_clm.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/language-modeling/run_clm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 8e0f55e28c..3d3b1d4a28 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -237,7 +237,7 @@ class OptimizationArguments: metadata={"help": "Block size. 
sub weight matrix size to run GPTQ."}, ) num_calibration_samples: int = field(default=128, metadata={"help": "Number of examples to use for the GPTQ calibration step."}) - gptq_use_max_length: bool = field( + use_max_length: bool = field( default=False, metadata={"help": "Set all sequence length to be same length of args.gptq_pad_max_length"}, ) From 88760bc5290b9d8fa521ff6073b1456a342a8860 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:15:23 +0800 Subject: [PATCH 52/64] Update examples/neural_compressor/language-modeling/run_clm.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/language-modeling/run_clm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 3d3b1d4a28..73c97439b6 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -230,7 +230,7 @@ class OptimizationArguments: ) gptq_percdamp: float = field( default=0.01, - metadata={"help": "Percent of the average Hessian diagonal to use for dampening."}, + metadata={"help": "Percentage of Hessian's diagonal values average, which will be added to Hessian's diagonal to increase numerical stability, used for GPTQ quantization"}, ) gptq_block_size: int = field( default=128, From f51266ad480ab2a13c1a784308b3c6ed2d33e9c3 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sat, 23 Mar 2024 21:15:51 +0800 Subject: [PATCH 53/64] Update examples/neural_compressor/language-modeling/run_clm.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/language-modeling/run_clm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 73c97439b6..746b7261b5 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -228,7 +228,8 @@ class OptimizationArguments: default="RTN", metadata={"help": "Quantization methodology for weight only quantization. 
Choose from 'RTN' and 'GPTQ'."}, ) - gptq_percdamp: float = field( + damp_percent: float = field( + default=0.01, metadata={"help": "Percentage of Hessian's diagonal values average, which will be added to Hessian's diagonal to increase numerical stability, used for GPTQ quantization"}, ) From f9702721661c22ac60ea017e66657ceadbe5f1bf Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Sun, 24 Mar 2024 22:39:34 +0800 Subject: [PATCH 54/64] Load weight-only quantized model with INCModelForCausalLM Signed-off-by: Cheng, Penghui --- .../language-modeling/run_clm.py | 6 +--- optimum/intel/neural_compressor/__init__.py | 4 --- .../intel/neural_compressor/modeling_base.py | 31 ++++++++++++++++--- tests/neural_compressor/test_optimization.py | 3 +- 4 files changed, 28 insertions(+), 16 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 746b7261b5..f169a621e0 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -63,7 +63,6 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig - from optimum.intel.neural_compressor import ITREXAutoModelForCausalLM os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -777,10 +776,7 @@ def compute_metrics(eval_preds): trainer.model = quantizer._quantized_model if optim_args.apply_quantization and optim_args.verify_loading: - if optim_args.quantization_approach == "weight_only": - loaded_model = ITREXAutoModelForCausalLM.from_pretrained(training_args.output_dir) - else: - loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) + loaded_model = INCModelForCausalLM.from_pretrained(training_args.output_dir) tokens = tokenizer("This is a sample input", return_tensors="pt") with torch.no_grad(): original_model_outputs = trainer.model(**tokens) diff --git a/optimum/intel/neural_compressor/__init__.py b/optimum/intel/neural_compressor/__init__.py index f3a7bffe69..2daecfbc93 100644 --- a/optimum/intel/neural_compressor/__init__.py +++ b/optimum/intel/neural_compressor/__init__.py @@ -32,7 +32,3 @@ if is_diffusers_available(): from .modeling_diffusion import INCStableDiffusionPipeline - - -if is_intel_extension_for_transformers_available(): - from .modeling_base import ITREXAutoModelForCausalLM diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index 0226855d64..01d071bdd2 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -65,11 +65,7 @@ if is_intel_extension_for_transformers_available(): from intel_extension_for_transformers.transformers.modeling import AutoModelForCausalLM as ITREX_WOQ_MODEL - - class ITREXAutoModelForCausalLM(ITREX_WOQ_MODEL): - auto_model_class = AutoModelForCausalLM - export_feature = "text-generation" - + from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig class INCModel(OptimizedModel): auto_model_class = AutoModel @@ -138,6 +134,31 @@ def _from_pretrained( model_save_dir = Path(model_cache_path).parent inc_config = None msg = None + try: + quantization_config = WeightOnlyQuantConfig.from_pretrained(model_id) + if getattr(quantization_config, "algorithm", None) is not None and quantization_config.algorithm.lower() in [ + "rtn", "gptq", "awq", "autoaround" + ]: + if not 
is_intel_extension_for_transformers_available(): + raise ImportError( + "Didn't find out intel-etension-for-transformers package. " + "Please install packages: pip install intel-etension-for-transformers and pip install peft." + ) + return ITREX_WOQ_MODEL.from_pretrained( + pretrained_model_name_or_path=model_id, + use_auth_token=use_auth_token, + revision=revision, + force_download=force_download, + cache_dir=cache_dir, + local_files_only=local_files_only, + subfolder=subfolder, + trust_remote_code=trust_remote_code, + **kwargs, + ) + except EnvironmentError: + msg = ( + "The model is not quantized with weight-only quantization." + ) try: inc_config = INCConfig.from_pretrained(model_id) if not is_torch_version("==", inc_config.torch_version): diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index 026138553c..6e91cc26b7 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -65,7 +65,6 @@ from optimum.pipelines import ORT_SUPPORTED_TASKS if is_intel_extension_for_transformers_available(): - from optimum.intel.neural_compressor import ITREXAutoModelForCausalLM from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -244,7 +243,7 @@ def test_weight_only_quantization(self, no_config, algo, weight_dtype): weight_only=True, # use RTN quantization method and NF4 weight data type is default. save_directory=tmp_dir, ) - q_model = ITREXAutoModelForCausalLM.from_pretrained(tmp_dir) + q_model = INCModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) out = model(inp)[0] q_out = q_model(inp)[0] From 5ddd360a043496bcbf4a3bdadb590590137f32c9 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Mon, 25 Mar 2024 10:36:48 +0800 Subject: [PATCH 55/64] Changed parameters name for GPTQ in example Signed-off-by: Cheng, Penghui --- examples/neural_compressor/language-modeling/run_clm.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 22b884f4f8..831606d840 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -665,11 +665,11 @@ def compute_metrics(eval_preds): if optim_args.quantization_methodology == "GPTQ": algorithm_args = { "act_order": False, - "percdamp": optim_args.gptq_percdamp, + "percdamp": optim_args.damp_percent, "block_size": optim_args.gptq_block_size, - "nsamples": optim_args.gptq_nsamples, - "use_max_length": optim_args.gptq_use_max_length, - "pad_max_length": optim_args.gptq_pad_max_length, + "nsamples": optim_args.num_calibration_samples, + "use_max_length": optim_args.use_max_length, + "pad_max_length": optim_args.pad_max_length, } quantization_config = WeightOnlyQuantConfig( weight_dtype=optim_args.weight_dtype, From 721dd3b86e1b9881b953ee9e7a4bfa63a902d8ab Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Mon, 25 Mar 2024 10:53:41 +0800 Subject: [PATCH 56/64] Changed parameters order in INCQuantizer.quantize Signed-off-by: Cheng, Penghui --- optimum/intel/neural_compressor/quantization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 24eb8c8e42..eed39233a6 100644 --- 
a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -143,8 +143,8 @@ def from_pretrained(cls, model: PreTrainedModel, **kwargs): def quantize( self, + quantization_config: Config, save_directory: Union[str, Path], - quantization_config: Config = None, calibration_dataset: Dataset = None, batch_size: int = 8, data_collator: Optional[DataCollator] = None, From ac9aee8643b9214807f2f8b663b663da0eb11f45 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Mon, 25 Mar 2024 11:26:27 +0800 Subject: [PATCH 57/64] Fixed UT error Signed-off-by: Cheng, Penghui --- tests/neural_compressor/test_optimization.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py index 6e91cc26b7..88e203a517 100644 --- a/tests/neural_compressor/test_optimization.py +++ b/tests/neural_compressor/test_optimization.py @@ -240,8 +240,9 @@ def test_weight_only_quantization(self, no_config, algo, weight_dtype): ) else: q_model = quantizer.quantize( - weight_only=True, # use RTN quantization method and NF4 weight data type is default. + quantization_config=None, save_directory=tmp_dir, + weight_only=True, # use RTN quantization method and NF4 weight data type is default. ) q_model = INCModelForCausalLM.from_pretrained(tmp_dir) inp = torch.tensor([calibration_dataset[0]["input_ids"]]) From d7bd27eb0601dd0671f1972ca53491e19621e61d Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 26 Mar 2024 09:34:06 +0800 Subject: [PATCH 58/64] Update examples/neural_compressor/text-generation/run_generation.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- examples/neural_compressor/text-generation/run_generation.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/examples/neural_compressor/text-generation/run_generation.py b/examples/neural_compressor/text-generation/run_generation.py index 8b1adbd3f8..9966a73c10 100755 --- a/examples/neural_compressor/text-generation/run_generation.py +++ b/examples/neural_compressor/text-generation/run_generation.py @@ -368,9 +368,7 @@ def calibration_fn(p_model): args.length = adjust_length_to_model( args.length, - max_sequence_length=( - model.config.max_position_embeddings if hasattr(model.config, "max_position_embeddings") else 0 - ), + max_sequence_length=getattr(model.config, "max_position_embeddings", 0), ) logger.info(args) From 19bdf0f2a5fca609357f98894c2f65876ddd2c07 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 26 Mar 2024 09:40:06 +0800 Subject: [PATCH 59/64] Update optimum/intel/neural_compressor/quantization.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/intel/neural_compressor/quantization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index eed39233a6..e726261c5c 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -178,7 +178,7 @@ def quantize( save_onnx_model = kwargs.pop("save_onnx_model", False) device = kwargs.pop("device", "cpu") use_cpu = True if device == torch.device("cpu") or device == "cpu" else False - use_xpu = True if (isinstance(device, torch.device) and device.type == "xpu") or device == "xpu" else False + use_xpu = device == torch.device("xpu") or device == "xpu" if save_onnx_model and 
(isinstance(self._original_model, ORTModel) or weight_only): save_onnx_model = False From dd981df9bc8c2a970449a3eb50c261c65aa8627e Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Tue, 26 Mar 2024 17:40:05 +0800 Subject: [PATCH 60/64] Update optimum/intel/neural_compressor/quantization.py Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> --- optimum/intel/neural_compressor/quantization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index e726261c5c..4ad19256b3 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -177,7 +177,7 @@ def quantize( save_directory.mkdir(parents=True, exist_ok=True) save_onnx_model = kwargs.pop("save_onnx_model", False) device = kwargs.pop("device", "cpu") - use_cpu = True if device == torch.device("cpu") or device == "cpu" else False + use_cpu = device == torch.device("cpu") or device == "cpu" use_xpu = device == torch.device("xpu") or device == "xpu" if save_onnx_model and (isinstance(self._original_model, ORTModel) or weight_only): From af07192b2c51c5a9a19810d2f1bf794cdf6216ee Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 27 Mar 2024 11:23:15 +0800 Subject: [PATCH 61/64] Update import message Signed-off-by: Cheng, Penghui --- .../neural_compressor/language-modeling/run_clm.py | 10 +++++----- optimum/intel/neural_compressor/modeling_base.py | 13 +++++++------ optimum/intel/utils/import_utils.py | 3 ++- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/examples/neural_compressor/language-modeling/run_clm.py b/examples/neural_compressor/language-modeling/run_clm.py index 831606d840..ef24616307 100644 --- a/examples/neural_compressor/language-modeling/run_clm.py +++ b/examples/neural_compressor/language-modeling/run_clm.py @@ -57,7 +57,10 @@ from transformers.utils.versions import require_version from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer, INCTrainer -from optimum.intel.utils.import_utils import is_intel_extension_for_transformers_available +from optimum.intel.utils.import_utils import ( + INTEL_EXTENSION_FOR_TRANSFORMERS_IMPORT_ERROR, + is_intel_extension_for_transformers_available, +) if is_intel_extension_for_transformers_available(): @@ -656,10 +659,7 @@ def compute_metrics(eval_preds): recipes = {} if optim_args.quantization_approach == "weight_only": if not is_intel_extension_for_transformers_available(): - raise ImportError( - "Didn't find out intel-etension-for-transformers package. " - "Please install packages: pip install intel-etension-for-transformers and pip install peft." 
- ) + raise ImportError(INTEL_EXTENSION_FOR_TRANSFORMERS_IMPORT_ERROR.format("WeightOnly quantization")) if optim_args.apply_pruning or optim_args.apply_distillation: raise ValueError("Weight only quantization and pruning or distillation cannot be combined.") if optim_args.quantization_methodology == "GPTQ": diff --git a/optimum/intel/neural_compressor/modeling_base.py b/optimum/intel/neural_compressor/modeling_base.py index 0e95e6b9c4..c37542fdef 100644 --- a/optimum/intel/neural_compressor/modeling_base.py +++ b/optimum/intel/neural_compressor/modeling_base.py @@ -43,7 +43,12 @@ from optimum.intel.generation import BaseModelForCausalLM from ...modeling_base import OptimizedModel -from ..utils.import_utils import _torch_version, is_intel_extension_for_transformers_available, is_torch_version +from ..utils.import_utils import ( + _torch_version, + is_intel_extension_for_transformers_available, + is_torch_version, + requires_backends, +) from .configuration import INCConfig from .utils import WEIGHTS_NAME @@ -137,15 +142,11 @@ def _from_pretrained( inc_config = None msg = None try: + requires_backends(cls, ["intel_extension_for_transformers"]) quantization_config = WeightOnlyQuantConfig.from_pretrained(model_id) if getattr( quantization_config, "algorithm", None ) is not None and quantization_config.algorithm.lower() in ["rtn", "gptq", "awq", "autoaround"]: - if not is_intel_extension_for_transformers_available(): - raise ImportError( - "Didn't find out intel-etension-for-transformers package. " - "Please install packages: pip install intel-etension-for-transformers and pip install peft." - ) return ITREX_WOQ_MODEL.from_pretrained( pretrained_model_name_or_path=model_id, use_auth_token=use_auth_token, diff --git a/optimum/intel/utils/import_utils.py b/optimum/intel/utils/import_utils.py index 4213cc2c7b..08a9ec1f88 100644 --- a/optimum/intel/utils/import_utils.py +++ b/optimum/intel/utils/import_utils.py @@ -66,6 +66,7 @@ if _intel_extension_for_transformers_available: try: _intel_extension_for_transformers_version = importlib_metadata.version("intel_extension_for_transformers") + logging.warn("`transformers` version >= 4.31 is requirements by intel-extension-for-transformers.") except importlib_metadata.PackageNotFoundError: _intel_extension_for_transformers_available = False @@ -350,7 +351,7 @@ def is_timm_version(operation: str, version: str): INTEL_EXTENSION_FOR_TRANSFORMERS_IMPORT_ERROR = """ {0} requires the intel-extension-for-transformers library but it was not found in your environment. You can install it with pip: -`pip install intel-extension-for-transformers`. Please note that you may need to restart your runtime after installation. +`pip install intel-extension-for-transformers` and `pip install peft`. Please note that you may need to restart your runtime after installation. 
""" DATASETS_IMPORT_ERROR = """ From 9c24871d480bf79ef0c9f5321c714b7c45b04397 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 27 Mar 2024 22:34:38 +0800 Subject: [PATCH 62/64] Limit intel-extension-for-transformers version Signed-off-by: Cheng, Penghui --- optimum/intel/neural_compressor/quantization.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 4ad19256b3..50c67e4896 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -47,9 +47,11 @@ from ..utils.constant import _TASK_ALIASES, MIN_QDQ_ONNX_OPSET, ONNX_WEIGHTS_NAME, WEIGHTS_NAME from ..utils.import_utils import ( + _intel_extension_for_transformers_version, _ipex_version, _neural_compressor_version, is_intel_extension_for_transformers_available, + is_intel_extension_for_transformers_version, is_ipex_version, is_neural_compressor_version, ) @@ -73,6 +75,13 @@ from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig Config = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] + + INTEL_EXTENSION_FOR_TRANSFORMERS_MINIMUM_VERSION = "1.3.2" + if is_intel_extension_for_transformers_version("!=", INTEL_EXTENSION_FOR_TRANSFORMERS_MINIMUM_VERSION): + raise ImportError( + f"Found an incompatible version of `intel-extension-for-transformers`. Found version {_intel_extension_for_transformers_version}, " + f"but only version {INTEL_EXTENSION_FOR_TRANSFORMERS_MINIMUM_VERSION} is supported." + ) else: Config = PostTrainingQuantConfig From 1331cdc5207e04f5e33d124779746182193f0a3e Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 27 Mar 2024 23:15:21 +0800 Subject: [PATCH 63/64] Limit torch version for weight-only quantization Signed-off-by: Cheng, Penghui --- .../intel/neural_compressor/quantization.py | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/optimum/intel/neural_compressor/quantization.py b/optimum/intel/neural_compressor/quantization.py index 50c67e4896..6b5ea38a16 100644 --- a/optimum/intel/neural_compressor/quantization.py +++ b/optimum/intel/neural_compressor/quantization.py @@ -50,10 +50,12 @@ _intel_extension_for_transformers_version, _ipex_version, _neural_compressor_version, + _torch_version, is_intel_extension_for_transformers_available, is_intel_extension_for_transformers_version, is_ipex_version, is_neural_compressor_version, + is_torch_version, ) from .configuration import INCConfig from .modeling_base import ( # noqa @@ -70,18 +72,24 @@ if is_intel_extension_for_transformers_available(): - from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model - from intel_extension_for_transformers.transformers.modeling.modeling_auto import save_low_bit - from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig - - Config = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] - INTEL_EXTENSION_FOR_TRANSFORMERS_MINIMUM_VERSION = "1.3.2" if is_intel_extension_for_transformers_version("!=", INTEL_EXTENSION_FOR_TRANSFORMERS_MINIMUM_VERSION): raise ImportError( f"Found an incompatible version of `intel-extension-for-transformers`. Found version {_intel_extension_for_transformers_version}, " f"but only version {INTEL_EXTENSION_FOR_TRANSFORMERS_MINIMUM_VERSION} is supported." 
) + TORCH_VERSION = "2.1.0" + if is_torch_version("!=", TORCH_VERSION): + raise ImportError( + f"Found an incompatible version of `torch`. Found version {_torch_version}, " + f"but only version {TORCH_VERSION} is supported." + ) + + from intel_extension_for_transformers.llm.quantization.utils import convert_to_quantized_model + from intel_extension_for_transformers.transformers.modeling.modeling_auto import save_low_bit + from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig + + Config = Union[PostTrainingQuantConfig, WeightOnlyQuantConfig] else: Config = PostTrainingQuantConfig From 638f516e688b75171908f7248425793591a29f90 Mon Sep 17 00:00:00 2001 From: "Cheng, Penghui" Date: Wed, 27 Mar 2024 23:49:40 +0800 Subject: [PATCH 64/64] Fixed doc building error Signed-off-by: Cheng, Penghui --- .github/workflows/test_inc.yml | 2 ++ setup.py | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_inc.yml b/.github/workflows/test_inc.yml index 3ba52ec008..16c01e7298 100644 --- a/.github/workflows/test_inc.yml +++ b/.github/workflows/test_inc.yml @@ -35,6 +35,8 @@ jobs: pip install torch==2.1.0 torchaudio==2.1.0 torchvision==0.16 --extra-index-url https://download.pytorch.org/whl/cpu pip install .[neural-compressor,diffusers,tests] pip install intel-extension-for-pytorch==2.1.100 + pip install intel-extension-for-transformers==1.3.2 + pip install peft - name: Test with Pytest run: | pytest tests/neural_compressor/ diff --git a/setup.py b/setup.py index fe954e3ab0..e80d0ea448 100644 --- a/setup.py +++ b/setup.py @@ -61,9 +61,6 @@ "neural-compressor>=2.2.0", "onnxruntime<1.15.0", "accelerate", - # will remove after intel-extension-for-transformers 1.3.3 release. - "intel-extension-for-transformers>=1.3", - "peft", ], "openvino": ["openvino>=2023.3", "nncf>=2.8.1"], "openvino-tokenizers": ["openvino-tokenizers[transformers]"],
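As a closing illustration of the GPTQ path wired up across these patches, the sketch below combines the `algorithm_args` built in `run_clm.py` with the calibration-dataset requirement enforced in `quantization.py`. The inline two-sample dataset, output directory, and embedding resize are placeholders and assumptions rather than part of the patches; a real run would use a tokenized corpus as in the example script. Note that the final patches in the series expect `intel-extension-for-transformers` 1.3.2, `torch` 2.1.0 and `peft` to be installed separately, as the CI workflow above does.

```python
# Hedged sketch of the GPTQ weight-only path; API names and argument keys come from
# the diffs above, while the tiny inline calibration set is a placeholder.
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

from intel_extension_for_transformers.transformers.utils.config import WeightOnlyQuantConfig
from optimum.intel.neural_compressor import INCModelForCausalLM, INCQuantizer

model_name = "hf-internal-testing/tiny-random-GPTNeoForCausalLM"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
model.resize_token_embeddings(len(tokenizer))  # keep the added pad token in range

# Placeholder calibration data: a couple of pre-tokenized samples.
encodings = tokenizer(["This is a sample input", "Another short sample"], padding=True)
calibration_dataset = Dataset.from_dict(dict(encodings))

quantizer = INCQuantizer.from_pretrained(model, task="text-generation")
quantization_config = WeightOnlyQuantConfig(
    algorithm="GPTQ",
    algorithm_args={
        "act_order": False,    # keep the original weight-column order
        "percdamp": 0.01,      # dampening added to the Hessian diagonal
        "block_size": 128,     # sub weight matrix size processed per GPTQ step
        "nsamples": 128,       # calibration samples GPTQ may draw
        "use_max_length": False,
        "pad_max_length": 2048,
    },
    weight_dtype="int4_clip",
)
# GPTQ requires calibration data (RTN does not), as enforced in quantization.py;
# the example script uses batch_size=1 for the weight-only path.
quantizer.quantize(
    quantization_config=quantization_config,
    calibration_dataset=calibration_dataset,
    batch_size=1,
    save_directory="clm_woq_gptq",
)
loaded_model = INCModelForCausalLM.from_pretrained("clm_woq_gptq")
```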