Commit 04ab002

Merge branch 'ea/ov_export_configs' of https://github.com/eaidova/optimum-intel into ea/ov_export_configs
2 parents 3263894 + 507f63d commit 04ab002

13 files changed: +213 -94 lines

docs/source/inference.mdx (+5 -2)

@@ -110,7 +110,7 @@ By default the quantization scheme will be [assymmetric](https://github.com/open
 
 For INT4 quantization you can also specify the following arguments :
 * The `--group-size` parameter will define the group size to use for quantization, `-1` it will results in per-column quantization.
-* The `--ratio` CLI parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`.
+* The `--ratio` parameter controls the ratio between 4-bit and 8-bit quantization. If set to 0.9, it means that 90% of the layers will be quantized to `int4` while 10% will be quantized to `int8`.
 
 Smaller `group_size` and `ratio` of usually improve accuracy at the sacrifice of the model size and inference latency.
 
@@ -122,8 +122,11 @@ from optimum.intel import OVModelForCausalLM
 model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True)
 ```
 
-> **NOTE:** `load_in_8bit` is enabled by default for the models larger than 1 billion parameters.
+<Tip warning={true}>
 
+`load_in_8bit` is enabled by default for the models larger than 1 billion parameters.
+
+</Tip>
 
 To apply quantization on both weights and activations, you can use the `OVQuantizer`, more information in the [documentation](https://huggingface.co/docs/optimum/main/en/intel/optimization_ov#optimization).
 
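For readers who want to try the options described in this documentation change from Python rather than the CLI, here is a minimal hedged sketch; the model id is a placeholder, and `bits`, `ratio` and `group_size` are assumed to be the `OVWeightQuantizationConfig` counterparts of the CLI flags above:

```python
from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

# Placeholder model id, used only for illustration.
model_id = "gpt2"

# Assumed keyword arguments mirroring the CLI flags above: roughly 90% of the layers
# quantized to int4 and the rest to int8, with a quantization group size of 128.
quantization_config = OVWeightQuantizationConfig(bits=4, ratio=0.9, group_size=128)

model = OVModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
model.save_pretrained("gpt2-ov-int4")
```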

docs/source/optimization_ov.mdx (+5 -4)

@@ -38,8 +38,6 @@ save_dir = "ptq_model"
 def preprocess_function(examples, tokenizer):
     return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True)
 
-# Load the default quantization configuration detailing the quantization we wish to apply
-quantization_config = OVConfig()
 # Instantiate our OVQuantizer using the desired configuration
 quantizer = OVQuantizer.from_pretrained(model)
 # Create the calibration dataset used to perform static quantization
@@ -52,7 +50,6 @@ calibration_dataset = quantizer.get_calibration_dataset(
 )
 # Apply static quantization and export the resulting quantized model to OpenVINO IR format
 quantizer.quantize(
-    quantization_config=quantization_config,
     calibration_dataset=calibration_dataset,
     save_directory=save_dir,
 )
@@ -72,7 +69,11 @@ from optimum.intel import OVModelForCausalLM
 model = OVModelForCausalLM.from_pretrained(model_id, load_in_8bit=True)
 ```
 
-> **NOTE:** `load_in_8bit` is enabled by default for models larger than 1 billion parameters.
+<Tip warning={true}>
+
+`load_in_8bit` is enabled by default for the models larger than 1 billion parameters.
+
+</Tip>
 
 For the 4-bit weight quantization you can use the `quantization_config` to specify the optimization parameters, for example:
 
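The removed lines mean the default `OVConfig()` no longer needs to be built by hand; `quantizer.quantize()` falls back to a default configuration when none is given. A self-contained sketch of the updated flow, assuming the checkpoint and dataset arguments of the surrounding documentation example:

```python
from functools import partial

from transformers import AutoModelForSequenceClassification, AutoTokenizer
from optimum.intel import OVQuantizer

# Placeholder checkpoint, used only for illustration.
model_id = "distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

def preprocess_function(examples, tokenizer):
    return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True)

# No explicit OVConfig anymore: the quantizer applies its default quantization configuration.
quantizer = OVQuantizer.from_pretrained(model)
calibration_dataset = quantizer.get_calibration_dataset(
    "glue",
    dataset_config_name="sst2",
    preprocess_function=partial(preprocess_function, tokenizer=tokenizer),
    num_samples=300,
    dataset_split="train",
)
quantizer.quantize(calibration_dataset=calibration_dataset, save_directory="ptq_model")
```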

requirements file (path not shown in this capture) (+2 -1)

@@ -1,5 +1,6 @@
 accelerate
 diffusers
 torch~=1.13
-nncf @ git+https://github.com/openvinotoolkit/nncf.git
+torchvision~=0.14
+nncf
 tomesd @ git+https://github.com/AlexKoff88/tomesd.git@openvino

optimum/intel/generation/modeling.py (+3 -3)

@@ -105,13 +105,13 @@ def __init__(
         self.normalized_config = NormalizedConfigManager.get_normalized_config_class(config.model_type)(config)
         self.model_dtype = kwargs.get("model_dtype", None)
 
-        logger.warning(
-            f"The class `{self.__class__}` has been depreciated and will be removed in optimum-intel v1.14, please use IPEXModel instead"
-        )
         if isinstance(model, torch.jit.ScriptModule):
             self.input_names = {
                 inputs.debugName().split(".")[0] for inputs in model.graph.inputs() if inputs.debugName() != "self"
             }
+            logger.warning(
+                f"The class `{self.__class__}` has been depreciated for TorchScript model, please use `IPEXModelForCausalLM` instead"
+            )
         else:
             self.input_names = set()
 
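The deprecation warning is now emitted only for TorchScript models and points to `IPEXModelForCausalLM`. A hedged sketch of the suggested replacement, assuming the `export=True` flag of the IPEX model classes (model id is a placeholder):

```python
from transformers import AutoTokenizer, pipeline
from optimum.intel import IPEXModelForCausalLM

# Placeholder checkpoint; export=True traces/converts the model for IPEX on the fly.
model_id = "gpt2"
model = IPEXModelForCausalLM.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(generator("OpenVINO and IPEX are", max_new_tokens=20)[0]["generated_text"])
```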

optimum/intel/openvino/configuration.py (+1 -1)

@@ -114,7 +114,7 @@ def __init__(
         **kwargs,
     ):
         super().__init__()
-        self.compression = compression or DEFAULT_QUANTIZATION_CONFIG
+        self.compression = compression
         self.input_info = input_info
         self.save_onnx_model = save_onnx_model
         self._enable_standard_onnx_export_option()
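
With this change `OVConfig.compression` stays `None` unless a compression dict is passed explicitly; the weight-compression settings introduced elsewhere in this commit travel in `quantization_config` instead. A small sketch of how the model classes now build and persist it (directory name is a placeholder):

```python
from optimum.intel import OVConfig, OVWeightQuantizationConfig

# compression is left as None; the 8-bit weight settings are carried by quantization_config,
# mirroring what the model __init__ methods in this commit do when quantization_config is set.
ov_config = OVConfig(quantization_config=OVWeightQuantizationConfig(bits=8))
ov_config.save_pretrained("ov_model_dir")  # placeholder directory
```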

optimum/intel/openvino/modeling_base.py (+34 -4)

@@ -57,6 +57,7 @@ def __init__(
         dynamic_shapes: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None,
         **kwargs,
     ):
         self.config = config
@@ -91,6 +92,10 @@ def __init__(
 
         self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
 
+        self._openvino_config = None
+        if quantization_config:
+            self._openvino_config = OVConfig(quantization_config=quantization_config)
+
     @staticmethod
     def load_model(file_name: Union[str, Path], quantization_config: Union[OVWeightQuantizationConfig, Dict] = None):
         """
@@ -143,6 +148,15 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         dst_path = os.path.join(save_directory, OV_XML_FILE_NAME)
         openvino.save_model(self.model, dst_path, compress_to_fp16=False)
 
+        self._save_openvino_config(save_directory)
+
+    def _save_openvino_config(self, save_directory: Union[str, Path]):
+        if self._openvino_config is not None:
+            if not isinstance(self._openvino_config.quantization_config.dataset, (str, type(None))):
+                self._openvino_config.quantization_config.dataset = None
+
+            self._openvino_config.save_pretrained(save_directory)
+
     @classmethod
     def _from_pretrained(
         cls,
@@ -203,12 +217,28 @@ def _from_pretrained(
             local_files_only=local_files_only,
         )
 
-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
 
         model = cls.load_model(model_cache_path, quantization_config=quantization_config)
-        return cls(model, config=config, model_save_dir=model_cache_path.parent, **kwargs)
+        return cls(
+            model,
+            config=config,
+            model_save_dir=model_cache_path.parent,
+            quantization_config=quantization_config,
+            **kwargs,
+        )
+
+    @staticmethod
+    def _prepare_weight_quantization_config(
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, load_in_8bit: bool = False
+    ):
+        # Give default quantization config if not provided and load_in_8bit=True
+        if not quantization_config and load_in_8bit:
+            quantization_config = OVWeightQuantizationConfig(bits=8)
+        elif isinstance(quantization_config, dict):
+            quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config)
+
+        return quantization_config
 
     @staticmethod
     def _cached_file(
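
From the caller's side, the new `_prepare_weight_quantization_config` helper means the different ways of requesting 8-bit weights converge on an `OVWeightQuantizationConfig` that is kept on the model and written out by `_save_openvino_config`. A hedged sketch (model id is a placeholder; `export=True` converts the checkpoint to OpenVINO IR first):

```python
from optimum.intel import OVModelForCausalLM

# Placeholder model id. Both calls below should end up with an equivalent config attached:
# load_in_8bit=True is normalized to OVWeightQuantizationConfig(bits=8), while a plain dict
# goes through OVWeightQuantizationConfig.from_dict.
model_a = OVModelForCausalLM.from_pretrained("gpt2", export=True, load_in_8bit=True)
model_b = OVModelForCausalLM.from_pretrained("gpt2", export=True, quantization_config={"bits": 8})

# Saving now also writes the quantization settings next to the OpenVINO IR files.
model_a.save_pretrained("gpt2-ov-int8")
```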

optimum/intel/openvino/modeling_base_seq2seq.py (+8 -3)

@@ -58,6 +58,7 @@ def __init__(
         dynamic_shapes: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Union[OVWeightQuantizationConfig, Dict] = None,
         **kwargs,
     ):
         self.config = config
@@ -76,6 +77,9 @@ def __init__(
         self.decoder_model = decoder
         self.decoder_with_past_model = decoder_with_past
         self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
+        self._openvino_config = None
+        if quantization_config:
+            self._openvino_config = OVConfig(quantization_config=quantization_config)
 
     def _save_pretrained(self, save_directory: Union[str, Path]):
         """
@@ -96,6 +100,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
             dst_path = os.path.join(save_directory, dst_file_name)
             openvino.save_model(src_file, dst_path, compress_to_fp16=False)
 
+        self._save_openvino_config(save_directory)
+
     @classmethod
     def _from_pretrained(
         cls,
@@ -155,9 +161,7 @@ def _from_pretrained(
         decoder_with_past_file_name = decoder_with_past_file_name or default_decoder_with_past_file_name
         decoder_with_past = None
 
-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
 
         # Load model from a local directory
         if os.path.isdir(model_id):
@@ -205,6 +209,7 @@ def _from_pretrained(
             decoder_with_past=decoder_with_past,
             config=config,
             model_save_dir=model_save_dir,
+            quantization_config=quantization_config,
            **kwargs,
         )
 
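The seq2seq base class follows the same pattern, so encoder-decoder models record and re-export their weight-quantization settings too. A hedged sketch (checkpoint is a placeholder; `export=True` converts it to OpenVINO IR first):

```python
from optimum.intel import OVModelForSeq2SeqLM

# Placeholder checkpoint; with load_in_8bit=True the encoder/decoder weights are compressed
# to 8 bit and the corresponding config is saved by _save_openvino_config on save_pretrained.
model = OVModelForSeq2SeqLM.from_pretrained("t5-small", export=True, load_in_8bit=True)
model.save_pretrained("t5-small-ov-int8")
```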

optimum/intel/openvino/modeling_decoder.py (+15 -9)

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import copy
 import logging
 import os
 from pathlib import Path
@@ -100,6 +101,7 @@ def __init__(
         dynamic_shapes: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None,
         **kwargs,
     ):
         if not dynamic_shapes:
@@ -117,6 +119,7 @@ def __init__(
             dynamic_shapes=False,
             ov_config=ov_config,
             model_save_dir=model_save_dir,
+            quantization_config=quantization_config,
            **kwargs,
         )
 
@@ -224,6 +227,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         dst_path = os.path.join(save_directory, OV_XML_FILE_NAME)
         openvino.save_model(model_to_save, dst_path, compress_to_fp16=False)
 
+        self._save_openvino_config(save_directory)
+
     @classmethod
     def _from_transformers(
         cls,
@@ -578,15 +583,10 @@ def _from_pretrained(
             local_files_only=local_files_only,
         )
 
-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
-
-        if isinstance(quantization_config, dict):
-            if quantization_config == {"bits": 4} and config.name_or_path in _DEFAULT_4BIT_CONFIGS:
-                quantization_config = _DEFAULT_4BIT_CONFIGS[config.name_or_path]
+        if isinstance(quantization_config, dict) and quantization_config == {"bits": 4}:
+            quantization_config = _DEFAULT_4BIT_CONFIGS.get(config.name_or_path, quantization_config)
 
-            quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config)
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
 
         load_in_4bit = quantization_config.bits == 4 if quantization_config else False
         model = cls.load_model(model_cache_path, quantization_config=None if load_in_4bit else quantization_config)
@@ -605,7 +605,12 @@ def _from_pretrained(
 
         enable_compilation = kwargs.pop("compile", True) and not load_in_4bit
         causal_model = init_cls(
-            model=model, config=config, model_save_dir=model_cache_path.parent, compile=enable_compilation, **kwargs
+            model=model,
+            config=config,
+            model_save_dir=model_cache_path.parent,
+            compile=enable_compilation,
+            quantization_config=quantization_config,
+            **kwargs,
         )
 
         if load_in_4bit:
@@ -634,6 +639,7 @@ def _from_pretrained(
             # seqlen = get_seqlen(causal_model)
             dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32)
             dataset = prepare_dataset(dataset)
+            quantization_config = copy.deepcopy(quantization_config)
            quantization_config.dataset = nncf.Dataset(dataset, lambda x: causal_model.prepare_inputs(**x))
 
            _weight_only_quantization(model, quantization_config)
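
The `copy.deepcopy` added above keeps the user-supplied config intact: the `nncf.Dataset` built for calibration is attached only to the copy used for quantization, so the config stored on the model (and later saved) still holds the original dataset name. A hedged sketch of triggering this data-aware 4-bit path (model id is a placeholder; `dataset="wikitext2"` is assumed to be one of the names accepted by the `get_dataset` helper used above):

```python
from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

# Placeholder model id; "wikitext2" is an assumed dataset name for the calibration helper.
config = OVWeightQuantizationConfig(bits=4, dataset="wikitext2")
model = OVModelForCausalLM.from_pretrained("gpt2", export=True, quantization_config=config)

# Thanks to the deepcopy, the persisted config still records dataset="wikitext2" (a string),
# not the nncf.Dataset object that was built for calibration.
model.save_pretrained("gpt2-ov-int4-data-aware")
```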

optimum/intel/openvino/modeling_diffusion.py (+28 -8)

@@ -87,15 +87,25 @@ def __init__(
         compile: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None,
         **kwargs,
     ):
         self._internal_dict = config
         self._device = device.upper()
         self.is_dynamic = dynamic_shapes
         self.ov_config = ov_config if ov_config is not None else {}
-        self._model_save_dir = (
-            Path(model_save_dir.name) if isinstance(model_save_dir, TemporaryDirectory) else model_save_dir
-        )
+
+        # This attribute is needed to keep one reference on the temporary directory, since garbage collecting
+        # would end-up removing the directory containing the underlying OpenVINO model
+        self._model_save_dir_tempdirectory_instance = None
+        if isinstance(model_save_dir, TemporaryDirectory):
+            self._model_save_dir_tempdirectory_instance = model_save_dir
+            self._model_save_dir = Path(model_save_dir.name)
+        elif isinstance(model_save_dir, str):
+            self._model_save_dir = Path(model_save_dir)
+        else:
+            self._model_save_dir = model_save_dir
+
         self.vae_decoder = OVModelVaeDecoder(vae_decoder, self)
         self.unet = OVModelUnet(unet, self)
         self.text_encoder = OVModelTextEncoder(text_encoder, self) if text_encoder is not None else None
@@ -140,6 +150,10 @@ def __init__(
 
         self._internal_dict.pop("vae", None)
 
+        self._openvino_config = None
+        if quantization_config:
+            self._openvino_config = OVConfig(quantization_config=quantization_config)
+
     def _save_pretrained(self, save_directory: Union[str, Path]):
         """
         Saves the model to the OpenVINO IR format so that it can be re-loaded using the
@@ -177,6 +191,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         if self.tokenizer_2 is not None:
             self.tokenizer_2.save_pretrained(save_directory / "tokenizer_2")
 
+        self._save_openvino_config(save_directory)
+
     @classmethod
     def _from_pretrained(
         cls,
@@ -257,10 +273,7 @@ def _from_pretrained(
             else:
                 kwargs[name] = load_method(new_model_save_dir)
 
-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
-
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
         unet = cls.load_model(
             new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config
         )
@@ -278,7 +291,14 @@ def _from_pretrained(
         if model_save_dir is None:
             model_save_dir = new_model_save_dir
 
-        return cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs)
+        return cls(
+            unet=unet,
+            config=config,
+            model_save_dir=model_save_dir,
+            quantization_config=quantization_config,
+            **components,
+            **kwargs,
+        )
 
     @classmethod
     def _from_transformers(
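
Besides holding on to the `TemporaryDirectory` instance, the diffusion pipeline now keeps and re-saves its weight-quantization config like the other model classes. A hedged sketch (the model id is a placeholder for a Stable Diffusion checkpoint already converted to OpenVINO IR):

```python
from optimum.intel import OVStableDiffusionPipeline

# Placeholder path to an OpenVINO Stable Diffusion checkpoint. With load_in_8bit=True the
# UNet and the other components are loaded with 8-bit weights, and the resulting
# quantization config is written out again by _save_openvino_config on save.
model_id = "my-stable-diffusion-openvino"  # placeholder
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, load_in_8bit=True)
pipe.save_pretrained("sd-ov-int8")
```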
