
Commit 0c651e1

Merge branch 'main' into ea/ov_export_configs

2 parents: d412a14 + 99cf01f

File tree

11 files changed: +188 -85 lines

docs/source/optimization_ov.mdx (-3)

@@ -38,8 +38,6 @@ save_dir = "ptq_model"
 def preprocess_function(examples, tokenizer):
     return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True)

-# Load the default quantization configuration detailing the quantization we wish to apply
-quantization_config = OVConfig()
 # Instantiate our OVQuantizer using the desired configuration
 quantizer = OVQuantizer.from_pretrained(model)
 # Create the calibration dataset used to perform static quantization
@@ -52,7 +50,6 @@ calibration_dataset = quantizer.get_calibration_dataset(
 )
 # Apply static quantization and export the resulting quantized model to OpenVINO IR format
 quantizer.quantize(
-    quantization_config=quantization_config,
     calibration_dataset=calibration_dataset,
     save_directory=save_dir,
 )
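With this change the documented example no longer builds an OVConfig by hand; quantize() falls back to a default configuration internally (see the quantization.py hunk below). A minimal sketch of the updated flow, assuming the model id and dataset arguments from the surrounding docs page (they are illustrative, not part of this diff):

    from functools import partial
    from transformers import AutoModelForSequenceClassification, AutoTokenizer
    from optimum.intel import OVQuantizer

    model_id = "distilbert-base-uncased-finetuned-sst-2-english"  # illustrative
    model = AutoModelForSequenceClassification.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    def preprocess_function(examples, tokenizer):
        return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True)

    quantizer = OVQuantizer.from_pretrained(model)
    calibration_dataset = quantizer.get_calibration_dataset(
        "glue",
        dataset_config_name="sst2",
        preprocess_function=partial(preprocess_function, tokenizer=tokenizer),
        dataset_split="train",
    )
    # No explicit OVConfig needed any more; a default is applied inside quantize()
    quantizer.quantize(calibration_dataset=calibration_dataset, save_directory="ptq_model")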
requirements.txt (full path truncated in this view; +2 -1)

@@ -1,5 +1,6 @@
 accelerate
 diffusers
 torch~=1.13
-nncf @ git+https://github.com/openvinotoolkit/nncf.git
+torchvision~=0.14
+nncf
 tomesd @ git+https://github.com/AlexKoff88/tomesd.git@openvino

optimum/intel/openvino/configuration.py (+1 -1)

@@ -114,7 +114,7 @@ def __init__(
         **kwargs,
     ):
         super().__init__()
-        self.compression = compression or DEFAULT_QUANTIZATION_CONFIG
+        self.compression = compression
         self.input_info = input_info
         self.save_onnx_model = save_onnx_model
         self._enable_standard_onnx_export_option()
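The intended effect of this one-line change is that OVConfig.compression stays None until a default is chosen at quantization time rather than being filled eagerly at construction. A quick sketch of that behavior (an assumption drawn from the hunks in this commit, not a documented guarantee):

    from optimum.intel import OVConfig

    ov_config = OVConfig()
    # Previously this attribute was eagerly set to DEFAULT_QUANTIZATION_CONFIG;
    # after this commit the default is injected later, inside OVQuantizer.quantize().
    assert ov_config.compression is None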

optimum/intel/openvino/modeling_base.py (+34 -4)

@@ -57,6 +57,7 @@ def __init__(
         dynamic_shapes: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None,
         **kwargs,
     ):
         self.config = config
@@ -91,6 +92,10 @@ def __init__(

         self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None

+        self._openvino_config = None
+        if quantization_config:
+            self._openvino_config = OVConfig(quantization_config=quantization_config)
+
     @staticmethod
     def load_model(file_name: Union[str, Path], quantization_config: Union[OVWeightQuantizationConfig, Dict] = None):
         """
@@ -143,6 +148,15 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         dst_path = os.path.join(save_directory, OV_XML_FILE_NAME)
         openvino.save_model(self.model, dst_path, compress_to_fp16=False)

+        self._save_openvino_config(save_directory)
+
+    def _save_openvino_config(self, save_directory: Union[str, Path]):
+        if self._openvino_config is not None:
+            if not isinstance(self._openvino_config.quantization_config.dataset, (str, type(None))):
+                self._openvino_config.quantization_config.dataset = None
+
+            self._openvino_config.save_pretrained(save_directory)
+
     @classmethod
     def _from_pretrained(
         cls,
@@ -203,12 +217,28 @@ def _from_pretrained(
             local_files_only=local_files_only,
         )

-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)

         model = cls.load_model(model_cache_path, quantization_config=quantization_config)
-        return cls(model, config=config, model_save_dir=model_cache_path.parent, **kwargs)
+        return cls(
+            model,
+            config=config,
+            model_save_dir=model_cache_path.parent,
+            quantization_config=quantization_config,
+            **kwargs,
+        )
+
+    @staticmethod
+    def _prepare_weight_quantization_config(
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, load_in_8bit: bool = False
+    ):
+        # Give default quantization config if not provided and load_in_8bit=True
+        if not quantization_config and load_in_8bit:
+            quantization_config = OVWeightQuantizationConfig(bits=8)
+        elif isinstance(quantization_config, dict):
+            quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config)
+
+        return quantization_config

     @staticmethod
     def _cached_file(

optimum/intel/openvino/modeling_base_seq2seq.py (+8 -3)

@@ -58,6 +58,7 @@ def __init__(
         dynamic_shapes: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Union[OVWeightQuantizationConfig, Dict] = None,
         **kwargs,
     ):
         self.config = config
@@ -76,6 +77,9 @@ def __init__(
         self.decoder_model = decoder
         self.decoder_with_past_model = decoder_with_past
         self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
+        self._openvino_config = None
+        if quantization_config:
+            self._openvino_config = OVConfig(quantization_config=quantization_config)

     def _save_pretrained(self, save_directory: Union[str, Path]):
         """
@@ -96,6 +100,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
             dst_path = os.path.join(save_directory, dst_file_name)
             openvino.save_model(src_file, dst_path, compress_to_fp16=False)

+        self._save_openvino_config(save_directory)
+
     @classmethod
     def _from_pretrained(
         cls,
@@ -155,9 +161,7 @@ def _from_pretrained(
         decoder_with_past_file_name = decoder_with_past_file_name or default_decoder_with_past_file_name
         decoder_with_past = None

-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)

         # Load model from a local directory
         if os.path.isdir(model_id):
@@ -205,6 +209,7 @@ def _from_pretrained(
             decoder_with_past=decoder_with_past,
             config=config,
             model_save_dir=model_save_dir,
+            quantization_config=quantization_config,
             **kwargs,
         )
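The same pattern applied to encoder/decoder models; a short sketch under the same assumptions (illustrative model id):

    from optimum.intel import OVModelForSeq2SeqLM

    model = OVModelForSeq2SeqLM.from_pretrained("t5-small", export=True, load_in_8bit=True)
    model.save_pretrained("ov_t5_int8")  # encoder/decoder IRs plus the recorded quantization config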

optimum/intel/openvino/modeling_decoder.py (+15 -9)

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import copy
 import logging
 import os
 from pathlib import Path
@@ -100,6 +101,7 @@ def __init__(
         dynamic_shapes: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None,
         **kwargs,
     ):
         if not dynamic_shapes:
@@ -117,6 +119,7 @@ def __init__(
             dynamic_shapes=False,
             ov_config=ov_config,
             model_save_dir=model_save_dir,
+            quantization_config=quantization_config,
             **kwargs,
         )

@@ -224,6 +227,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         dst_path = os.path.join(save_directory, OV_XML_FILE_NAME)
         openvino.save_model(model_to_save, dst_path, compress_to_fp16=False)

+        self._save_openvino_config(save_directory)
+
     @classmethod
     def _from_transformers(
         cls,
@@ -578,15 +583,10 @@ def _from_pretrained(
             local_files_only=local_files_only,
         )

-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
-
-        if isinstance(quantization_config, dict):
-            if quantization_config == {"bits": 4} and config.name_or_path in _DEFAULT_4BIT_CONFIGS:
-                quantization_config = _DEFAULT_4BIT_CONFIGS[config.name_or_path]
+        if isinstance(quantization_config, dict) and quantization_config == {"bits": 4}:
+            quantization_config = _DEFAULT_4BIT_CONFIGS.get(config.name_or_path, quantization_config)

-            quantization_config = OVWeightQuantizationConfig.from_dict(quantization_config)
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)

         load_in_4bit = quantization_config.bits == 4 if quantization_config else False
         model = cls.load_model(model_cache_path, quantization_config=None if load_in_4bit else quantization_config)
@@ -605,7 +605,12 @@ def _from_pretrained(

         enable_compilation = kwargs.pop("compile", True) and not load_in_4bit
         causal_model = init_cls(
-            model=model, config=config, model_save_dir=model_cache_path.parent, compile=enable_compilation, **kwargs
+            model=model,
+            config=config,
+            model_save_dir=model_cache_path.parent,
+            compile=enable_compilation,
+            quantization_config=quantization_config,
+            **kwargs,
         )

         if load_in_4bit:
@@ -634,6 +639,7 @@ def _from_pretrained(
             # seqlen = get_seqlen(causal_model)
             dataset = get_dataset(quantization_config.dataset, tokenizer, seqlen=32)
             dataset = prepare_dataset(dataset)
+            quantization_config = copy.deepcopy(quantization_config)
             quantization_config.dataset = nncf.Dataset(dataset, lambda x: causal_model.prepare_inputs(**x))

             _weight_only_quantization(model, quantization_config)
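For causal LMs two details stand out: a bare {"bits": 4} config may be swapped for a curated per-model default from _DEFAULT_4BIT_CONFIGS, and the config is deep-copied before an nncf.Dataset is attached, so the OVConfig that later gets saved never holds a live dataset object. A hedged sketch of the 4-bit path (model id illustrative):

    from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig

    # Either a config object or a plain dict works; a dict equal to {"bits": 4}
    # may be upgraded to a curated default if the model id is known.
    model = OVModelForCausalLM.from_pretrained(
        "gpt2",  # illustrative
        export=True,
        quantization_config=OVWeightQuantizationConfig(bits=4),
    )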

optimum/intel/openvino/modeling_diffusion.py (+16 -5)

@@ -87,6 +87,7 @@ def __init__(
         compile: bool = True,
         ov_config: Optional[Dict[str, str]] = None,
         model_save_dir: Optional[Union[str, Path, TemporaryDirectory]] = None,
+        quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None,
         **kwargs,
     ):
         self._internal_dict = config
@@ -140,6 +141,10 @@ def __init__(

         self._internal_dict.pop("vae", None)

+        self._openvino_config = None
+        if quantization_config:
+            self._openvino_config = OVConfig(quantization_config=quantization_config)
+
     def _save_pretrained(self, save_directory: Union[str, Path]):
         """
         Saves the model to the OpenVINO IR format so that it can be re-loaded using the
@@ -177,6 +182,8 @@ def _save_pretrained(self, save_directory: Union[str, Path]):
         if self.tokenizer_2 is not None:
             self.tokenizer_2.save_pretrained(save_directory / "tokenizer_2")

+        self._save_openvino_config(save_directory)
+
     @classmethod
     def _from_pretrained(
         cls,
@@ -257,10 +264,7 @@ def _from_pretrained(
         else:
             kwargs[name] = load_method(new_model_save_dir)

-        # Give default quantization config if not provided and load_in_8bit=True
-        if load_in_8bit:
-            quantization_config = quantization_config or {"bits": 8}
-
+        quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
         unet = cls.load_model(
             new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config
         )
@@ -278,7 +282,14 @@ def _from_pretrained(
         if model_save_dir is None:
             model_save_dir = new_model_save_dir

-        return cls(unet=unet, config=config, model_save_dir=model_save_dir, **components, **kwargs)
+        return cls(
+            unet=unet,
+            config=config,
+            model_save_dir=model_save_dir,
+            quantization_config=quantization_config,
+            **components,
+            **kwargs,
+        )

     @classmethod
     def _from_transformers(
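And the diffusion counterpart, where the config now also travels through the pipeline constructor. A sketch under the same assumptions (model id illustrative):

    from optimum.intel import OVStableDiffusionPipeline

    pipe = OVStableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", export=True, load_in_8bit=True
    )
    pipe.save_pretrained("ov_sd_int8")  # 8-bit components plus the saved quantization config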

optimum/intel/openvino/quantization.py (+14 -7)

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import copy
 import inspect
 import logging
 import os
@@ -44,7 +45,7 @@
 from ..utils.constant import _TASK_ALIASES
 from ..utils.import_utils import DATASETS_IMPORT_ERROR, is_datasets_available
 from ..utils.modeling_utils import get_model_device
-from .configuration import OVConfig, OVWeightQuantizationConfig
+from .configuration import DEFAULT_QUANTIZATION_CONFIG, OVConfig, OVWeightQuantizationConfig
 from .modeling_base import OVBaseModel
 from .utils import (
     MAX_ONNX_OPSET,
@@ -87,11 +88,14 @@ def __init__(self, request, data_cache=None):
         self.data_cache = data_cache

     def __call__(self, *args, **kwargs):
-        self.data_cache.append(*args)
+        # If __call__ is invoked then self.request must be an instance of CompiledModel
+        signature = inspect.signature(self.request)
+        bound_args = signature.bind(*args, **kwargs).arguments
+        self.data_cache.append(copy.deepcopy(bound_args["inputs"]))
         return self.request(*args, **kwargs)

     def infer(self, inputs: Any = None, share_inputs: bool = False):
-        self.data_cache.append(inputs)
+        self.data_cache.append(copy.deepcopy(inputs))
         return self.request.infer(inputs, share_inputs)

     def start_async(
@@ -102,7 +106,7 @@ def start_async(
         *,
         shared_memory: Any = None,
     ):
-        self.data_cache.append(inputs)
+        self.data_cache.append(copy.deepcopy(inputs))
         self.request.infer(inputs, share_inputs, share_outputs=True)

     def wait(self):
@@ -231,8 +235,11 @@ def quantize(
         )
         ov_config = ov_config or quantization_config

-        if ov_config is not None and not isinstance(ov_config, OVConfig):
-            raise TypeError(f"`ov_config` should be an `OVConfig`, but got: {type(ov_config)} instead.")
+        if ov_config is not None:
+            if not isinstance(ov_config, OVConfig):
+                raise TypeError(f"`ov_config` should be an `OVConfig`, but got: {type(ov_config)} instead.")
+            elif ov_config.compression is None:
+                ov_config.compression = DEFAULT_QUANTIZATION_CONFIG

         if isinstance(self.model, OVBaseModel):
             self._quantize_ovbasemodel(
@@ -351,7 +358,7 @@ def _quantize_torchmodel(
             logger.info(
                 "No configuration describing the quantization process was provided, a default OVConfig will be generated."
             )
-            ov_config = OVConfig()
+            ov_config = OVConfig(compression=DEFAULT_QUANTIZATION_CONFIG)
         onnx_file_name = (
             ONNX_WEIGHTS_NAME
             if file_name is None and ov_config.save_onnx_model
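The deep copies in InferRequestWrapper guard the calibration cache against callers that reuse and mutate the same input buffers between inferences; the __call__ fix also binds positional and keyword arguments properly instead of assuming a single positional input. A minimal self-contained illustration of the aliasing bug being fixed:

    import copy

    data_cache = []
    inputs = {"input_ids": [101, 2023, 102]}

    data_cache.append(copy.deepcopy(inputs))    # snapshot, as the wrapper now does
    inputs["input_ids"][0] = 0                  # caller reuses and mutates its buffer
    assert data_cache[0]["input_ids"][0] == 101  # cached calibration sample is unaffected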

Comments (0)