Skip to content

Commit 3a71f42

Browse files
committed
modify method name: rename `_prepare_quantization_config` to `_prepare_weight_quantization_config` and update all call sites
1 parent cb847bb commit 3a71f42

File tree

4 files changed

+5
-5
lines changed

4 files changed

+5
-5
lines changed

optimum/intel/openvino/modeling_base.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ def _from_pretrained(
217217
local_files_only=local_files_only,
218218
)
219219

220-
quantization_config = cls._prepare_quantization_config(quantization_config, load_in_8bit)
220+
quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
221221

222222
model = cls.load_model(model_cache_path, quantization_config=quantization_config)
223223
return cls(
@@ -229,7 +229,7 @@ def _from_pretrained(
229229
)
230230

231231
@staticmethod
232-
def _prepare_quantization_config(
232+
def _prepare_weight_quantization_config(
233233
quantization_config: Optional[Union[OVWeightQuantizationConfig, Dict]] = None, load_in_8bit: bool = False
234234
):
235235
# Give default quantization config if not provided and load_in_8bit=True

optimum/intel/openvino/modeling_base_seq2seq.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ def _from_pretrained(
161161
decoder_with_past_file_name = decoder_with_past_file_name or default_decoder_with_past_file_name
162162
decoder_with_past = None
163163

164-
quantization_config = cls._prepare_quantization_config(quantization_config, load_in_8bit)
164+
quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
165165

166166
# Load model from a local directory
167167
if os.path.isdir(model_id):

optimum/intel/openvino/modeling_decoder.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -584,7 +584,7 @@ def _from_pretrained(
584584
if isinstance(quantization_config, dict) and quantization_config == {"bits": 4}:
585585
quantization_config = _DEFAULT_4BIT_CONFIGS.get(config.name_or_path, quantization_config)
586586

587-
quantization_config = cls._prepare_quantization_config(quantization_config, load_in_8bit)
587+
quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
588588

589589
load_in_4bit = quantization_config.bits == 4 if quantization_config else False
590590
model = cls.load_model(model_cache_path, quantization_config=None if load_in_4bit else quantization_config)

optimum/intel/openvino/modeling_diffusion.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -264,7 +264,7 @@ def _from_pretrained(
264264
else:
265265
kwargs[name] = load_method(new_model_save_dir)
266266

267-
quantization_config = cls._prepare_quantization_config(quantization_config, load_in_8bit)
267+
quantization_config = cls._prepare_weight_quantization_config(quantization_config, load_in_8bit)
268268
unet = cls.load_model(
269269
new_model_save_dir / DIFFUSION_MODEL_UNET_SUBFOLDER / unet_file_name, quantization_config
270270
)

0 commit comments

Comments (0)