Skip to content

Commit 36459a1

Browse files
authored
Fix default ov config (#600)
1 parent 5c683a3 commit 36459a1

File tree

5 files changed

+9
-9
lines changed

5 files changed

+9
-9
lines changed

optimum/intel/openvino/modeling.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -434,8 +434,8 @@ def _from_transformers(
434434
save_dir = TemporaryDirectory()
435435
save_dir_path = Path(save_dir.name)
436436

437-
# If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
438-
if load_in_8bit is None or not quantization_config:
437+
# If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
438+
if load_in_8bit is None and not quantization_config:
439439
ov_config = None
440440
else:
441441
ov_config = OVConfig(dtype="fp32")

optimum/intel/openvino/modeling_base.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -314,8 +314,8 @@ def _from_transformers(
314314
save_dir = TemporaryDirectory()
315315
save_dir_path = Path(save_dir.name)
316316

317-
# If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
318-
if load_in_8bit is None or not quantization_config:
317+
# If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
318+
if load_in_8bit is None and not quantization_config:
319319
ov_config = None
320320
else:
321321
ov_config = OVConfig(dtype="fp32")

optimum/intel/openvino/modeling_base_seq2seq.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -258,8 +258,8 @@ def _from_transformers(
258258
if use_cache:
259259
task = task + "-with-past"
260260

261-
# If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
262-
if load_in_8bit is None or not quantization_config:
261+
# If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
262+
if load_in_8bit is None and not quantization_config:
263263
ov_config = None
264264
else:
265265
ov_config = OVConfig(dtype="fp32")

optimum/intel/openvino/modeling_decoder.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@ def _from_transformers(
260260
if use_cache:
261261
task = task + "-with-past"
262262

263-
# If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
263+
# If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
264264
if load_in_8bit is None and not quantization_config:
265265
ov_export_config = None
266266
else:

optimum/intel/openvino/modeling_diffusion.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -321,8 +321,8 @@ def _from_transformers(
321321
save_dir = TemporaryDirectory()
322322
save_dir_path = Path(save_dir.name)
323323

324-
# If load_in_8bit or quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
325-
if load_in_8bit is None or not quantization_config:
324+
# If load_in_8bit and quantization_config not specified then ov_config is set to None and will be set by default in convert depending on the model size
325+
if load_in_8bit is None and not quantization_config:
326326
ov_config = None
327327
else:
328328
ov_config = OVConfig(dtype="fp32")

0 commit comments

Comments (0)