Skip to content

Commit f2d9b78

Browse files
authored
Fixed the best qmodel recovery issue (#1620)
Signed-off-by: yiliu30 <yi4.liu@intel.com>
1 parent 813d930 commit f2d9b78

File tree

7 files changed

+14
-19
lines changed

7 files changed

+14
-19
lines changed

neural_compressor/config.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -652,7 +652,7 @@ def timeout(self):
652652
@timeout.setter
653653
def timeout(self, timeout):
654654
"""Set timeout."""
655-
if _check_value("timeout", timeout, int):
655+
if _check_value("timeout", timeout, (int, float)):
656656
self._timeout = timeout
657657

658658
@property

neural_compressor/mix_precision.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ def fit(model, conf, eval_func=None, eval_dataloader=None, eval_metric=None, **k
165165

166166
traceback.print_exc()
167167
finally:
168-
if strategy.best_qmodel:
168+
if strategy.get_best_qmodel():
169169
logger.info(
170170
"Specified timeout or max trials is reached! " "Found a quantized model which meet accuracy goal. Exit."
171171
)
@@ -176,4 +176,4 @@ def fit(model, conf, eval_func=None, eval_dataloader=None, eval_metric=None, **k
176176
"Not found any quantized model which meet accuracy goal. Exit."
177177
)
178178

179-
return strategy.best_qmodel
179+
return strategy.get_best_qmodel()

neural_compressor/quantization.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,7 @@ def update(d):
249249

250250
traceback.print_exc()
251251
finally:
252-
if strategy.best_qmodel:
252+
if strategy.get_best_qmodel():
253253
logger.info(
254254
"Specified timeout or max trials is reached! " "Found a quantized model which meet accuracy goal. Exit."
255255
)
@@ -260,4 +260,4 @@ def update(d):
260260
"Not found any quantized model which meet accuracy goal. Exit."
261261
)
262262

263-
return strategy.best_qmodel
263+
return strategy.get_best_qmodel()

neural_compressor/strategy/auto.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ def sequential_traverse(self):
104104

105105
pre_strategy = strategy
106106
strategy.traverse()
107-
self.best_qmodel = strategy.best_qmodel
107+
self.best_qmodel = strategy.get_best_qmodel()
108108
if self.best_qmodel:
109109
return
110110

neural_compressor/strategy/strategy.py

+6-3
Original file line numberDiff line numberDiff line change
@@ -1045,11 +1045,9 @@ def _eval_baseline(self):
10451045
self.show_baseline_info()
10461046

10471047
def _recover_best_qmodel_from_tuning_cfg(self):
1048-
"""Recover the best quantized model from tuning config."""
10491048
if self.best_tuning_cfg and not self.best_qmodel:
10501049
logger.info(
1051-
f"[Strategy] Recover the {self.best_tuning_cfg.get('trial_number', 'N/A')}-trial\
1052-
as the tuning result."
1050+
"[Strategy] Recover the %s-trial as the tuning result.", self.best_tuning_cfg.get("trial_number", "N/A")
10531051
)
10541052
self.best_qmodel = self.adaptor.quantize(
10551053
copy.deepcopy(self.best_tuning_cfg), self.model, self.calib_dataloader, self.q_func
@@ -1797,6 +1795,11 @@ def __setstate__(self, d):
17971795
"""
17981796
self.__dict__.update(d)
17991797

1798+
def get_best_qmodel(self):
1799+
"""Get the best quantized model."""
1800+
self._recover_best_qmodel_from_tuning_cfg()
1801+
return self.best_qmodel
1802+
18001803
def stop(self, timeout, trials_count):
18011804
"""Check if need to stop traverse.
18021805

neural_compressor/strategy/utils/tuning_space.py

-8
Original file line numberDiff line numberDiff line change
@@ -279,12 +279,6 @@ def _merge_optype_wise_cfg(self, cap: Dict, optype_wise_usr_cfg: Dict, fw_cap: D
279279
cap["op"][op_name_type], op_user_cfg, fw_cap["op"][op_name_type]
280280
)
281281

282-
def _merge_model_wise_cfg(self, cap: Dict, model_wise_usr_cfg: Dict, fw_cap: Dict):
283-
for op_name_type in cap["op"].keys():
284-
cap["op"][op_name_type] = self._merge_op_cfg(
285-
cap["op"][op_name_type], model_wise_usr_cfg, fw_cap["op"][op_name_type]
286-
)
287-
288282
def _merge_op_wise_cfg(self, cap: Dict, op_wise_usr_cfg: Dict, fw_cap: Dict):
289283
op_name_types = {key[0]: key for key in cap["op"].keys()}
290284
for op_name_pattern, op_user_cfg in op_wise_usr_cfg.items():
@@ -406,8 +400,6 @@ def _merge_with_user_cfg(self, capability: Dict, user_cfg: Dict):
406400
:return:
407401
"""
408402
fw_capability = deepcopy(capability)
409-
if user_cfg["model_wise"] is not None:
410-
self._merge_model_wise_cfg(capability, user_cfg["model_wise"], fw_capability)
411403
if user_cfg["optype_wise"] is not None:
412404
self._merge_optype_wise_cfg(capability, user_cfg["optype_wise"], fw_capability)
413405
if user_cfg["op_wise"] is not None:

neural_compressor/training.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -341,7 +341,7 @@ def eval_func(model):
341341

342342
traceback.print_exc()
343343
finally:
344-
if strategy.best_qmodel:
344+
if strategy.get_best_qmodel():
345345
logger.info(
346346
"Specified timeout or max trials is reached! " "Found a quantized model which meet accuracy goal. Exit."
347347
)
@@ -352,7 +352,7 @@ def eval_func(model):
352352
"Not found any quantized model which meet accuracy goal. Exit."
353353
)
354354

355-
compression_manager.model = strategy.best_qmodel
355+
compression_manager.model = strategy.get_best_qmodel()
356356

357357
return compression_manager.model
358358

0 commit comments

Comments (0)