1 parent 19bdf0f · commit dd981df
optimum/intel/neural_compressor/quantization.py
@@ -177,7 +177,7 @@ def quantize(
         save_directory.mkdir(parents=True, exist_ok=True)
         save_onnx_model = kwargs.pop("save_onnx_model", False)
         device = kwargs.pop("device", "cpu")
-        use_cpu = True if device == torch.device("cpu") or device == "cpu" else False
+        use_cpu = device == torch.device("cpu") or device == "cpu"
         use_xpu = device == torch.device("xpu") or device == "xpu"

         if save_onnx_model and (isinstance(self._original_model, ORTModel) or weight_only):
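The edit drops a redundant `True if ... else False` wrapper: the comparison expression already evaluates to a bool, matching the existing `use_xpu` line. A minimal standalone sketch (illustrative only, not part of the commit) showing the two forms agree whether `device` is a `torch.device` or a string:

import torch

for device in (torch.device("cpu"), "cpu", "xpu"):
    # Old form: conditional expression wrapping a boolean test
    old_style = True if device == torch.device("cpu") or device == "cpu" else False
    # New form: the boolean test itself
    new_style = device == torch.device("cpu") or device == "cpu"
    assert old_style == new_style  # identical result for every input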