We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 867acf8 commit 1b68223 — Copy full SHA for 1b68223
neural_compressor/torch/algorithms/mx_quant/mx.py
@@ -19,8 +19,10 @@
19
# limitations under the License.
20
21
import torch
22
-from neural_compressor.torch.utils import set_module, logger
23
from torch.nn import functional as F
+
24
+from neural_compressor.torch.utils import logger, set_module
25
26
from .utils import quantize_elemwise_op, quantize_mx_op
27
28
@@ -123,7 +125,7 @@ def mx_quantize(
123
125
logger.debug(f"MX quantized module:{name, m}")
124
126
log_msg = (
127
f"MX quantization config: w_dtype={config[(name, type(m).__name__)].w_dtype}, "
- + f"config[(name, type(m).__name__)].act_dtype, "
128
+ + "config[(name, type(m).__name__)].act_dtype, "
129
+ f"out_dtype={config[(name, type(m).__name__)].out_dtype}"
130
)
131
logger.debug(log_msg)
0 commit comments