Skip to content

Commit 5ddd360

Browse files
committed
Changed parameter names for GPTQ in example
Signed-off-by: Cheng, Penghui <penghui.cheng@intel.com>
1 parent e5558b0 commit 5ddd360

File tree

1 file changed

+4
-4
lines changed
  • examples/neural_compressor/language-modeling

1 file changed

+4
-4
lines changed

examples/neural_compressor/language-modeling/run_clm.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -665,11 +665,11 @@ def compute_metrics(eval_preds):
665665
if optim_args.quantization_methodology == "GPTQ":
666666
algorithm_args = {
667667
"act_order": False,
668-
"percdamp": optim_args.gptq_percdamp,
668+
"percdamp": optim_args.damp_percent,
669669
"block_size": optim_args.gptq_block_size,
670-
"nsamples": optim_args.gptq_nsamples,
671-
"use_max_length": optim_args.gptq_use_max_length,
672-
"pad_max_length": optim_args.gptq_pad_max_length,
670+
"nsamples": optim_args.num_calibration_samples,
671+
"use_max_length": optim_args.use_max_length,
672+
"pad_max_length": optim_args.pad_max_length,
673673
}
674674
quantization_config = WeightOnlyQuantConfig(
675675
weight_dtype=optim_args.weight_dtype,

0 commit comments

Comments (0)