1 parent 5d7667a commit b9101e6
neural_compressor/torch/algorithms/weight_only/save_load.py
@@ -956,6 +956,7 @@ def _use_hpu_module(self):  # pragma: no cover


 def change_config_to_hf_format(config_mappings):
+    """Change INC config_mappings to Huggingface format."""
     # Refer to https://huggingface.co/TheBloke/Llama-2-7B-Chat-GPTQ/blob/main/config.json
     default_quantization_config = {
         "bits": 4,