Commit f95db62 (parent 2397d65)

[pre-commit.ci] auto fixes from pre-commit.com hooks
For more information, see https://pre-commit.ci

1 file changed: +3 −1 lines

neural_compressor/torch/algorithms/weight_only/gptq.py (+3 −1)
@@ -116,7 +116,9 @@ def trace_gptq_target_blocks(module, module_types=[torch.nn.ModuleList, torch.nn
             gptq_related_blocks["transformers"] = m
             find_transformers = True
             # return gptq_related_blocks
-        elif (is_leaf(m) and not find_transformers) or "Embedding" in type(m).__name__:  # resolve 'LlamaRotaryEmbedding'
+        elif (is_leaf(m) and not find_transformers) or "Embedding" in type(
+            m
+        ).__name__:  # resolve 'LlamaRotaryEmbedding'
             gptq_related_blocks["embeddings"][n] = m
         elif n.find(gptq_related_blocks["transformers_name"]) == -1 and find_transformers:
             # no longer belong to transformers
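The reformatting only rewraps the long elif; the logic is unchanged: a module goes into gptq_related_blocks["embeddings"] if it is a leaf seen before the transformer block list, or if its class name contains "Embedding", which is what catches LlamaRotaryEmbedding (that class is not a torch.nn.Embedding subclass). Below is a minimal sketch of that class-name check; the LlamaRotaryEmbedding class and the is_embedding_like helper here are hypothetical stand-ins for illustration, not the repository's code.

# Sketch of the "Embedding" class-name check from the elif above.
import torch


class LlamaRotaryEmbedding(torch.nn.Module):
    # Hypothetical stand-in for the Hugging Face rotary-embedding module:
    # it is NOT a torch.nn.Embedding, so an isinstance check would miss it.
    pass


def is_embedding_like(m: torch.nn.Module) -> bool:
    # Matching on the class name catches both real embedding tables and
    # modules that merely carry "Embedding" in their type name.
    return "Embedding" in type(m).__name__


print(is_embedding_like(torch.nn.Embedding(10, 4)))  # True
print(is_embedding_like(LlamaRotaryEmbedding()))     # True
print(is_embedding_like(torch.nn.Linear(4, 4)))      # False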
