Skip to content

Commit b5e9edb

Browse files
committed
typo
1 parent 69df6d8 commit b5e9edb

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

nncf/torch/quantization/layers.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -1078,8 +1078,8 @@ def init_lora(self, lspec: PTLoraSpec):
1078 1078
if rank > out_features or rank > in_features:
1079 1079
msg = f"Specified LoRA rank={rank} cannot exceed any dimension of the weight tensor"
1080 1080
raise nncf.ValidationError(msg)
1081-
self._lora_A = torch.nn.Parameter(torch.ones((rank, in_features), dtype=default_lora_dtype))
1082-
self._lora_B = torch.nn.Parameter(torch.zeros((out_features, rank), dtype=default_lora_dtype))
1081+
self.lora_A = torch.nn.Parameter(torch.ones((rank, in_features), dtype=default_lora_dtype))
1082+
self.lora_B = torch.nn.Parameter(torch.zeros((out_features, rank), dtype=default_lora_dtype))
1083 1083

1084 1084
def enable_gradients(self):
1085 1085
self.lora_A.requires_grad = True

0 commit comments

Comments
 (0)