Commit a15ca14: "typo"
1 parent 69df6d8

File tree: 1 file changed (+3, -2 lines)


nncf/torch/quantization/layers.py (+3, -2)
@@ -1072,14 +1072,15 @@ class LoraMixin:
     LORA_B_PARAM_NAME = "lora_B"
 
     def init_lora(self, lspec: PTLoraSpec):
+        self._lspec = lspec
         default_lora_dtype = torch.bfloat16
         out_features, in_features = lspec.orig_weight_shape
         rank = lspec.lora_rank
         if rank > out_features or rank > in_features:
             msg = f"Specified LoRA rank={rank} cannot exceed any dimension of the weight tensor"
             raise nncf.ValidationError(msg)
-        self._lora_A = torch.nn.Parameter(torch.ones((rank, in_features), dtype=default_lora_dtype))
-        self._lora_B = torch.nn.Parameter(torch.zeros((out_features, rank), dtype=default_lora_dtype))
+        self.lora_A = torch.nn.Parameter(torch.ones((rank, in_features), dtype=default_lora_dtype))
+        self.lora_B = torch.nn.Parameter(torch.zeros((out_features, rank), dtype=default_lora_dtype))
 
     def enable_gradients(self):
         self.lora_A.requires_grad = True
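Context for the change: enable_gradients() reads the parameters back via self.lora_A, so the pre-fix underscore-prefixed attributes (self._lora_A, self._lora_B) set in init_lora would make that lookup raise AttributeError. The sketch below is a minimal, self-contained rendering of the fixed pattern, not the actual NNCF class: LoraSpecStub is a hypothetical stand-in for PTLoraSpec, a plain ValueError stands in for nncf.ValidationError, and the lora_B line in enable_gradients is an assumption since the hunk is truncated after the lora_A line.

# Minimal sketch of the fixed LoraMixin pattern (not the actual NNCF class).
from dataclasses import dataclass

import torch


@dataclass
class LoraSpecStub:
    # Hypothetical stand-in for PTLoraSpec.
    orig_weight_shape: tuple  # (out_features, in_features)
    lora_rank: int


class LoraMixinSketch:
    LORA_A_PARAM_NAME = "lora_A"
    LORA_B_PARAM_NAME = "lora_B"

    def init_lora(self, lspec: LoraSpecStub):
        self._lspec = lspec
        default_lora_dtype = torch.bfloat16
        out_features, in_features = lspec.orig_weight_shape
        rank = lspec.lora_rank
        if rank > out_features or rank > in_features:
            # Plain ValueError stands in for nncf.ValidationError here.
            raise ValueError(f"Specified LoRA rank={rank} cannot exceed any dimension of the weight tensor")
        # Public attribute names now match what enable_gradients() looks up.
        self.lora_A = torch.nn.Parameter(torch.ones((rank, in_features), dtype=default_lora_dtype))
        self.lora_B = torch.nn.Parameter(torch.zeros((out_features, rank), dtype=default_lora_dtype))

    def enable_gradients(self):
        # With the pre-fix name self._lora_A, this lookup raised AttributeError.
        self.lora_A.requires_grad = True
        self.lora_B.requires_grad = True  # assumption: the diff hunk is truncated here


spec = LoraSpecStub(orig_weight_shape=(16, 32), lora_rank=4)
m = LoraMixinSketch()
m.init_lora(spec)
m.enable_gradients()
print(m.lora_A.shape, m.lora_B.shape)  # torch.Size([4, 32]) torch.Size([16, 4])

Note the initialization choice visible in the diff: lora_B starts as zeros, so the initial low-rank delta lora_B @ lora_A is the zero matrix and the adapter begins as a no-op regardless of lora_A's values.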
