
Commit 778bf15 (parent: f64b251)

fix bias

Signed-off-by: jiqing-feng <jiqing.feng@intel.com>

File tree

1 file changed (+1, -1)

optimum/exporters/ipex/modeling_utils.py

+1 -1
@@ -825,7 +825,7 @@ def __init__(self, module, device, config) -> None:
         super().__init__(module, device, config)
         if getattr(config, "quantization_config", None) is None:
             concat_weight = torch.concat([self.q_proj.weight, self.k_proj.weight, self.v_proj.weight]).contiguous()
-            bias_list = [bias for bias in [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias] if bias]
+            bias_list = [bias for bias in [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias] if bias is not None]
             use_bias = bias_list != []
             self.concat_qkv = nn.Linear(concat_weight.shape[1], concat_weight.shape[0], bias=use_bias)
             self.concat_qkv.weight = nn.Parameter(concat_weight)
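
For context, here is a minimal sketch of why the original `if bias` filter was a bug. It uses only stock PyTorch; the `q_proj`/`k_proj` layers below are illustrative stand-ins, not code from the commit. Truth-testing a tensor with more than one element raises a RuntimeError, so filtering out absent biases has to compare against None, which is exactly what the one-line change does.

import torch.nn as nn

q_proj = nn.Linear(4, 4, bias=True)   # has a multi-element bias tensor
k_proj = nn.Linear(4, 4, bias=False)  # .bias is None

biases = [q_proj.bias, k_proj.bias]

# Old pattern: `if b` invokes Tensor.__bool__ on q_proj.bias, and
# truth-testing a tensor with more than one element raises
# "RuntimeError: Boolean value of Tensor with more than one element is ambiguous".
try:
    _ = [b for b in biases if b]
except RuntimeError as err:
    print("old filter fails:", err)

# Fixed pattern from the commit: filter on identity with None instead.
bias_list = [b for b in biases if b is not None]
print(len(bias_list))  # -> 1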
