Set qkv attention layer bias to True

Jason Cheng 2024-02-21 21:38:43 +08:00
parent bb57cb34e0
commit 605e0369c4


@@ -26,7 +26,7 @@ def load_attention(config, prefix, weights):
         prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
         dim=0,
         weights=weights,
-        bias=False,
+        bias=True,
     )
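For context, a minimal sketch of what loading a fused QKV projection with bias=True amounts to: the q/k/v weights (and now also their biases) are concatenated along dim=0, the output dimension, into a single linear layer. This is plain PyTorch for illustration only; the sizes and variable names below are assumptions, not code from this repository.

# Illustrative sketch, not the repository's implementation.
# hidden_size and the separate q/k/v layers are assumed for the example.
import torch
import torch.nn as nn

hidden_size = 4096

q_proj = nn.Linear(hidden_size, hidden_size, bias=True)
k_proj = nn.Linear(hidden_size, hidden_size, bias=True)
v_proj = nn.Linear(hidden_size, hidden_size, bias=True)

# Concatenate along dim=0 (the output dimension), mirroring dim=0 in the diff above.
qkv_weight = torch.cat([q_proj.weight, k_proj.weight, v_proj.weight], dim=0)
qkv_bias = torch.cat([q_proj.bias, k_proj.bias, v_proj.bias], dim=0)

qkv_proj = nn.Linear(hidden_size, 3 * hidden_size, bias=True)
with torch.no_grad():
    qkv_proj.weight.copy_(qkv_weight)
    qkv_proj.bias.copy_(qkv_bias)

# Split the fused output back into q, k, v.
x = torch.randn(2, hidden_size)
q, k, v = qkv_proj(x).split(hidden_size, dim=-1)

With bias=False, the bias tensors of q_proj/k_proj/v_proj in the checkpoint would simply be dropped; setting bias=True makes the fused layer load and apply them, which matters for models whose attention projections are trained with biases.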