This commit is contained in:
Felix Marty 2023-07-19 16:35:23 +00:00
parent 5882768682
commit 2080735e16

View File

@@ -168,8 +168,7 @@ def _load_gqa(config, prefix: str, weights):
config.hidden_size,
], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
bias = None
return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize))
return TensorParallelColumnLinear(get_linear(weight, bias=None, quantize=config.quantize))
class FlashLlamaAttention(torch.nn.Module):