From 2080735e16fca8a6c6c22b064a116c42ece49841 Mon Sep 17 00:00:00 2001
From: Felix Marty <9808326+fxmarty@users.noreply.github.com>
Date: Wed, 19 Jul 2023 16:35:23 +0000
Subject: [PATCH] taste

---
 .../models/custom_modeling/flash_llama_modeling.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index cf211ef0..84e22f72 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -168,8 +168,7 @@ def _load_gqa(config, prefix: str, weights):
         config.hidden_size,
     ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
 
-    bias = None
-    return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize))
+    return TensorParallelColumnLinear(get_linear(weight, bias=None, quantize=config.quantize))
 
 
 class FlashLlamaAttention(torch.nn.Module):
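
For context (not part of the patch): the change drops the throwaway `bias = None` local and passes `bias` and `quantize` to `get_linear` as keyword arguments. Below is a minimal sketch of the resulting call pattern, with a hypothetical stand-in for `get_linear` that covers only the unquantized path; the real one in text-generation-inference also dispatches on `quantize` to build quantized layers, and the result is further wrapped in `TensorParallelColumnLinear`. The example dimensions are made up for illustration.

    import torch


    def get_linear(weight: torch.Tensor, bias, quantize):
        # Hypothetical stub: ignore quantization and build a plain linear
        # layer from a pre-loaded weight tensor, as the unquantized path would.
        assert quantize is None, "this sketch only covers the unquantized path"
        out_features, in_features = weight.shape
        linear = torch.nn.Linear(in_features, out_features, bias=bias is not None)
        linear.weight = torch.nn.Parameter(weight)
        if bias is not None:
            linear.bias = torch.nn.Parameter(bias)
        return linear


    # After the patch, the fused QKV projection weight is turned into a layer
    # in one expression, with keyword arguments instead of a throwaway local.
    num_heads, num_key_value_heads, head_size, hidden_size = 8, 2, 64, 512
    weight = torch.randn((num_heads + 2 * num_key_value_heads) * head_size, hidden_size)
    layer = get_linear(weight, bias=None, quantize=None)
    print(layer)  # Linear(in_features=512, out_features=768, bias=False)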