From 605e0369c4e1f9cc80109840e073ae57e03a8134 Mon Sep 17 00:00:00 2001
From: Jason Cheng
Date: Wed, 21 Feb 2024 21:38:43 +0800
Subject: [PATCH] Set qkv attention layer bias to True

---
 .../models/custom_modeling/flash_qwen2_modeling.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py b/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py
index 849dd80e..42a2cbde 100644
--- a/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_qwen2_modeling.py
@@ -26,7 +26,7 @@ def load_attention(config, prefix, weights):
             prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
             dim=0,
             weights=weights,
-            bias=False,
+            bias=True,
         )
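
Note: for context, below is a minimal sketch of how the load_attention helper in
flash_qwen2_modeling.py reads after this change. The import path, the GQA branch,
and the _load_gqa fallback are assumed from the surrounding module and are not part
of this patch; the only line the commit touches is bias=True. The motivation is that
Qwen2 checkpoints carry bias tensors on q_proj, k_proj and v_proj, so the fused QKV
load must request them or those weights are dropped.

from text_generation_server.utils.layers import TensorParallelColumnLinear  # assumed import path


def load_attention(config, prefix, weights):
    # Assumed context: when head counts differ, delegate to a separate GQA
    # loader (_load_gqa is a helper in the same module, not shown here).
    if config.num_attention_heads != config.num_key_value_heads:
        return _load_gqa(config, prefix, weights)
    else:
        # Fuse q/k/v into a single column-parallel projection. bias=True
        # (this patch) makes the loader also read the per-projection bias
        # tensors that Qwen2 checkpoints include for q_proj/k_proj/v_proj.
        return TensorParallelColumnLinear.load_multi(
            config,
            prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
            dim=0,
            weights=weights,
            bias=True,
        )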