From 6a017f520869bf5abac72ecbe102b649b5836c6e Mon Sep 17 00:00:00 2001
From: Yessen Kanapin
Date: Tue, 29 Aug 2023 22:22:40 +0000
Subject: [PATCH] bugfix

---
 .../models/custom_modeling/flash_llama_modeling.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index 66e2fce0..0ade126f 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -189,6 +189,7 @@ class FlashLlamaAttention(torch.nn.Module):
         self.rotary_emb = PositionRotaryEmbedding.static(
             dim=self.head_size,
             base=config.rope_theta,
             device=weights.device,
+            config=config
         )
         self.softmax_scale = self.head_size**-0.5
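
For context, the one-line addition of config=config only matters because the rotary-embedding constructor can read per-model rope settings (for example rope scaling) off the model config; if the config is never forwarded, those settings are silently ignored. The sketch below illustrates that pattern under stated assumptions: SimpleRotaryEmbedding and DummyConfig are simplified stand-ins written for this note, not the actual PositionRotaryEmbedding or config classes from text-generation-inference; only the act of passing config through to the static constructor mirrors the patch.

import torch


class SimpleRotaryEmbedding(torch.nn.Module):
    """Simplified stand-in for a rotary-embedding helper (illustration only)."""

    def __init__(self, inv_freq: torch.Tensor, scaling_factor: float):
        super().__init__()
        self.register_buffer("inv_freq", inv_freq)
        self.scaling_factor = scaling_factor

    @classmethod
    def static(cls, dim: int, base: float, device, config=None):
        # Hypothetical behavior: without `config`, any rope-scaling settings on
        # the model config are ignored and the default factor of 1.0 is used.
        scaling_factor = 1.0
        if config is not None and getattr(config, "rope_scaling", None):
            scaling_factor = config.rope_scaling.get("factor", 1.0)
        # Standard rotary inverse-frequency table for the given head dimension.
        inv_freq = 1.0 / (
            base ** (torch.arange(0, dim, 2, device=device).float() / dim)
        )
        return cls(inv_freq, scaling_factor)


class DummyConfig:
    # Invented values for illustration only.
    rope_theta = 10000.0
    rope_scaling = {"type": "linear", "factor": 2.0}


config = DummyConfig()
rotary_emb = SimpleRotaryEmbedding.static(
    dim=64,
    base=config.rope_theta,
    device="cpu",
    config=config,  # forwarding the config, as the patch does
)
print(rotary_emb.scaling_factor)  # 2.0 once the config is passed through

In this simplified version, dropping the config argument leaves the scaling factor at 1.0 even though the config requests a factor of 2.0, which is the kind of silent mismatch a one-line keyword addition like the patch above is meant to prevent.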