From fc7221369eedaa7e9cd98b1f2d99763596c213bd Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 8 Aug 2023 10:43:34 +0000
Subject: [PATCH] Fix.

---
 .../models/custom_modeling/flash_llama_modeling.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index 99ebd425..d0185ede 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -189,7 +189,7 @@ class FlashLlamaAttention(torch.nn.Module):
         #     config=config, prefix=f"{prefix}.rotary_emb", weights=weights
         # )
         self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config, dim=config.head_size, base=10000.0, device=weights.device
+            config=config, dim=self.head_size, base=10000.0, device=weights.device
         )

         self.softmax_scale = self.head_size**-0.5
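
Note on the one-line change (commentary, not part of the patch): the rotary embedding must be built with the per-head dimension, which the attention module computes for itself; `config.head_size` presumably fails because the Llama config does not expose such a field. The sketch below illustrates that reasoning under stated assumptions: `static_rope_inv_freq` is a hypothetical stand-in for `PositionRotaryEmbedding.static` (the real class lives elsewhere in the server), and `Config` mimics typical Llama-7B shapes.

```python
import torch

# Hypothetical stand-in for PositionRotaryEmbedding.static: builds the RoPE
# inverse-frequency table for a given rotation dimension. Illustrative only.
def static_rope_inv_freq(dim: int, base: float = 10000.0, device: str = "cpu") -> torch.Tensor:
    # One frequency per pair of channels, hence the step of 2 over `dim`.
    exponents = torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim
    return 1.0 / (base ** exponents)

class Config:
    # Typical Llama-7B shapes. Note there is no `head_size` attribute,
    # which is why the patched line reads `self.head_size`, a value the
    # attention module derives itself.
    hidden_size = 4096
    num_attention_heads = 32

config = Config()
head_size = config.hidden_size // config.num_attention_heads  # 128
inv_freq = static_rope_inv_freq(dim=head_size)
print(inv_freq.shape)  # torch.Size([64]) — head_size // 2 frequencies
```

Passing the full `hidden_size` here (or an attribute that does not exist) would either rotate across the wrong number of channels or raise an `AttributeError` at load time, which is consistent with the minimal "Fix." above.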