From cc8b9650bdae3125c70d979c19e465080b32789c Mon Sep 17 00:00:00 2001
From: "Wang, Yi"
Date: Wed, 15 Jan 2025 22:56:52 +0800
Subject: [PATCH] Baichuan2-13B does not have max_position_embeddings in
 config (#2903)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Baichuan2-13B does not have max_position_embeddings in config

see https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/config.json

Signed-off-by: Wang, Yi A

* Update server/text_generation_server/models/flash_causal_lm.py

Co-authored-by: Daniël de Kok

* fmt

Signed-off-by: Wang, Yi A

---------

Signed-off-by: Wang, Yi A
Co-authored-by: Daniël de Kok
---
 server/text_generation_server/models/flash_causal_lm.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index 03fc6147..d097c54f 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -1595,7 +1595,9 @@ class FlashCausalLM(Model):
         if max_total_tokens is None:
             if get_support_chunking():
                 model_max_length = self.tokenizer.model_max_length
-                max_position_embeddings = self.config.max_position_embeddings
+                max_position_embeddings = getattr(
+                    self.config, "max_position_embeddings", model_max_length
+                )
                 max_total_tokens = min(
                     num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings
                 )
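
For context beyond the patch itself, the following is a minimal standalone sketch (not TGI's actual code) of the same getattr fallback: when a model config such as Baichuan2-13B's lacks max_position_embeddings, the tokenizer's model_max_length is used instead of raising AttributeError. The helper name compute_max_total_tokens and the sample values for model_max_length and num_blocks are hypothetical, chosen only for illustration.

    # Standalone sketch of the fallback pattern; names and values are illustrative.
    from types import SimpleNamespace

    BLOCK_SIZE = 16  # KV-cache block size, as in the surrounding TGI code

    def compute_max_total_tokens(config, model_max_length, num_blocks):
        # Fall back to the tokenizer limit when the config does not define
        # max_position_embeddings (e.g. Baichuan2-13B's config.json).
        max_position_embeddings = getattr(
            config, "max_position_embeddings", model_max_length
        )
        return min(num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings)

    # A config object without max_position_embeddings, mimicking Baichuan2-13B
    baichuan_like_config = SimpleNamespace(model_type="baichuan")
    print(compute_max_total_tokens(baichuan_like_config, model_max_length=4096, num_blocks=1000))
    # prints 4096: the tokenizer limit caps max_total_tokens instead of an AttributeError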