From 22ed5703de88af00863e7d0e6f58726f09cf967f Mon Sep 17 00:00:00 2001
From: "Wang, Yi"
Date: Tue, 14 Jan 2025 08:58:48 +0800
Subject: [PATCH] Update server/text_generation_server/models/flash_causal_lm.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Daniël de Kok
---
 server/text_generation_server/models/flash_causal_lm.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index 4f8d37c1b..739319f88 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -1595,11 +1595,7 @@ class FlashCausalLM(Model):
         if max_total_tokens is None:
             if get_support_chunking():
                 model_max_length = self.tokenizer.model_max_length
-                max_position_embeddings = (
-                    self.config.max_position_embeddings
-                    if hasattr(self.config, "max_position_embeddings")
-                    else model_max_length
-                )
+                max_position_embeddings = getattr(self.config, "max_position_embeddings", model_max_length)
                 max_total_tokens = min(
                     num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings
                 )
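
For readers skimming the patch: the change replaces a hasattr ternary with
getattr and a default value, which is behaviorally equivalent (getattr falls
back to the default only when the attribute is missing) and avoids looking the
attribute up twice. A minimal, self-contained sketch of the before/after, using
types.SimpleNamespace as a stand-in for the real model config (an assumption
for illustration, not the actual TGI class):

    from types import SimpleNamespace

    config = SimpleNamespace(max_position_embeddings=4096)
    model_max_length = 2048

    # Before: hasattr plus a conditional expression
    max_position_embeddings = (
        config.max_position_embeddings
        if hasattr(config, "max_position_embeddings")
        else model_max_length
    )
    assert max_position_embeddings == 4096

    # After: getattr with a default, one expression and one lookup
    max_position_embeddings = getattr(config, "max_position_embeddings", model_max_length)
    assert max_position_embeddings == 4096

    # When the attribute is absent, the default is returned instead
    bare = SimpleNamespace()
    assert getattr(bare, "max_position_embeddings", model_max_length) == 2048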