diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index 19fda9f1..4f8d37c1 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -1595,7 +1595,11 @@ class FlashCausalLM(Model):
         if max_total_tokens is None:
             if get_support_chunking():
                 model_max_length = self.tokenizer.model_max_length
-                max_position_embeddings = self.config.max_position_embeddings
+                max_position_embeddings = (
+                    self.config.max_position_embeddings
+                    if hasattr(self.config, "max_position_embeddings")
+                    else model_max_length
+                )
                 max_total_tokens = min(
                     num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings
                 )
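
The change guards the attribute lookup so that configs without a max_position_embeddings field fall back to the tokenizer's model_max_length instead of raising AttributeError. A minimal standalone sketch of the same fallback pattern; the config object and numeric values are hypothetical stand-ins, not the real FlashCausalLM attributes:

# Sketch of the hasattr fallback used in the patch above (illustrative values only).
from types import SimpleNamespace

BLOCK_SIZE = 16        # assumed block size, for illustration
num_blocks = 1024      # assumed number of KV-cache blocks
model_max_length = 4096  # stands in for tokenizer.model_max_length

config = SimpleNamespace()  # a config that exposes no max_position_embeddings

max_position_embeddings = (
    config.max_position_embeddings
    if hasattr(config, "max_position_embeddings")
    else model_max_length
)

# Same min() as the patched code: the fallback keeps the bound well defined.
max_total_tokens = min(num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings)
print(max_total_tokens)  # 4096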