Mirror of https://github.com/huggingface/text-generation-inference.git
Update server/text_generation_server/models/flash_causal_lm.py
Co-authored-by: Daniël de Kok <me@github.danieldk.eu>
parent 5ad8c9a40b
commit 22ed5703de
@@ -1595,11 +1595,7 @@ class FlashCausalLM(Model):
         if max_total_tokens is None:
             if get_support_chunking():
                 model_max_length = self.tokenizer.model_max_length
-                max_position_embeddings = (
-                    self.config.max_position_embeddings
-                    if hasattr(self.config, "max_position_embeddings")
-                    else model_max_length
-                )
+                max_position_embeddings = getattr(self.config, "max_position_embeddings", model_max_length)
                 max_total_tokens = min(
                     num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings
                 )
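For context, getattr() with a default returns the attribute when it exists and the fallback otherwise, so the one-liner is behaviorally equivalent to the removed hasattr() conditional for plain attribute access. A minimal standalone sketch of that equivalence; DummyConfig and the model_max_length value are hypothetical stand-ins for the model config and tokenizer.model_max_length, not objects from the repo:

# Sketch of the equivalence behind this change; DummyConfig is a
# hypothetical stand-in for the model config, not a class from the repo.
class DummyConfig:
    def __init__(self, max_position_embeddings=None):
        # Only set the attribute when a value is given, so the bare
        # DummyConfig() below exercises the fallback path.
        if max_position_embeddings is not None:
            self.max_position_embeddings = max_position_embeddings


model_max_length = 4096  # assumed tokenizer.model_max_length

for config in (DummyConfig(), DummyConfig(max_position_embeddings=2048)):
    # Old pattern: explicit hasattr() check in a conditional expression.
    old = (
        config.max_position_embeddings
        if hasattr(config, "max_position_embeddings")
        else model_max_length
    )
    # New pattern from the commit: getattr() with a default value.
    new = getattr(config, "max_position_embeddings", model_max_length)
    assert old == new
    print(new)  # 4096 for the bare config, 2048 for the configured one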