Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-24 16:32:12 +00:00)
Baichuan2-13B does not have max_position_embeddings in config

see https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/config.json

Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>
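For context, the missing field can be checked against the linked config directly. A minimal sketch, assuming transformers is installed, network access to the Hub, and that trust_remote_code=True is acceptable for Baichuan2's custom model code:

from transformers import AutoConfig, AutoTokenizer

repo = "baichuan-inc/Baichuan2-13B-Chat"
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)

# Per this commit's rationale, the config carries no max_position_embeddings,
# so the tokenizer's model_max_length is the usable fallback bound.
print(hasattr(config, "max_position_embeddings"))  # expected: False
print(tokenizer.model_max_length)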
This commit is contained in:
parent 83624a07be
commit 5ad8c9a40b
@@ -1595,7 +1595,11 @@ class FlashCausalLM(Model):
         if max_total_tokens is None:
             if get_support_chunking():
                 model_max_length = self.tokenizer.model_max_length
-                max_position_embeddings = self.config.max_position_embeddings
+                max_position_embeddings = (
+                    self.config.max_position_embeddings
+                    if hasattr(self.config, "max_position_embeddings")
+                    else model_max_length
+                )
                 max_total_tokens = min(
                     num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings
                 )
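The new conditional is equivalent to a getattr with a default. A standalone sketch of the fallback, with stand-in values for the TGI internals (num_blocks, BLOCK_SIZE, and the config/tokenizer objects are all hypothetical here, chosen only for illustration):

from types import SimpleNamespace

# Stand-ins for TGI internals; values are illustrative, not the real ones.
BLOCK_SIZE = 16
num_blocks = 1024
config = SimpleNamespace()  # no max_position_embeddings, like Baichuan2-13B
tokenizer = SimpleNamespace(model_max_length=4096)

model_max_length = tokenizer.model_max_length
# getattr with a default collapses the hasattr/else branch into one expression.
max_position_embeddings = getattr(config, "max_position_embeddings", model_max_length)

max_total_tokens = min(num_blocks * BLOCK_SIZE, model_max_length, max_position_embeddings)
print(max_total_tokens)  # 4096: capped by model_max_length in this sketch

The explicit if/else in the diff matches the style of the surrounding code; getattr(config, "max_position_embeddings", model_max_length) expresses the same fallback in one call.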