From fcf2e3a3386afaf0f8c37336475f848b18297fa8 Mon Sep 17 00:00:00 2001
From: yuanwu
Date: Fri, 1 Nov 2024 05:08:18 +0200
Subject: [PATCH] Fix the prefill warmup issue

Signed-off-by: yuanwu
---
 server/text_generation_server/models/causal_lm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index e9e8680e..4a04abed 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -1265,8 +1265,8 @@ class CausalLM(Model):
 
         #Prefill and decode warmup
         try:
-            for batch_size in range(PREFILL_BATCH_BUCKET_SIZE, max_prefill_batch_size, PREFILL_BATCH_BUCKET_SIZE):
-                DECODE_WARMUP_BATCH_SIZE_LIST.append(batch_size)
+            for batch_size in range(PREFILL_BATCH_BUCKET_SIZE, max_prefill_batch_size + 1, PREFILL_BATCH_BUCKET_SIZE):
+                PREFILL_WARMUP_BATCH_SIZE_LIST.append(batch_size)
                 for seq_len in PREFILL_WARMUP_SEQLEN_LIST :
                     batch = self.generate_warmup_batch(request, seq_len - 1, batch_size)
                     _, prefill_batch, _ = self.generate_token([batch], is_warmup)
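
A minimal Python sketch of the off-by-one this patch addresses; the bucket size and
maximum prefill batch size below are assumed example values, not taken from the server
configuration. Python's range() excludes its stop argument, so the old upper bound
skipped the largest prefill batch size, and the sizes were also being appended to
DECODE_WARMUP_BATCH_SIZE_LIST instead of PREFILL_WARMUP_BATCH_SIZE_LIST.

    # Assumed example values, for illustration only.
    PREFILL_BATCH_BUCKET_SIZE = 2
    max_prefill_batch_size = 8

    # Old bound: range() stops before max_prefill_batch_size, so 8 is never warmed up.
    old_sizes = list(range(PREFILL_BATCH_BUCKET_SIZE, max_prefill_batch_size, PREFILL_BATCH_BUCKET_SIZE))
    print(old_sizes)  # [2, 4, 6]

    # Patched bound: "+ 1" makes the range inclusive of the maximum prefill batch size.
    new_sizes = list(range(PREFILL_BATCH_BUCKET_SIZE, max_prefill_batch_size + 1, PREFILL_BATCH_BUCKET_SIZE))
    print(new_sizes)  # [2, 4, 6, 8]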