From 3e0a82d5124f32453bc09a50d281936a30fde8be Mon Sep 17 00:00:00 2001
From: drbh
Date: Thu, 17 Oct 2024 08:48:52 -0400
Subject: [PATCH] Update server/text_generation_server/models/flash_causal_lm.py

Co-authored-by: Nicolas Patry
---
 server/text_generation_server/models/flash_causal_lm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index 3b16a724..7018edb1 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -1922,7 +1922,7 @@ class FlashCausalLM(Model):
         batch.adapter_meta.adapter_indices = next_adapter_indices

         if prefill and prefill_logprobs:
-            # Get prefill logprobs with inplace softmax (avoid copying the `out` tensor (max_batch_size * vocab_size))
+            # Get prefill logprobs with inplace softmax (avoid copying the `out` tensor (max_batch_prefill_tokens * vocab_size))
             torch.log_softmax(out, -1, out=out)
             prefill_logprobs_tensor = out
             prefill_logprobs = torch.gather(
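
Note: the corrected comment describes the in-place log-softmax already used by the
surrounding code. Below is a minimal standalone sketch (not TGI code) of the same
technique; the values of max_batch_prefill_tokens and vocab_size are illustrative
placeholders, not figures from this patch.

import torch

max_batch_prefill_tokens = 4096  # hypothetical prefill token budget
vocab_size = 32000               # hypothetical vocabulary size

# Logits buffer of shape (max_batch_prefill_tokens, vocab_size).
out = torch.randn(max_batch_prefill_tokens, vocab_size)

# Out-of-place variant would allocate a second tensor of the same size:
#   logprobs = torch.log_softmax(out, -1)

# In-place variant reuses the storage of `out`, so peak memory stays at a
# single logits-sized buffer, as the patched comment explains.
torch.log_softmax(out, -1, out=out)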