Nicolas Patry 2023-12-05 15:29:34 +00:00
parent be481a4799
commit cb8a1680fe
2 changed files with 3 additions and 3 deletions

View File

@@ -833,7 +833,7 @@ class FlashCausalLM(Model):
             batch.top_n_tokens, batch.top_n_tokens_tensor, logprobs
         )

-        speculative_length = speculative_ids.shape[1]
+        speculative_length = 0 if speculative_ids is None else speculative_ids.shape[1]
         if prefill:
             if len(batch) > 1 and prefill_logprobs:
                 # We create the prefill_tokens_indices tensor that will be used to gather prefill logprobs
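The guard matters because speculative_ids is None whenever speculation is disabled, so the previous speculative_ids.shape[1] would raise an AttributeError on the ordinary decoding path. A minimal sketch of the guarded pattern (speculative_len is a standalone helper written for illustration; the surrounding FlashCausalLM decode logic is assumed):

from typing import Optional

import torch


def speculative_len(speculative_ids: Optional[torch.Tensor]) -> int:
    # With speculation disabled the batch carries no speculative ids at all,
    # so treat that as a speculative length of zero instead of indexing None.
    return 0 if speculative_ids is None else speculative_ids.shape[1]


assert speculative_len(None) == 0
assert speculative_len(torch.zeros((4, 2), dtype=torch.long)) == 2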

View File

@@ -21,6 +21,7 @@ from text_generation_server.models.custom_modeling.flash_mistral_modeling import (
     FlashMistralForCausalLM,
     MistralConfig,
 )
+from text_generation_server.utils.speculate import get_speculate
 from text_generation_server.utils import (
     initialize_torch_distributed,
     weight_files,
@@ -132,8 +133,7 @@ class FlashMistralBatch(FlashCausalLMBatch):
             # Paged attention
             # Remove one as the first token des not have a past
-            from text_generation_server.models import SPECULATE
-            speculative_length = SPECULATE
+            speculative_length = get_speculate()
             total_tokens = input_length + max_new_tokens - 1 + speculative_length
             # Needed blocks can not go over SLIDING_WINDOW_BLOCKS