use all tokens

This commit is contained in:
OlivierDehaene 2023-04-05 17:18:16 +02:00
parent b5233f9c3c
commit 70637b4170

View File

@@ -355,7 +355,7 @@ class FlashCausalLM(Model):
             # Generated token
             next_token_logprob = logprobs[-1, next_token_id_item]
             next_token_text, offset, token_offset = self.decode_token(
-                all_input_ids[-(stopping_criteria.current_tokens + 1) :],
+                all_input_ids,
                 offset,
                 token_offset,
            )