Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-09-10 20:04:52 +00:00)
Fix top_n_tokens returning non-log probs for some models
This commit is contained in:
parent
c8a01d7591
commit
2a16b4101f
@@ -579,7 +579,7 @@ class CausalLM(Model):
         batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
             batch.top_n_tokens,
             batch.top_n_tokens_tensor,
-            torch.softmax(logits[:, -1], -1),
+            torch.log_softmax(logits[:, -1], -1),
         )

         # Zipped iterator
@@ -642,7 +642,7 @@ class Seq2SeqLM(Model):
         batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
             batch.top_n_tokens,
             batch.top_n_tokens_tensor,
-            torch.softmax(logits[:, -1], -1),
+            torch.log_softmax(logits[:, -1], -1),
        )

         # Finished requests
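Both hunks make the same one-line change: the tensor passed to batch_top_tokens is now produced by torch.log_softmax instead of torch.softmax, so the top_n_tokens values derived from it are genuine log-probabilities rather than raw probabilities. A minimal standalone sketch of the difference (not the repository's code; the logits values are made up):

import torch

# Hypothetical last-position logits for one request over a 4-token vocabulary.
logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])

probs = torch.softmax(logits, dim=-1)         # values in (0, 1), summing to 1
logprobs = torch.log_softmax(logits, dim=-1)  # values <= 0

# log_softmax is the numerically stable equivalent of log(softmax(...)).
assert torch.allclose(logprobs, torch.log(probs))

# Before the fix, the "logprob" of the most likely token here would have been
# reported as ~0.61 (a probability) instead of log(0.61) ~= -0.50.
print(torch.topk(logprobs, k=2, dim=-1).values)  # tensor([[-0.4952, -1.4952]])

A client that exponentiates the reported logprobs to recover probabilities would previously have computed exp(p) instead of p (e.g. exp(0.61) ~= 1.84, an impossible probability), which is why the old behavior was a real bug rather than a cosmetic difference.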