mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-09-11 20:34:54 +00:00
Fixing truncation.
This commit is contained in:
parent
e3d765645a
commit
07a50523b3
@@ -119,7 +119,11 @@ impl Validation {
         // If we have a fast tokenizer
         if let Some((encoding, inputs)) = self.tokenize(inputs.clone(), truncate).await? {
             // Create response channel
-            let input_length = encoding.len();
+            let input_length = if let Some(truncate) = truncate {
+                std::cmp::min(encoding.len(), truncate)
+            } else {
+                encoding.len()
+            };

             // Get total tokens
             let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
|
Loading…
Reference in New Issue
Block a user