Fmt
This commit is contained in:
parent b7a81ae6d4
commit c432471546
@@ -84,9 +84,9 @@ impl Validation {
         let (inputs, input_length) = response_receiver.await.unwrap()?;
 
         // Get total tokens
-        let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens{
+        let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
             max_new_tokens
-        }else{
+        } else {
             self.max_total_tokens.saturating_sub(input_length) as u32
         };
         let total_tokens = input_length + max_new_tokens as usize;
@@ -117,9 +117,9 @@ impl Validation {
         // However, the inputs will be truncated by the python servers
         // We make sure that truncate + max_new_tokens <= self.max_total_tokens
         let input_length = truncate.unwrap_or(self.max_input_length);
-        let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens{
+        let max_new_tokens: u32 = if let Some(max_new_tokens) = max_new_tokens {
             max_new_tokens
-        }else{
+        } else {
             self.max_total_tokens.saturating_sub(input_length) as u32
         };
 
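For context, below is a minimal, self-contained sketch of the token-budget logic that the reformatted lines implement. The struct and method names here are illustrative stand-ins, not the actual Validation API in router/src/validation.rs: if the request specifies max_new_tokens it is used as-is, otherwise the fallback is whatever budget remains after the input tokens.

struct Validation {
    max_total_tokens: usize,
}

impl Validation {
    // Resolve the effective max_new_tokens for a request (illustrative only).
    fn resolve_max_new_tokens(&self, input_length: usize, max_new_tokens: Option<u32>) -> u32 {
        if let Some(max_new_tokens) = max_new_tokens {
            max_new_tokens
        } else {
            // saturating_sub prevents underflow when input_length already
            // exceeds max_total_tokens; the fallback is then 0.
            self.max_total_tokens.saturating_sub(input_length) as u32
        }
    }
}

fn main() {
    let v = Validation { max_total_tokens: 2048 };
    assert_eq!(v.resolve_max_new_tokens(100, Some(50)), 50);
    assert_eq!(v.resolve_max_new_tokens(2000, None), 48);
    assert_eq!(v.resolve_max_new_tokens(3000, None), 0); // saturates at zero
    println!("ok");
}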