Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-21 23:12:07 +00:00.

Commit 6ab02931cf (parent cc212154e0): Set alias for `max_completion_tokens` in `ChatRequest` (#2932).

This commit moves the serde alias `max_completion_tokens` from the `max_tokens` field of `CompletionRequest` to the `max_tokens` field of `ChatRequest`, matching the OpenAI chat-completions API, which accepts `max_completion_tokens` on chat requests.
```diff
@@ -460,7 +460,7 @@ pub struct CompletionRequest {
     pub prompt: Prompt,

     /// The maximum number of tokens that can be generated in the chat completion.
-    #[serde(default, alias = "max_completion_tokens")]
+    #[serde(default)]
     #[schema(default = "1024", example = "32")]
     pub max_tokens: Option<u32>,
```
```diff
@@ -840,7 +840,7 @@ pub(crate) struct ChatRequest {
     pub top_logprobs: Option<u32>,

     /// The maximum number of tokens that can be generated in the chat completion.
-    #[serde(default)]
+    #[serde(default, alias = "max_completion_tokens")]
     #[schema(default = "1024", example = "32")]
     pub max_tokens: Option<u32>,
```
|
Loading…
Reference in New Issue
Block a user