feat(router): add support for return_full_text

OlivierDehaene 2023-02-27 17:08:16 +01:00
parent 1cddcbbc26
commit ed22912676
2 changed files with 38 additions and 13 deletions

router/src/lib.rs

@@ -40,13 +40,16 @@ pub(crate) struct GenerateParameters {
         example = 0.95
     )]
     pub top_p: Option<f32>,
-    #[serde(default = "default_do_sample")]
+    #[serde(default)]
     #[schema(default = "false", example = true)]
     pub do_sample: bool,
     #[serde(default = "default_max_new_tokens")]
     #[schema(exclusive_minimum = 0, exclusive_maximum = 512, default = "20")]
     pub max_new_tokens: u32,
     #[serde(default)]
+    #[schema(default = "false", example = false)]
+    pub return_full_text: bool,
+    #[serde(default)]
     #[schema(inline, max_items = 4, example = json ! (["photographer"]))]
     pub stop: Vec<String>,
     #[serde(default)]
@@ -56,10 +59,6 @@ pub(crate) struct GenerateParameters {
     pub seed: Option<u64>,
 }
 
-fn default_do_sample() -> bool {
-    false
-}
-
 fn default_max_new_tokens() -> u32 {
     20
 }
@@ -70,8 +69,9 @@ fn default_parameters() -> GenerateParameters {
         repetition_penalty: None,
         top_k: None,
         top_p: None,
-        do_sample: default_do_sample(),
+        do_sample: false,
         max_new_tokens: default_max_new_tokens(),
+        return_full_text: false,
         stop: vec![],
         details: false,
         seed: None,
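The new field relies on serde's field-level `default`: when `return_full_text` is absent from the request JSON, it deserializes to `bool::default()`, i.e. `false`. A minimal standalone sketch of that behavior (the `Params` struct here is hypothetical, not the crate's type):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Params {
    // #[serde(default)] falls back to Default::default() (false for bool)
    // when the field is missing from the payload
    #[serde(default)]
    return_full_text: bool,
}

fn main() {
    let omitted: Params = serde_json::from_str("{}").unwrap();
    assert!(!omitted.return_full_text); // defaults to false
    let set: Params = serde_json::from_str(r#"{"return_full_text": true}"#).unwrap();
    assert!(set.return_full_text);
}

This is also why `default_do_sample` could be deleted above: `#[serde(default)]` on a `bool` already yields `false`, making the helper function redundant.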

router/src/server.rs

@@ -75,6 +75,7 @@ async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
                 top_p: None,
                 do_sample: false,
                 max_new_tokens: 1,
+                return_full_text: false,
                 stop: Vec::new(),
                 details: false,
                 seed: None,
@@ -120,8 +121,14 @@ async fn generate(
     let span = tracing::Span::current();
     let start_time = Instant::now();
 
-    // Inference
+    let mut add_prompt = None;
+    if req.0.parameters.return_full_text {
+        add_prompt = Some(req.0.inputs.clone());
+    }
+
     let details = req.0.parameters.details;
+
+    // Inference
     let response = infer.generate(req.0).await?;
 
     // Token details
@@ -188,8 +195,13 @@ async fn generate(
     );
 
     // Send response
+    let mut output_text = response.generated_text.text;
+    if let Some(prompt) = add_prompt {
+        output_text = prompt + &output_text;
+    }
+
     let response = GenerateResponse {
-        generated_text: response.generated_text.text,
+        generated_text: output_text,
         details,
     };
     Ok((headers, Json(response)))
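Both handlers apply the same pattern: the prompt is cloned out of the request before `req.0` is moved into the inference call, then prepended to the generated text. A reduced sketch of that logic (the free function and its names are illustrative, not part of the router):

// Illustrative reduction of the logic added in the hunk above.
fn assemble_output(inputs: &str, generated: String, return_full_text: bool) -> String {
    // Clone the prompt up front, mirroring add_prompt = Some(req.0.inputs.clone())
    let add_prompt = return_full_text.then(|| inputs.to_string());
    match add_prompt {
        Some(prompt) => prompt + &generated, // full text: prompt + completion
        None => generated,                   // completion only (previous behavior)
    }
}

fn main() {
    assert_eq!(assemble_output("Hello", ", world!".into(), true), "Hello, world!");
    assert_eq!(assemble_output("Hello", ", world!".into(), false), ", world!");
}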
@@ -240,6 +252,11 @@ async fn generate_stream(
         // Inference
         let mut end_reached = false;
         let mut error = false;
+
+        let mut add_prompt = None;
+        if req.0.parameters.return_full_text {
+            add_prompt = Some(req.0.inputs.clone());
+        }
         let details = req.0.parameters.details;
 
         match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
@@ -306,20 +323,28 @@ async fn generate_stream(
                         // StreamResponse
                         end_reached = true;
+
+                        let mut output_text = generated_text.text;
+                        if let Some(prompt) = add_prompt {
+                            output_text = prompt + &output_text;
+                        }
+
                         let stream_token = StreamResponse {
                             token,
-                            generated_text: Some(generated_text.text),
+                            generated_text: Some(output_text),
                             details
                         };
 
-                        yield Ok(Event::default().json_data(stream_token).unwrap())
+                        yield Ok(Event::default().json_data(stream_token).unwrap());
+                        break;
                     }
                 }
             }
             // yield error
             Err(err) => {
                 error = true;
-                yield Ok(Event::from(err))
+                yield Ok(Event::from(err));
+                break;
             }
         }
} }
@@ -327,7 +352,7 @@ async fn generate_stream(
         // yield error
         Err(err) => {
             error = true;
-            yield Ok(Event::from(err))
+            yield Ok(Event::from(err));
         }
     }
     // Check if generation reached the end
// Check if generation reached the end // Check if generation reached the end
@@ -336,7 +361,7 @@ async fn generate_stream(
         let err = InferError::IncompleteGeneration;
         metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
         tracing::error!("{err}");
-        yield Ok(Event::from(err))
+        yield Ok(Event::from(err));
     }
 };
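With these changes, a client can opt in per request; the parameter defaults to `false`, so existing callers see no change in output. A hypothetical call against a running router (host, port, and prompt are placeholders; the request shape follows `GenerateParameters` above), using the blocking `reqwest` client with its `blocking` and `json` features:

use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let body = json!({
        "inputs": "My name is Olivier and I",
        "parameters": {
            "max_new_tokens": 20,
            // opt in: response contains prompt + completion instead of completion only
            "return_full_text": true
        }
    });
    let response = reqwest::blocking::Client::new()
        .post("http://localhost:3000/generate") // placeholder address
        .json(&body)
        .send()?
        .text()?;
    println!("{response}");
    Ok(())
}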