add return_full_text support

OlivierDehaene 2023-02-27 19:22:09 +01:00
parent ed22912676
commit f3f9faca2f
2 changed files with 114 additions and 113 deletions

View File

@@ -47,8 +47,8 @@ pub(crate) struct GenerateParameters {
     #[schema(exclusive_minimum = 0, exclusive_maximum = 512, default = "20")]
     pub max_new_tokens: u32,
     #[serde(default)]
-    #[schema(default = "false", example = false)]
-    pub return_full_text: bool,
+    #[schema(default = "None", example = false)]
+    pub return_full_text: Option<bool>,
     #[serde(default)]
     #[schema(inline, max_items = 4, example = json!(["photographer"]))]
     pub stop: Vec<String>,
@@ -71,7 +71,7 @@ fn default_parameters() -> GenerateParameters {
         top_p: None,
         do_sample: false,
         max_new_tokens: default_max_new_tokens(),
-        return_full_text: false,
+        return_full_text: None,
         stop: vec![],
         details: false,
         seed: None,
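
Note on the type change above: switching return_full_text from bool to Option<bool> lets the router tell an explicit false apart from an omitted field, which is what makes the per-request defaulting below possible. A minimal sketch of the serde behavior, using a trimmed-down struct that is not part of the commit:

    use serde::Deserialize;

    #[derive(Deserialize, Debug, PartialEq)]
    struct Params {
        // With a plain bool, a missing field and an explicit false both
        // deserialize to false. Option<bool> keeps the cases apart:
        // missing -> None, explicit value -> Some(..).
        #[serde(default)]
        return_full_text: Option<bool>,
    }

    fn main() {
        let missing: Params = serde_json::from_str("{}").unwrap();
        let explicit: Params = serde_json::from_str(r#"{"return_full_text": false}"#).unwrap();
        assert_eq!(missing.return_full_text, None);
        assert_eq!(explicit.return_full_text, Some(false));
    }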

View File

@@ -29,31 +29,27 @@ use utoipa_swagger_ui::SwaggerUi;
 /// Compatibility route with api-inference and AzureML
 #[instrument(skip(infer))]
 async fn compat_generate(
-    return_full_text: Extension<bool>,
+    default_return_full_text: Extension<bool>,
     infer: Extension<Infer>,
     req: Json<CompatGenerateRequest>,
 ) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
     // switch on stream
-    let req = req.0;
-    if req.stream {
-        Ok(generate_stream(infer, Json(req.into()))
-            .await
-            .into_response())
-    } else {
-        let mut add_prompt = None;
-        if return_full_text.0 {
-            add_prompt = Some(req.inputs.clone());
-        }
-        let (headers, generation) = generate(infer, Json(req.into())).await?;
-        let mut generation = generation.0;
-        if let Some(prompt) = add_prompt {
-            generation.generated_text = prompt + &generation.generated_text;
-        };
+    let mut req = req.0;
+    if req.parameters.return_full_text.is_none() {
+        req.parameters.return_full_text = Some(default_return_full_text.0)
+    }
+    if req.stream {
+        Ok(
+            generate_stream(infer, Json(req.into()))
+                .await
+                .into_response(),
+        )
+    } else {
+        let (headers, generation) =
+            generate(infer, Json(req.into())).await?;
         // wrap generation inside a Vec to match api-inference
-        Ok((headers, Json(vec![generation])).into_response())
+        Ok((headers, Json(vec![generation.0])).into_response())
     }
 }
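
With that distinction available, compat_generate only writes the server-wide default (the bool injected as an Extension) into the request when the client left return_full_text unset; an explicit false from the client survives. A reduced sketch of that rule, with illustrative names that are not from the commit:

    // requested: what the client sent (None when the field was omitted);
    // server_default: the flag the router injects via Extension.
    fn resolve_return_full_text(requested: Option<bool>, server_default: bool) -> bool {
        requested.unwrap_or(server_default)
    }

    fn main() {
        assert_eq!(resolve_return_full_text(None, true), true);         // unset: default applies
        assert_eq!(resolve_return_full_text(Some(false), true), false); // explicit false wins
        assert_eq!(resolve_return_full_text(Some(true), false), true);
    }
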
@@ -75,7 +71,7 @@ async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorResponse>)>
             top_p: None,
             do_sample: false,
             max_new_tokens: 1,
-            return_full_text: false,
+            return_full_text: None,
             stop: Vec::new(),
             details: false,
             seed: None,
@@ -122,7 +118,12 @@ async fn generate(
     let start_time = Instant::now();
     let mut add_prompt = None;
-    if req.0.parameters.return_full_text {
+    if req
+        .0
+        .parameters
+        .return_full_text
+        .unwrap_or(false)
+    {
         add_prompt = Some(req.0.inputs.clone());
     }
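
Inside generate, a still-unset flag falls back to false via unwrap_or, and a true flag causes the captured prompt to be prepended to the model output later in the handler, mirroring what the old compat route did inline. A standalone sketch of that prepend step, with a hypothetical function name:

    fn prepend_prompt(inputs: &str, generated_text: String, return_full_text: Option<bool>) -> String {
        // Capture the prompt only when the flag resolves to true.
        let add_prompt = if return_full_text.unwrap_or(false) {
            Some(inputs.to_string())
        } else {
            None
        };
        match add_prompt {
            Some(prompt) => prompt + &generated_text,
            None => generated_text,
        }
    }

    fn main() {
        assert_eq!(prepend_prompt("Hello", " world".to_string(), Some(true)), "Hello world");
        assert_eq!(prepend_prompt("Hello", " world".to_string(), None), " world");
    }
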
@@ -254,7 +255,7 @@ async fn generate_stream(
     let mut error = false;
     let mut add_prompt = None;
-    if req.0.parameters.return_full_text {
+    if req.0.parameters.return_full_text.unwrap_or(false) {
         add_prompt = Some(req.0.inputs.clone());
     }
     let details = req.0.parameters.details;