commit f3f9faca2f (parent ed22912676)

    add return_full_text support

Mirror of https://github.com/huggingface/text-generation-inference.git
```diff
@@ -47,8 +47,8 @@ pub(crate) struct GenerateParameters {
     #[schema(exclusive_minimum = 0, exclusive_maximum = 512, default = "20")]
     pub max_new_tokens: u32,
     #[serde(default)]
-    #[schema(default = "false", example = false)]
-    pub return_full_text: bool,
+    #[schema(default = "None", example = false)]
+    pub return_full_text: Option<bool>,
     #[serde(default)]
     #[schema(inline, max_items = 4, example = json ! (["photographer"]))]
     pub stop: Vec<String>,
```
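The type change above turns `return_full_text` into a tri-state: `None` (client did not set it), `Some(false)` (explicit opt-out), and `Some(true)` (explicit opt-in). Because the field keeps `#[serde(default)]`, an omitted JSON field now deserializes to `None`, so later code can tell "unset" apart from "disabled" and fill in a server-side default. A minimal sketch of that deserialization behavior, using a stripped-down stand-in struct rather than the real `GenerateParameters`:

```rust
use serde::Deserialize;

// Stand-in for the relevant slice of GenerateParameters (illustration only).
#[derive(Deserialize, Debug)]
struct Params {
    // With Option<bool>, an absent field becomes None instead of false.
    #[serde(default)]
    return_full_text: Option<bool>,
}

fn main() {
    let omitted: Params = serde_json::from_str("{}").unwrap();
    let opted_out: Params = serde_json::from_str(r#"{"return_full_text": false}"#).unwrap();

    assert_eq!(omitted.return_full_text, None); // caller said nothing
    assert_eq!(opted_out.return_full_text, Some(false)); // caller explicitly declined
}
```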
```diff
@@ -71,7 +71,7 @@ fn default_parameters() -> GenerateParameters {
         top_p: None,
         do_sample: false,
         max_new_tokens: default_max_new_tokens(),
-        return_full_text: false,
+        return_full_text: None,
         stop: vec![],
         details: false,
         seed: None,
```
```diff
@@ -29,31 +29,27 @@ use utoipa_swagger_ui::SwaggerUi;
 /// Compatibility route with api-inference and AzureML
 #[instrument(skip(infer))]
 async fn compat_generate(
-    return_full_text: Extension<bool>,
+    default_return_full_text: Extension<bool>,
     infer: Extension<Infer>,
     req: Json<CompatGenerateRequest>,
 ) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
     // switch on stream
-    let req = req.0;
+    let mut req = req.0;
+    if req.parameters.return_full_text.is_none() {
+        req.parameters.return_full_text = Some(default_return_full_text.0)
+    }
 
     if req.stream {
-        Ok(generate_stream(infer, Json(req.into()))
-            .await
-            .into_response())
+        Ok(
+            generate_stream(infer, Json(req.into()))
+                .await
+                .into_response(),
+        )
     } else {
-        let mut add_prompt = None;
-        if return_full_text.0 {
-            add_prompt = Some(req.inputs.clone());
-        }
-
-        let (headers, generation) = generate(infer, Json(req.into())).await?;
-
-        let mut generation = generation.0;
-        if let Some(prompt) = add_prompt {
-            generation.generated_text = prompt + &generation.generated_text;
-        };
+        let (headers, generation) =
+            generate(infer, Json(req.into())).await?;
 
         // wrap generation inside a Vec to match api-inference
-        Ok((headers, Json(vec![generation])).into_response())
+        Ok((headers, Json(vec![generation.0])).into_response())
     }
 }
```
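With the rewrite above, `compat_generate` owns the defaulting: the extension is renamed to `default_return_full_text` and is written into the request only when the caller left the parameter unset, so an explicit `false` is never overridden. The prompt-prepending block that used to live in this route is dropped because `generate` itself now honors the flag (see the `-122,7 +118,12` hunk below). A self-contained sketch of the fill-if-unset pattern, with hypothetical stand-in types:

```rust
// Hypothetical stand-in for the parameters carried by CompatGenerateRequest.
struct Parameters {
    return_full_text: Option<bool>,
}

// Apply a deployment-wide default only when the caller made no choice.
fn apply_default(params: &mut Parameters, default_return_full_text: bool) {
    if params.return_full_text.is_none() {
        params.return_full_text = Some(default_return_full_text);
    }
}

fn main() {
    let mut unset = Parameters { return_full_text: None };
    let mut declined = Parameters { return_full_text: Some(false) };

    apply_default(&mut unset, true);
    apply_default(&mut declined, true);

    assert_eq!(unset.return_full_text, Some(true)); // default kicks in
    assert_eq!(declined.return_full_text, Some(false)); // explicit choice survives
}
```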
```diff
@@ -75,7 +71,7 @@ async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorRe
         top_p: None,
         do_sample: false,
         max_new_tokens: 1,
-        return_full_text: false,
+        return_full_text: None,
         stop: Vec::new(),
         details: false,
         seed: None,
```
```diff
@@ -87,32 +83,32 @@ async fn health(infer: Extension<Infer>) -> Result<(), (StatusCode, Json<ErrorRe
 
 /// Generate tokens
 #[utoipa::path(
     post,
     tag = "Text Generation Inference",
     path = "/generate",
     request_body = GenerateRequest,
     responses(
         (status = 200, description = "Generated Text", body = GenerateResponse),
         (status = 424, description = "Generation Error", body = ErrorResponse,
-            example = json!({"error": "Request failed during generation"})),
+            example = json ! ({"error": "Request failed during generation"})),
         (status = 429, description = "Model is overloaded", body = ErrorResponse,
-            example = json!({"error": "Model is overloaded"})),
+            example = json ! ({"error": "Model is overloaded"})),
         (status = 422, description = "Input validation error", body = ErrorResponse,
-            example = json!({"error": "Input validation error"})),
+            example = json ! ({"error": "Input validation error"})),
         (status = 500, description = "Incomplete generation", body = ErrorResponse,
-            example = json!({"error": "Incomplete generation"})),
+            example = json ! ({"error": "Incomplete generation"})),
     )
 )]
 #[instrument(
     skip(infer),
     fields(
         total_time,
         validation_time,
         queue_time,
         inference_time,
         time_per_token,
         seed,
     )
 )]
 async fn generate(
     infer: Extension<Infer>,
```
```diff
@@ -122,7 +118,12 @@ async fn generate(
     let start_time = Instant::now();
 
     let mut add_prompt = None;
-    if req.0.parameters.return_full_text {
+    if req
+        .0
+        .parameters
+        .return_full_text
+        .unwrap_or(false)
+    {
         add_prompt = Some(req.0.inputs.clone());
     }
 
```
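In `generate`, `unwrap_or(false)` collapses the tri-state: a request that never passed through `compat_generate` still carries `None` and behaves as if `return_full_text` were `false`. When the flag resolves to `true`, the handler remembers the prompt and prepends it to the generated text further down. A simplified sketch of that resolution and prepend step (local helper types, not the router's actual ones):

```rust
// Simplified stand-in for the handler's prompt-prepend step.
fn finalize_text(inputs: &str, generated: &str, return_full_text: Option<bool>) -> String {
    // None behaves exactly like Some(false), mirroring unwrap_or(false).
    if return_full_text.unwrap_or(false) {
        format!("{inputs}{generated}")
    } else {
        generated.to_string()
    }
}

fn main() {
    assert_eq!(finalize_text("Hello", " world", Some(true)), "Hello world");
    assert_eq!(finalize_text("Hello", " world", Some(false)), " world");
    assert_eq!(finalize_text("Hello", " world", None), " world"); // unset == off
}
```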
```diff
@@ -209,42 +210,42 @@ async fn generate(
 
 /// Generate a stream of token using Server-Sent Events
 #[utoipa::path(
     post,
     tag = "Text Generation Inference",
     path = "/generate_stream",
     request_body = GenerateRequest,
     responses(
         (status = 200, description = "Generated Text", body = StreamResponse,
-            content_type="text/event-stream"),
+            content_type = "text/event-stream"),
         (status = 424, description = "Generation Error", body = ErrorResponse,
-            example = json!({"error": "Request failed during generation"}),
-            content_type="text/event-stream"),
+            example = json ! ({"error": "Request failed during generation"}),
+            content_type = "text/event-stream"),
         (status = 429, description = "Model is overloaded", body = ErrorResponse,
-            example = json!({"error": "Model is overloaded"}),
-            content_type="text/event-stream"),
+            example = json ! ({"error": "Model is overloaded"}),
+            content_type = "text/event-stream"),
         (status = 422, description = "Input validation error", body = ErrorResponse,
-            example = json!({"error": "Input validation error"}),
-            content_type="text/event-stream"),
+            example = json ! ({"error": "Input validation error"}),
+            content_type = "text/event-stream"),
         (status = 500, description = "Incomplete generation", body = ErrorResponse,
-            example = json!({"error": "Incomplete generation"}),
-            content_type="text/event-stream"),
+            example = json ! ({"error": "Incomplete generation"}),
+            content_type = "text/event-stream"),
     )
 )]
 #[instrument(
     skip(infer),
     fields(
         total_time,
         validation_time,
         queue_time,
         inference_time,
         time_per_token,
         seed,
     )
 )]
 async fn generate_stream(
     infer: Extension<Infer>,
     req: Json<GenerateRequest>,
-) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
+) -> Sse<impl Stream<Item=Result<Event, Infallible>>> {
     let span = tracing::Span::current();
     let start_time = Instant::now();
 
```
```diff
@@ -254,7 +255,7 @@ async fn generate_stream(
         let mut error = false;
 
         let mut add_prompt = None;
-        if req.0.parameters.return_full_text {
+        if req.0.parameters.return_full_text.unwrap_or(false) {
             add_prompt = Some(req.0.inputs.clone());
         }
         let details = req.0.parameters.details;
```
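The streaming path resolves the flag the same way, so `/generate` and `/generate_stream` agree on how an unset value behaves; only the compat route injects a deployment default. For illustration, an opt-in request body could look like the following, built with the same `serde_json::json!` macro the annotations use (the inputs and parameter values here are made up):

```rust
use serde_json::json;

fn main() {
    // Hypothetical payload for POST /generate or /generate_stream.
    let request = json!({
        "inputs": "My name is Olivier and I",
        "parameters": {
            "max_new_tokens": 20,
            "return_full_text": true
        }
    });
    println!("{}", serde_json::to_string_pretty(&request).unwrap());
}
```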
```diff
@@ -370,10 +371,10 @@ async fn generate_stream(
 
 /// Prometheus metrics scrape endpoint
 #[utoipa::path(
     get,
     tag = "Text Generation Inference",
     path = "/metrics",
     responses((status = 200, description = "Prometheus Metrics", body = String))
 )]
 async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
     prom_handle.render()
```
```diff
@@ -398,35 +399,35 @@ pub async fn run(
     // OpenAPI documentation
     #[derive(OpenApi)]
     #[openapi(
         paths(
             generate,
             generate_stream,
             metrics,
         ),
         components(
             schemas(
                 GenerateRequest,
                 GenerateParameters,
                 PrefillToken,
                 Token,
                 GenerateResponse,
                 Details,
                 FinishReason,
                 StreamResponse,
                 StreamDetails,
                 ErrorResponse,
             )
         ),
         tags(
             (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
         ),
         info(
             title = "Text Generation Inference",
             license(
                 name = "Apache 2.0",
                 url = "https://www.apache.org/licenses/LICENSE-2.0"
             )
         )
     )]
     struct ApiDoc;
 
```
```diff
@@ -492,7 +493,7 @@ async fn shutdown_signal() {
     };
 
     #[cfg(unix)]
     let terminate = async {
         signal::unix::signal(signal::unix::SignalKind::terminate())
             .expect("failed to install signal handler")
             .recv()
```
```diff
@@ -500,7 +501,7 @@ async fn shutdown_signal() {
     };
 
     #[cfg(not(unix))]
     let terminate = std::future::pending::<()>();
 
     tokio::select! {
         _ = ctrl_c => {},
```