mirror of https://github.com/huggingface/text-generation-inference.git
synced 2025-09-09 19:34:53 +00:00

fmt

This commit is contained in:
parent f3f9faca2f
commit 51b029b089
@@ -110,21 +110,12 @@ fn main() -> Result<(), std::io::Error> {
         serde_json::from_str(&model_info).expect("unable to parse model info");
 
     // if pipeline-tag == text-generation we return prompt + generated_text from the / route
-    let compat_return_full_text = match model_info["pipeline_tag"].as_str() {
+    let compat_return_full_text = match model_info.get("pipeline_tag") {
         None => {
             tracing::warn!("no pipeline tag found for model {tokenizer_name}");
-            tracing::warn!("returning only generated_text from the compat route");
             false
         }
-        Some(pipeline_tag) => {
-            if pipeline_tag == "text-generation" {
-                tracing::info!("returning prompt + generated_text from the compat route");
-                true
-            } else {
-                tracing::info!("returning only generated_text from the compat route");
-                false
-            }
-        }
+        Some(pipeline_tag) => pipeline_tag.as_str() == Some("text-generation"),
     };
 
     // Instantiate sharded client from the master unix socket
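
For reference, a minimal standalone sketch of the refactored check (serde_json assumed as the only dependency; the function name and main are illustrative, not router code). Indexing a missing key yields Null, while `get` makes the absence explicit as an Option, which lets the match collapse to a single boolean expression:

// Illustrative sketch, not router code: the same pipeline-tag logic in isolation.
use serde_json::Value;

fn compat_return_full_text(model_info: &Value) -> bool {
    match model_info.get("pipeline_tag") {
        // no tag: the compat route returns only generated_text
        None => false,
        // tag present: return the prompt too iff this is a text-generation model
        Some(pipeline_tag) => pipeline_tag.as_str() == Some("text-generation"),
    }
}

fn main() {
    let info: Value = serde_json::json!({ "pipeline_tag": "text-generation" });
    assert!(compat_return_full_text(&info));
    assert!(!compat_return_full_text(&serde_json::json!({})));
}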
@@ -33,21 +33,20 @@ async fn compat_generate(
     infer: Extension<Infer>,
     req: Json<CompatGenerateRequest>,
 ) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
-    // switch on stream
     let mut req = req.0;
 
+    // default return_full_text given the pipeline_tag
     if req.parameters.return_full_text.is_none() {
         req.parameters.return_full_text = Some(default_return_full_text.0)
     }
 
+    // switch on stream
     if req.stream {
-        Ok(
-            generate_stream(infer, Json(req.into()))
-                .await
-                .into_response(),
-        )
+        Ok(generate_stream(infer, Json(req.into()))
+            .await
+            .into_response())
     } else {
-        let (headers, generation) =
-            generate(infer, Json(req.into())).await?;
+        let (headers, generation) = generate(infer, Json(req.into())).await?;
         // wrap generation inside a Vec to match api-inference
         Ok((headers, Json(vec![generation.0])).into_response())
     }
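
The non-streaming branch wraps the single generation in a Vec so the compat route's JSON body matches the api-inference array shape; a small sketch of the resulting payload (serde_json assumed, values illustrative):

fn main() {
    // one generation object, wrapped in a one-element array to match api-inference
    let generation = serde_json::json!({ "generated_text": "Hello, world" });
    let body = serde_json::to_string(&vec![generation]).unwrap();
    assert_eq!(body, r#"[{"generated_text":"Hello, world"}]"#);
}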
@@ -100,15 +99,15 @@ example = json ! ({"error": "Incomplete generation"})),
 )
 )]
 #[instrument(
     skip(infer),
     fields(
         total_time,
         validation_time,
         queue_time,
         inference_time,
         time_per_token,
         seed,
     )
 )]
 async fn generate(
     infer: Extension<Infer>,
@@ -118,12 +117,7 @@ async fn generate(
     let start_time = Instant::now();
 
     let mut add_prompt = None;
-    if req
-        .0
-        .parameters
-        .return_full_text
-        .unwrap_or(false)
-    {
+    if req.0.parameters.return_full_text.unwrap_or(false) {
         add_prompt = Some(req.0.inputs.clone());
     }
 
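
What `add_prompt` feeds later in the handler, sketched as a free function (names here are illustrative, not the router's own): when return_full_text was requested, the saved prompt is prepended to the generated text before the response is built.

// Illustrative only: prepend the saved prompt when return_full_text was requested.
fn full_text(add_prompt: Option<String>, generated_text: String) -> String {
    match add_prompt {
        Some(prompt) => prompt + &generated_text,
        None => generated_text,
    }
}

fn main() {
    assert_eq!(
        full_text(Some("Hello".into()), ", world!".into()),
        "Hello, world!"
    );
}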
@@ -232,20 +226,20 @@ content_type = "text/event-stream"),
 )
 )]
 #[instrument(
     skip(infer),
     fields(
         total_time,
         validation_time,
         queue_time,
         inference_time,
         time_per_token,
         seed,
     )
 )]
 async fn generate_stream(
     infer: Extension<Infer>,
     req: Json<GenerateRequest>,
-) -> Sse<impl Stream<Item=Result<Event, Infallible>>> {
+) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
     let span = tracing::Span::current();
     let start_time = Instant::now();
 
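
A minimal sketch of what the (reformatted) return type expresses, assuming axum's SSE types and the futures crate as used by the router: each stream item is Result<Event, Infallible>, so the type records that producing an event cannot fail.

use std::convert::Infallible;

use axum::response::sse::{Event, Sse};
use futures::stream::{self, Stream};

// Illustrative handler body: a one-event SSE stream with infallible items.
fn events() -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
    let event = Event::default().data("generated_text");
    Sse::new(stream::iter(vec![Ok::<Event, Infallible>(event)]))
}

fn main() {
    let _ = events();
}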
@@ -493,7 +487,7 @@ async fn shutdown_signal() {
     };
 
     #[cfg(unix)]
     let terminate = async {
         signal::unix::signal(signal::unix::SignalKind::terminate())
             .expect("failed to install signal handler")
             .recv()
@@ -501,7 +495,7 @@ async fn shutdown_signal() {
     };
 
     #[cfg(not(unix))]
     let terminate = std::future::pending::<()>();
 
     tokio::select! {
         _ = ctrl_c => {},
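
Pieced together from the two hunks above, the surrounding function is the usual tokio graceful-shutdown pattern; a runnable sketch (the ctrl_c future and the select's terminate arm are reconstructed from the standard pattern, since the diff view is truncated at this point):

use tokio::signal;

async fn shutdown_signal() {
    let ctrl_c = async {
        // reconstructed: the hunk only shows the closing `};` of this block
        signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        // reconstructed arm: cut off by the truncated diff view
        _ = terminate => {},
    }
}

#[tokio::main]
async fn main() {
    shutdown_signal().await;
}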