diff --git a/router/src/main.rs b/router/src/main.rs
index bbbf6d05..2d7ebb0e 100644
--- a/router/src/main.rs
+++ b/router/src/main.rs
@@ -110,21 +110,12 @@ fn main() -> Result<(), std::io::Error> {
         serde_json::from_str(&model_info).expect("unable to parse model info");
 
     // if pipeline-tag == text-generation we return prompt + generated_text from the / route
-    let compat_return_full_text = match model_info["pipeline_tag"].as_str() {
+    let compat_return_full_text = match model_info.get("pipeline_tag") {
         None => {
             tracing::warn!("no pipeline tag found for model {tokenizer_name}");
-            tracing::warn!("returning only generated_text from the compat route");
             false
         }
-        Some(pipeline_tag) => {
-            if pipeline_tag == "text-generation" {
-                tracing::info!("returning prompt + generated_text from the compat route");
-                true
-            } else {
-                tracing::info!("returning only generated_text from the compat route");
-                false
-            }
-        }
+        Some(pipeline_tag) => pipeline_tag.as_str() == Some("text-generation"),
     };
 
     // Instantiate sharded client from the master unix socket
diff --git a/router/src/server.rs b/router/src/server.rs
index af50c4c0..ede961bb 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -33,21 +33,20 @@ async fn compat_generate(
     infer: Extension<Infer>,
     req: Json<CompatGenerateRequest>,
 ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
-    // switch on stream
     let mut req = req.0;
+
+    // default return_full_text given the pipeline_tag
     if req.parameters.return_full_text.is_none() {
         req.parameters.return_full_text = Some(default_return_full_text.0)
     }
+
+    // switch on stream
     if req.stream {
-        Ok(
-            generate_stream(infer, Json(req.into()))
-                .await
-                .into_response(),
-        )
+        Ok(generate_stream(infer, Json(req.into()))
+            .await
+            .into_response())
     } else {
-        let (headers, generation) =
-            generate(infer, Json(req.into())).await?;
+        let (headers, generation) = generate(infer, Json(req.into())).await?;
         // wrap generation inside a Vec to match api-inference
         Ok((headers, Json(vec![generation.0])).into_response())
     }
@@ -100,15 +99,15 @@ example = json ! ({"error": "Incomplete generation"})),
 )
 )]
 #[instrument(
-skip(infer),
-fields(
-total_time,
-validation_time,
-queue_time,
-inference_time,
-time_per_token,
-seed,
-)
+    skip(infer),
+    fields(
+        total_time,
+        validation_time,
+        queue_time,
+        inference_time,
+        time_per_token,
+        seed,
+    )
 )]
 async fn generate(
     infer: Extension<Infer>,
     req: Json<GenerateRequest>,
@@ -118,12 +117,7 @@ async fn generate(
     let start_time = Instant::now();
 
     let mut add_prompt = None;
-    if req
-        .0
-        .parameters
-        .return_full_text
-        .unwrap_or(false)
-    {
+    if req.0.parameters.return_full_text.unwrap_or(false) {
         add_prompt = Some(req.0.inputs.clone());
     }
 
@@ -232,20 +226,20 @@ content_type = "text/event-stream"),
 )
 )]
 #[instrument(
-skip(infer),
-fields(
-total_time,
-validation_time,
-queue_time,
-inference_time,
-time_per_token,
-seed,
-)
+    skip(infer),
+    fields(
+        total_time,
+        validation_time,
+        queue_time,
+        inference_time,
+        time_per_token,
+        seed,
+    )
 )]
 async fn generate_stream(
     infer: Extension<Infer>,
     req: Json<GenerateRequest>,
-) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
+) -> Sse<impl Stream<Item = Result<Event, Infallible>>> {
     let span = tracing::Span::current();
     let start_time = Instant::now();
 
@@ -493,7 +487,7 @@ async fn shutdown_signal() {
     };
 
     #[cfg(unix)]
-    let terminate = async {
+    let terminate = async {
         signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
-    let terminate = std::future::pending::<()>();
+    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
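
Note (not part of the patch): the rewritten `compat_return_full_text` check works because `serde_json::Value::get` returns `None` for a missing key and `Value::as_str` returns `None` for non-string values, so a single comparison against `Some("text-generation")` covers the missing, non-string, and mismatched cases at once. A minimal standalone sketch of that logic, assuming only the serde_json crate (the free function and the test values are illustrative, not taken from the patch):

    // Returns true only when model_info has a string "pipeline_tag"
    // equal to "text-generation"; any other shape yields false.
    fn compat_return_full_text(model_info: &serde_json::Value) -> bool {
        match model_info.get("pipeline_tag") {
            // key absent: only generated_text is returned from the compat route
            None => false,
            // as_str() is None for non-string values, so this comparison
            // is false for both wrong types and wrong tags
            Some(pipeline_tag) => pipeline_tag.as_str() == Some("text-generation"),
        }
    }

    fn main() {
        let tagged: serde_json::Value =
            serde_json::from_str(r#"{"pipeline_tag": "text-generation"}"#).unwrap();
        assert!(compat_return_full_text(&tagged));

        // Missing key and non-string values both fall through to false.
        let untagged: serde_json::Value = serde_json::from_str("{}").unwrap();
        assert!(!compat_return_full_text(&untagged));
    }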