/// HTTP Server logic
use crate::health::Health;
use crate::infer::{InferError, InferResponse, InferStreamResponse};
use crate::validation::ValidationError;
use crate::{
    BestOfSequence, CompatGenerateRequest, Details, ErrorResponse, FinishReason,
    GenerateParameters, GenerateRequest, GenerateResponse, HubModelInfo, Infer, Info, PrefillToken,
    StreamDetails, StreamResponse, Token, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, Method, StatusCode};
use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::{IntoResponse, Response};
use axum::routing::{get, post};
use axum::{http, Json, Router};
use axum_tracing_opentelemetry::opentelemetry_tracing_layer;
use futures::stream::StreamExt;
use futures::Stream;
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle};
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use text_generation_client::{ShardInfo, ShardedClient};
use tokenizers::Tokenizer;
use tokio::signal;
use tokio::time::Instant;
use tower_http::cors::{AllowOrigin, CorsLayer};
use tracing::{info_span, instrument, Instrument};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;

/// Generate tokens if `stream == false` or a stream of tokens if `stream == true`
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/",
    request_body = CompatGenerateRequest,
    responses(
        (status = 200, description = "Generated Text",
            content(
                ("application/json" = GenerateResponse),
                ("text/event-stream" = StreamResponse),
            )),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(skip(infer, req))]
async fn compat_generate(
    default_return_full_text: Extension<bool>,
    infer: Extension<Infer>,
    req: Json<CompatGenerateRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let mut req = req.0;

    // default return_full_text given the pipeline_tag
    if req.parameters.return_full_text.is_none() {
        req.parameters.return_full_text = Some(default_return_full_text.0)
    }

    // switch on stream
    if req.stream {
        Ok(generate_stream(infer, Json(req.into()))
            .await
            .into_response())
    } else {
        let (headers, generation) = generate(infer, Json(req.into())).await?;
        // wrap generation inside a Vec to match api-inference
        Ok((headers, Json(vec![generation.0])).into_response())
    }
}
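
// Illustrative request bodies for the compat route above (a sketch only; the authoritative
// schema is the `CompatGenerateRequest` / `GenerateParameters` structs referenced in the
// utoipa annotation):
//
//   POST /   {"inputs": "My name is", "parameters": {"max_new_tokens": 20}, "stream": false}
//   POST /   {"inputs": "My name is", "parameters": {"max_new_tokens": 20}, "stream": true}
//
// With `stream == false` the handler returns a JSON array containing a single generation to
// match the api-inference format; with `stream == true` it returns Server-Sent Events.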

/// Text Generation Inference endpoint info
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/info",
    responses((status = 200, description = "Served model info", body = Info))
)]
#[instrument]
async fn get_model_info(info: Extension<Info>) -> Json<Info> {
    Json(info.0)
}

#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/health",
    responses(
        (status = 200, description = "Everything is working fine"),
        (status = 503, description = "Text generation inference is down", body = ErrorResponse,
            example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})),
    )
)]
#[instrument(skip(health))]
/// Health check method
async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> {
    match health.check().await {
        true => Ok(()),
        false => Err((
            StatusCode::SERVICE_UNAVAILABLE,
            Json(ErrorResponse {
                error: "unhealthy".to_string(),
                error_type: "healthcheck".to_string(),
            }),
        )),
    }
}

/// Generate tokens
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = GenerateResponse),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"})),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"})),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"})),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"})),
    )
)]
#[instrument(
    skip_all,
    fields(
        parameters = ? req.0.parameters,
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.0.inputs);

    let compute_characters = req.0.inputs.chars().count();
    let mut add_prompt = None;
    if req.0.parameters.return_full_text.unwrap_or(false) {
        add_prompt = Some(req.0.inputs.clone());
    }

    let details = req.0.parameters.details || req.0.parameters.decoder_input_details;

    // Inference
    let (response, best_of_responses) = match req.0.parameters.best_of {
        Some(best_of) if best_of > 1 => {
            let (response, best_of_responses) = infer.generate_best_of(req.0, best_of).await?;
            (response, Some(best_of_responses))
        }
        _ => (infer.generate(req.0).await?, None),
    };

    // Token details
    let details = match details {
        true => {
            // convert best_of_responses
            let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| {
                responses
                    .into_iter()
                    .map(|response: InferResponse| {
                        // Add prompt if return_full_text
                        let mut output_text = response.generated_text.text;
                        if let Some(prompt) = &add_prompt {
                            output_text = prompt.clone() + &output_text;
                        }

                        BestOfSequence {
                            generated_text: output_text,
                            finish_reason: FinishReason::from(
                                response.generated_text.finish_reason,
                            ),
                            generated_tokens: response.generated_text.generated_tokens,
                            prefill: response.prefill,
                            tokens: response.tokens,
                            seed: response.generated_text.seed,
                        }
                    })
                    .collect()
            });

            Some(Details {
                finish_reason: FinishReason::from(response.generated_text.finish_reason),
                generated_tokens: response.generated_text.generated_tokens,
                prefill: response.prefill,
                tokens: response.tokens,
                seed: response.generated_text.seed,
                best_of_sequences,
            })
        }
        false => None,
    };

    // Timings
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
    let inference_time = Instant::now() - response.start;
    let time_per_token = inference_time / response.generated_text.generated_tokens;
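    // By construction these intervals partition the request lifetime: up to the two separate
    // clock reads above, total_time is approximately validation_time + queue_time +
    // inference_time, and time_per_token is the mean over all generated tokens rather than a
    // per-token measurement.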

    // Tracing metadata
    span.record("total_time", format!("{total_time:?}"));
    span.record("validation_time", format!("{validation_time:?}"));
    span.record("queue_time", format!("{queue_time:?}"));
    span.record("inference_time", format!("{inference_time:?}"));
    span.record("time_per_token", format!("{time_per_token:?}"));
    span.record("seed", format!("{:?}", response.generated_text.seed));

    // Headers
    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert(
        "x-total-time",
        total_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-validation-time",
        validation_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-queue-time",
        queue_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-inference-time",
        inference_time.as_millis().to_string().parse().unwrap(),
    );
    headers.insert(
        "x-time-per-token",
        time_per_token.as_millis().to_string().parse().unwrap(),
    );

    // Metrics
    metrics::increment_counter!("tgi_request_success");
    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_validation_duration",
        validation_time.as_secs_f64()
    );
    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
    metrics::histogram!(
        "tgi_request_inference_duration",
        inference_time.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_mean_time_per_token_duration",
        time_per_token.as_secs_f64()
    );
    metrics::histogram!(
        "tgi_request_generated_tokens",
        response.generated_text.generated_tokens as f64
    );

    // Send response
    let mut output_text = response.generated_text.text;
    if let Some(prompt) = add_prompt {
        output_text = prompt + &output_text;
    }

    tracing::debug!("Output: {}", output_text);
    tracing::info!("Success");

    let response = GenerateResponse {
        generated_text: output_text,
        details,
    };
    Ok((headers, Json(response)))
}

/// Generate a stream of tokens using Server-Sent Events
#[utoipa::path(
    post,
    tag = "Text Generation Inference",
    path = "/generate_stream",
    request_body = GenerateRequest,
    responses(
        (status = 200, description = "Generated Text", body = StreamResponse,
            content_type = "text/event-stream"),
        (status = 424, description = "Generation Error", body = ErrorResponse,
            example = json ! ({"error": "Request failed during generation"}),
            content_type = "text/event-stream"),
        (status = 429, description = "Model is overloaded", body = ErrorResponse,
            example = json ! ({"error": "Model is overloaded"}),
            content_type = "text/event-stream"),
        (status = 422, description = "Input validation error", body = ErrorResponse,
            example = json ! ({"error": "Input validation error"}),
            content_type = "text/event-stream"),
        (status = 500, description = "Incomplete generation", body = ErrorResponse,
            example = json ! ({"error": "Incomplete generation"}),
            content_type = "text/event-stream"),
    )
)]
#[instrument(
    skip_all,
    fields(
        parameters = ? req.0.parameters,
        total_time,
        validation_time,
        queue_time,
        inference_time,
        time_per_token,
        seed,
    )
)]
async fn generate_stream(
    infer: Extension<Infer>,
    req: Json<GenerateRequest>,
) -> (
    HeaderMap,
    Sse<impl Stream<Item = Result<Event, Infallible>>>,
) {
    let span = tracing::Span::current();
    let start_time = Instant::now();
    metrics::increment_counter!("tgi_request_count");

    tracing::debug!("Input: {}", req.0.inputs);

    let compute_characters = req.0.inputs.chars().count();

    let mut headers = HeaderMap::new();
    headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
    headers.insert(
        "x-compute-characters",
        compute_characters.to_string().parse().unwrap(),
    );
    headers.insert("X-Accel-Buffering", "no".parse().unwrap());

    let stream = async_stream::stream! {
        // Inference
        let mut end_reached = false;
        let mut error = false;

        let mut add_prompt = None;
        if req.0.parameters.return_full_text.unwrap_or(false) {
            add_prompt = Some(req.0.inputs.clone());
        }
        let details = req.0.parameters.details;

        let best_of = req.0.parameters.best_of.unwrap_or(1);
        if best_of != 1 {
            let err = InferError::from(ValidationError::BestOfStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else if req.0.parameters.decoder_input_details {
            let err = InferError::from(ValidationError::PrefillDetailsStream);
            metrics::increment_counter!("tgi_request_failure", "err" => "validation");
            tracing::error!("{err}");
            yield Ok(Event::from(err));
        } else {
            match infer.generate_stream(req.0).instrument(info_span!(parent: &span, "async_stream")).await {
                // Keep permit as long as generate_stream lives
                Ok((_permit, mut response_stream)) => {
                    // Server-Sent Event stream
                    while let Some(response) = response_stream.next().await {
                        match response {
                            Ok(response) => {
                                match response {
                                    // Prefill is ignored
                                    InferStreamResponse::Prefill(_) => {}
                                    // Yield event for every new token
                                    InferStreamResponse::Token(token) => {
                                        tracing::debug!(parent: &span, "Token: {:?}", token);

                                        // StreamResponse
                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: None,
                                            details: None,
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap())
                                    }
                                    // Yield event for last token and compute timings
                                    InferStreamResponse::End {
                                        token,
                                        generated_text,
                                        start,
                                        queued,
                                    } => {
                                        // Token details
                                        let details = match details {
                                            true => Some(StreamDetails {
                                                finish_reason: FinishReason::from(generated_text.finish_reason),
                                                generated_tokens: generated_text.generated_tokens,
                                                seed: generated_text.seed,
                                            }),
                                            false => None,
                                        };

                                        // Timings
                                        let total_time = start_time.elapsed();
                                        let validation_time = queued - start_time;
                                        let queue_time = start - queued;
                                        let inference_time = Instant::now() - start;
                                        let time_per_token = inference_time / generated_text.generated_tokens;

                                        // Tracing metadata
                                        span.record("total_time", format!("{total_time:?}"));
                                        span.record("validation_time", format!("{validation_time:?}"));
                                        span.record("queue_time", format!("{queue_time:?}"));
                                        span.record("inference_time", format!("{inference_time:?}"));
                                        span.record("time_per_token", format!("{time_per_token:?}"));
                                        span.record("seed", format!("{:?}", generated_text.seed));

                                        // Metrics
                                        metrics::increment_counter!("tgi_request_success");
                                        metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
                                        metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
                                        metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

                                        // StreamResponse
                                        end_reached = true;

                                        let mut output_text = generated_text.text;
                                        if let Some(prompt) = add_prompt {
                                            output_text = prompt + &output_text;
                                        }

                                        tracing::debug!(parent: &span, "Output: {}", output_text);
                                        tracing::info!(parent: &span, "Success");

                                        let stream_token = StreamResponse {
                                            token,
                                            generated_text: Some(output_text),
                                            details
                                        };

                                        yield Ok(Event::default().json_data(stream_token).unwrap());
                                        break;
                                    }
                                }
                            }
                            // yield error
                            Err(err) => {
                                error = true;
                                yield Ok(Event::from(err));
                                break;
                            }
                        }
                    }
                },
                // yield error
                Err(err) => {
                    error = true;
                    yield Ok(Event::from(err));
                }
            }
            // Check if generation reached the end
            // Skip if we already sent an error
            if !end_reached && !error {
                let err = InferError::IncompleteGeneration;
                metrics::increment_counter!("tgi_request_failure", "err" => "incomplete");
                tracing::error!("{err}");
                yield Ok(Event::from(err));
            }
        }
    };

    (headers, Sse::new(stream).keep_alive(KeepAlive::default()))
}
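
// On the wire, each item yielded above becomes one Server-Sent Event; with axum's
// `Event::json_data` that is a single `data: {...}` line per token, roughly (illustrative
// payload only, the actual fields come from `StreamResponse` and `Token`):
//
//   data: {"token": {"id": 42, "text": " world", "logprob": -0.5, "special": false}, "generated_text": null, "details": null}
//
// The final event carries `generated_text` and, when requested, `details`.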

/// Prometheus metrics scrape endpoint
#[utoipa::path(
    get,
    tag = "Text Generation Inference",
    path = "/metrics",
    responses((status = 200, description = "Prometheus Metrics", body = String))
)]
async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String {
    prom_handle.render()
}

/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
    model_info: HubModelInfo,
    shard_info: ShardInfo,
    compat_return_full_text: bool,
    max_concurrent_requests: usize,
    max_best_of: usize,
    max_stop_sequences: usize,
    max_input_length: usize,
    max_total_tokens: usize,
    waiting_served_ratio: f32,
    max_batch_prefill_tokens: u32,
    max_batch_total_tokens: u32,
    max_waiting_tokens: usize,
    client: ShardedClient,
    tokenizer: Option<Tokenizer>,
    validation_workers: usize,
    addr: SocketAddr,
    allow_origin: Option<AllowOrigin>,
    ngrok: bool,
    ngrok_authtoken: Option<String>,
    ngrok_edge: Option<String>,
) -> Result<(), axum::BoxError> {
    // OpenAPI documentation
    #[derive(OpenApi)]
    #[openapi(
        paths(
            health,
            get_model_info,
            compat_generate,
            generate,
            generate_stream,
            metrics,
        ),
        components(
            schemas(
                Info,
                CompatGenerateRequest,
                GenerateRequest,
                GenerateParameters,
                PrefillToken,
                Token,
                GenerateResponse,
                BestOfSequence,
                Details,
                FinishReason,
                StreamResponse,
                StreamDetails,
                ErrorResponse,
            )
        ),
        tags(
            (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API")
        ),
        info(
            title = "Text Generation Inference",
            license(
                name = "Apache 2.0",
                url = "https://www.apache.org/licenses/LICENSE-2.0"
            )
        )
    )]
    struct ApiDoc;

    // Create state
    let validation = Validation::new(
        validation_workers,
        tokenizer,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
    );
    let generation_health = Arc::new(AtomicBool::new(false));
    let health_ext = Health::new(client.clone(), generation_health.clone());
    let infer = Infer::new(
        client,
        validation,
        waiting_served_ratio,
        max_batch_prefill_tokens,
        max_batch_total_tokens,
        max_waiting_tokens,
        max_concurrent_requests,
        shard_info.requires_padding,
        generation_health,
    );

    // Duration buckets
    let duration_matcher = Matcher::Suffix(String::from("duration"));
    let n_duration_buckets = 35;
    let mut duration_buckets = Vec::with_capacity(n_duration_buckets);
    // Minimum duration in seconds
    let mut value = 0.0001;
    for _ in 0..n_duration_buckets {
        // geometric sequence
        value *= 1.5;
        duration_buckets.push(value);
    }
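    // The 35 geometric buckets therefore span roughly 0.00015s (0.0001 * 1.5) up to about
    // 146s (0.0001 * 1.5^35), covering sub-millisecond queue times as well as multi-minute
    // generations.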
    // Input Length buckets
    let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length"));
    let input_length_buckets: Vec<f64> = (0..100)
        .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Generated tokens buckets
    let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens"));
    let generated_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Max new tokens buckets
    let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens"));
    let max_new_tokens_buckets: Vec<f64> = (0..100)
        .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64)
        .collect();
    // Batch size buckets
    let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size"));
    let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect();
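    // Each of the token/length histograms above gets 100 evenly spaced buckets from
    // (max / 100) up to max, while the batch size histogram simply uses one bucket per
    // possible size from 1 to 1024.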

    // Prometheus handler
    let builder = PrometheusBuilder::new()
        .set_buckets_for_metric(duration_matcher, &duration_buckets)
        .unwrap()
        .set_buckets_for_metric(input_length_matcher, &input_length_buckets)
        .unwrap()
        .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets)
        .unwrap()
        .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets)
        .unwrap();
    let prom_handle = builder
        .install_recorder()
        .expect("failed to install metrics recorder");

    // CORS layer
    let allow_origin = allow_origin.unwrap_or(AllowOrigin::any());
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([http::header::CONTENT_TYPE])
        .allow_origin(allow_origin);

    // Endpoint info
    let info = Info {
        model_id: model_info.model_id,
        model_sha: model_info.sha,
        model_dtype: shard_info.dtype,
        model_device_type: shard_info.device_type,
        model_pipeline_tag: model_info.pipeline_tag,
        max_concurrent_requests,
        max_best_of,
        max_stop_sequences,
        max_input_length,
        max_total_tokens,
        waiting_served_ratio,
        max_batch_total_tokens,
        max_waiting_tokens,
        validation_workers,
        version: env!("CARGO_PKG_VERSION"),
        sha: option_env!("VERGEN_GIT_SHA"),
        docker_label: option_env!("DOCKER_LABEL"),
    };

    // Create router
    let app = Router::new()
        .merge(SwaggerUi::new("/docs").url("/api-doc/openapi.json", ApiDoc::openapi()))
        // Base routes
        .route("/", post(compat_generate))
        .route("/info", get(get_model_info))
        .route("/generate", post(generate))
        .route("/generate_stream", post(generate_stream))
        // AWS Sagemaker route
        .route("/invocations", post(compat_generate))
        // Base Health route
        .route("/health", get(health))
        // Inference API health route
        .route("/", get(health))
        // AWS Sagemaker health route
        .route("/ping", get(health))
        // Prometheus metrics route
        .route("/metrics", get(metrics))
        .layer(Extension(info))
        .layer(Extension(health_ext.clone()))
        .layer(Extension(compat_return_full_text))
        .layer(Extension(infer))
        .layer(Extension(prom_handle.clone()))
        .layer(opentelemetry_tracing_layer())
        .layer(cors_layer);
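    // Note that "/" is registered twice (POST -> compat_generate, GET -> health): axum merges
    // the method routers registered for the same path, so both verbs are served on the root.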

    if ngrok {
        #[cfg(feature = "ngrok")]
        {
            use ngrok::config::TunnelBuilder;

            let _ = addr;

            let authtoken =
                ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling");

            let edge = ngrok_edge.expect("`ngrok-edge` must be set when using ngrok tunneling");

            let tunnel = ngrok::Session::builder()
                .authtoken(authtoken)
                .connect()
                .await
                .unwrap()
                .labeled_tunnel()
                .label("edge", edge);

            let listener = tunnel.listen().await.unwrap();

            // Run prom metrics and health locally too
            tokio::spawn(
                axum::Server::bind(&addr)
                    .serve(
                        Router::new()
                            .route("/health", get(health))
                            .route("/metrics", get(metrics))
                            .layer(Extension(health_ext))
                            .layer(Extension(prom_handle))
                            .into_make_service(),
                    )
                    // Wait until all requests are finished to shut down
                    .with_graceful_shutdown(shutdown_signal()),
            );

            // Run server
            axum::Server::builder(listener)
                .serve(app.into_make_service())
                // Wait until all requests are finished to shut down
                .with_graceful_shutdown(shutdown_signal())
                .await?;
        }
        #[cfg(not(feature = "ngrok"))]
        {
            // Consume the ngrok arguments to silence unused-variable warnings when the
            // `ngrok` feature is disabled
            let _ngrok_authtoken = ngrok_authtoken;
            let _ngrok_edge = ngrok_edge;

            panic!("`text-generation-router` was compiled without the `ngrok` feature");
        }
    } else {
        // Run server
        axum::Server::bind(&addr)
            .serve(app.into_make_service())
            // Wait until all requests are finished to shut down
            .with_graceful_shutdown(shutdown_signal())
            .await?;
    }
    Ok(())
}

/// Shutdown signal handler
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }

    tracing::info!("signal received, starting graceful shutdown");
    opentelemetry::global::shutdown_tracer_provider();
}

impl From<i32> for FinishReason {
    fn from(finish_reason: i32) -> Self {
        let finish_reason = text_generation_client::FinishReason::from_i32(finish_reason).unwrap();
        match finish_reason {
            text_generation_client::FinishReason::Length => FinishReason::Length,
            text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken,
            text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence,
        }
    }
}

/// Convert to Axum supported formats
impl From<InferError> for (StatusCode, Json<ErrorResponse>) {
    fn from(err: InferError) -> Self {
        let status_code = match err {
            InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY,
            InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS,
            InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY,
            InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR,
        };

        (
            status_code,
            Json(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            }),
        )
    }
}

impl From<InferError> for Event {
    fn from(err: InferError) -> Self {
        Event::default()
            .json_data(ErrorResponse {
                error: err.to_string(),
                error_type: err.error_type().to_string(),
            })
            .unwrap()
    }
}