huggingface/text-generation-inference
https://github.com/huggingface/text-generation-inference.git

force as_secs

commit a3bdaca014 (parent 23b55861bc)
@@ -335,11 +335,12 @@ async fn prefill(
 ) -> Option<Batch> {
     let start_time = Instant::now();
     let batch_id = batch.id;
     metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill");

     match client.prefill(batch).await {
         Ok((generations, next_batch)) => {
             send_generations(generations, entries);
-            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed(), "method" => "prefill");
+            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "prefill");
             metrics::increment_counter!("tgi_batch_inference_success", "method" => "prefill");
             next_batch
         }
@@ -360,11 +361,12 @@ async fn decode(
     entries: &mut IntMap<u64, Entry>,
 ) -> Option<Batch> {
     let start_time = Instant::now();
     metrics::increment_counter!("tgi_batch_inference_count", "method" => "decode");

     match client.decode(batches).await {
         Ok((generations, next_batch)) => {
             send_generations(generations, entries);
-            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed(), "method" => "decode");
+            metrics::histogram!("tgi_batch_inference_duration", start_time.elapsed().as_secs_f64(), "method" => "decode");
             metrics::increment_counter!("tgi_batch_inference_success", "method" => "decode");
             next_batch
         }
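Both hunks make the same change: start_time.elapsed() returns a std::time::Duration, and the value is now converted explicitly with as_secs_f64() so the histogram observes fractional seconds. Below is a minimal sketch of the pattern, assuming the metrics-0.20-era macro syntax that appears in this diff (the function name is illustrative, not taken from the TGI source):

    // Assumed dependency: metrics = "0.20" (macro syntax matching this diff).
    use std::time::Instant;

    fn observe_prefill(start_time: Instant) {
        metrics::increment_counter!("tgi_batch_inference_count", "method" => "prefill");
        // Duration -> f64 seconds: histograms in the metrics crate record
        // f64 values, so the elapsed Duration is converted explicitly.
        metrics::histogram!(
            "tgi_batch_inference_duration",
            start_time.elapsed().as_secs_f64(),
            "method" => "prefill"
        );
    }

    fn main() {
        // With no recorder installed these macros are no-ops, so the snippet
        // runs standalone; in the router a recorder collects the values.
        observe_prefill(Instant::now());
    }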
@@ -186,6 +186,14 @@ async fn generate(
     let inference_time = Instant::now() - response.start;
     let time_per_token = inference_time / response.generated_text.generated_tokens;

+    // Tracing metadata
+    span.record("total_time", format!("{total_time:?}"));
+    span.record("validation_time", format!("{validation_time:?}"));
+    span.record("queue_time", format!("{queue_time:?}"));
+    span.record("inference_time", format!("{inference_time:?}"));
+    span.record("time_per_token", format!("{time_per_token:?}"));
+    span.record("seed", format!("{:?}", response.generated_text.seed));
+
     // Headers
     let mut headers = HeaderMap::new();
     headers.insert("x-compute-type", "gpu+optimized".parse().unwrap());
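The added block records the request timings onto the current tracing span before the response headers are built. One detail worth noting: tracing's Span::record can only fill in fields that were declared when the span was created, typically as tracing::field::Empty. A minimal sketch of that pattern, with illustrative names and assumed dependencies (tracing = "0.1", tracing-subscriber = "0.3"):

    use std::time::Duration;
    use tracing::{field::Empty, info_span};

    fn main() {
        tracing_subscriber::fmt().init();

        // Fields must be declared up front (as Empty) so that
        // Span::record can fill them in later.
        let span = info_span!("generate", total_time = Empty, time_per_token = Empty);

        let total_time = Duration::from_millis(480);
        let time_per_token = total_time / 16;

        // Record Debug-formatted Durations, mirroring the diff's
        // span.record("total_time", format!("{total_time:?}")) calls.
        span.record("total_time", format!("{total_time:?}").as_str());
        span.record("time_per_token", format!("{time_per_token:?}").as_str());

        let _guard = span.enter();
        tracing::info!("Output: hello");
    }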
@@ -218,22 +226,22 @@ async fn generate(
         time_per_token.as_millis().to_string().parse().unwrap(),
     );

-    // Tracing metadata
-    span.record("total_time", format!("{total_time:?}"));
-    span.record("validation_time", format!("{validation_time:?}"));
-    span.record("queue_time", format!("{queue_time:?}"));
-    span.record("inference_time", format!("{inference_time:?}"));
-    span.record("time_per_token", format!("{time_per_token:?}"));
-    span.record("seed", format!("{:?}", response.generated_text.seed));
-    tracing::info!("Output: {}", response.generated_text.text);
-
     // Metrics
     metrics::increment_counter!("tgi_request_success");
-    metrics::histogram!("tgi_request_duration", total_time);
-    metrics::histogram!("tgi_request_validation_duration", validation_time);
-    metrics::histogram!("tgi_request_queue_duration", queue_time);
-    metrics::histogram!("tgi_request_inference_duration", inference_time);
-    metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token);
+    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
+    metrics::histogram!(
+        "tgi_request_validation_duration",
+        validation_time.as_secs_f64()
+    );
+    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
+    metrics::histogram!(
+        "tgi_request_inference_duration",
+        inference_time.as_secs_f64()
+    );
+    metrics::histogram!(
+        "tgi_request_mean_time_per_token_duration",
+        time_per_token.as_secs_f64()
+    );
     metrics::histogram!(
         "tgi_request_generated_tokens",
         response.generated_text.generated_tokens as f64
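The time_per_token value that feeds both the per-token response header and the histogram comes from inference_time / response.generated_text.generated_tokens in the earlier hunk; Duration implements Div<u32>, so the division yields another Duration that can then be rendered as integer milliseconds or fractional seconds. A small self-contained check of that arithmetic:

    use std::time::Duration;

    fn main() {
        // Mean per-token latency: Duration implements Div<u32>.
        let inference_time = Duration::from_millis(480);
        let generated_tokens: u32 = 16;
        let time_per_token = inference_time / generated_tokens;
        assert_eq!(time_per_token, Duration::from_millis(30));

        // as_millis() feeds the response header as a string;
        // as_secs_f64() feeds the histogram as fractional seconds.
        let header_value = time_per_token.as_millis().to_string();
        let histogram_value = time_per_token.as_secs_f64();
        println!("header: {header_value} ms, histogram: {histogram_value} s");
    }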
@@ -245,6 +253,8 @@ async fn generate(
         output_text = prompt + &output_text;
     }

+    tracing::info!("Output: {}", output_text);
+
     let response = GenerateResponse {
         generated_text: output_text,
         details,
@@ -370,15 +380,14 @@ async fn generate_stream(
     span.record("inference_time", format!("{inference_time:?}"));
     span.record("time_per_token", format!("{time_per_token:?}"));
     span.record("seed", format!("{:?}", generated_text.seed));
-    tracing::info!(parent: &span, "Output: {}", generated_text.text);

     // Metrics
     metrics::increment_counter!("tgi_request_success");
-    metrics::histogram!("tgi_request_duration", total_time);
-    metrics::histogram!("tgi_request_validation_duration", validation_time);
-    metrics::histogram!("tgi_request_queue_duration", queue_time);
-    metrics::histogram!("tgi_request_inference_duration", inference_time);
-    metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token);
+    metrics::histogram!("tgi_request_duration", total_time.as_secs_f64());
+    metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64());
+    metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64());
+    metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64());
+    metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64());
     metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64);

     // StreamResponse
@@ -389,6 +398,8 @@ async fn generate_stream(
         output_text = prompt + &output_text;
     }

+    tracing::info!(parent: &span, "Output: {}", output_text);
+
     let stream_token = StreamResponse {
         token,
         generated_text: Some(output_text),