Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-22 15:32:08 +00:00)
Add request parameters to OTel span for /v1/chat/completions endpoint (#3000)
Record request parameters in OTel span for /v1/chat/completions endpoint
parent 976eae216f
commit 58a65f7914
@@ -1217,7 +1217,7 @@ example = json!({"error": "Incomplete generation"})),
 #[instrument(
     skip_all,
     fields(
-        // parameters = ?req.parameters,
+        parameters,
         total_time,
         validation_time,
         queue_time,
@@ -1243,7 +1243,7 @@ pub(crate) async fn chat_completions(
     } = chat.clone();
     let (generate_request, using_tools): (GenerateRequest, bool) =
         chat.try_into_generate(&infer)?;
-
+    span.record("parameters", format!("{:?}", generate_request.parameters));
     let logprobs = logprobs.unwrap_or_default();
 
     // extract model id from request if specified
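
For context, a minimal, self-contained sketch of the tracing pattern this diff relies on: declaring a bare field name in the instrument attribute reserves an empty field on the handler's span, and Span::record fills it in once the generate request has been built. The GenerateParameters struct and the chat_completions signature below are hypothetical stand-ins, not the real TGI types.

// Minimal sketch of the pattern used above, assuming the `tracing` and
// `tracing-subscriber` crates. GenerateParameters and the handler signature
// are hypothetical stand-ins, not the real TGI types.
use tracing::{info, instrument, Span};

#[derive(Debug)]
struct GenerateParameters {
    temperature: Option<f32>,
    max_new_tokens: Option<u32>,
}

// `fields(parameters)` declares an empty field on the span so it can be
// recorded later, mirroring the change in the #[instrument(...)] attribute.
#[instrument(skip_all, fields(parameters))]
fn chat_completions(request_body: &str) {
    // Pretend the request body was parsed into generation parameters.
    let parameters = GenerateParameters {
        temperature: Some(0.7),
        max_new_tokens: Some(128),
    };

    // Same call shape as the added line in the diff: attach the Debug
    // representation of the parameters to the current span.
    Span::current().record("parameters", format!("{:?}", parameters).as_str());

    info!(bytes = request_body.len(), "handled chat completion request");
}

fn main() {
    // A subscriber is required to actually see the span fields.
    tracing_subscriber::fmt().init();
    chat_completions("{\"messages\": []}");
}

With a fmt subscriber installed, the parameters field then shows up on the chat_completions span alongside total_time, validation_time, queue_time, and the other fields the handler already records.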