diff --git a/router/src/server.rs b/router/src/server.rs
index 7da56a36..ef3782d6 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -1,8 +1,8 @@
 /// HTTP Server logic
 use crate::infer::{InferError, InferStreamResponse};
 use crate::{
-    Details, ErrorResponse, GenerateParameters, GenerateRequest, GenerateResponse, Infer, StreamResponse,
-    Validation,
+    Details, ErrorResponse, GenerateParameters, GenerateRequest, GenerateResponse, Infer,
+    StreamResponse, Validation,
 };
 use axum::extract::Extension;
 use axum::http::{HeaderMap, StatusCode};
diff --git a/server/text_generation/models/causal_lm.py b/server/text_generation/models/causal_lm.py
index 0b8f9b91..a8fc23fe 100644
--- a/server/text_generation/models/causal_lm.py
+++ b/server/text_generation/models/causal_lm.py
@@ -368,7 +368,9 @@ class CausalLM(Model):
                 else:
                     seed = None
 
-                generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed)
+                generated_text = GeneratedText(
+                    output_text, stopping_criteria.current_tokens, reason, seed
+                )
             else:
                 # Keep request in the batch
                 generated_text = None