From 3fc811f596ff382f9c5c0a37f20a47dd54ebb46e Mon Sep 17 00:00:00 2001
From: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
Date: Mon, 30 Jan 2023 16:17:32 +0100
Subject: [PATCH] formating

---
 router/src/server.rs                       | 4 ++--
 server/text_generation/models/causal_lm.py | 4 +++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/router/src/server.rs b/router/src/server.rs
index 7da56a36..ef3782d6 100644
--- a/router/src/server.rs
+++ b/router/src/server.rs
@@ -1,8 +1,8 @@
 /// HTTP Server logic
 use crate::infer::{InferError, InferStreamResponse};
 use crate::{
-    Details, ErrorResponse, GenerateParameters, GenerateRequest, GenerateResponse, Infer, StreamResponse,
-    Validation,
+    Details, ErrorResponse, GenerateParameters, GenerateRequest, GenerateResponse, Infer,
+    StreamResponse, Validation,
 };
 use axum::extract::Extension;
 use axum::http::{HeaderMap, StatusCode};
diff --git a/server/text_generation/models/causal_lm.py b/server/text_generation/models/causal_lm.py
index 0b8f9b91..a8fc23fe 100644
--- a/server/text_generation/models/causal_lm.py
+++ b/server/text_generation/models/causal_lm.py
@@ -368,7 +368,9 @@ class CausalLM(Model):
                     else:
                         seed = None
 
-                    generated_text = GeneratedText(output_text, stopping_criteria.current_tokens, reason, seed)
+                    generated_text = GeneratedText(
+                        output_text, stopping_criteria.current_tokens, reason, seed
+                    )
                 else:
                     # Keep request in the batch
                     generated_text = None