From cea64e234fd0fab2c6d52a7c160807b674759ece Mon Sep 17 00:00:00 2001
From: Morgan Funtowicz
Date: Wed, 31 Jul 2024 20:38:30 +0000
Subject: [PATCH] (chore) fmt ... why?

---
 backends/trtllm/src/backend.rs | 12 ++++++------
 backends/trtllm/src/main.rs    |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/backends/trtllm/src/backend.rs b/backends/trtllm/src/backend.rs
index 6bf18472..b23aa6c0 100644
--- a/backends/trtllm/src/backend.rs
+++ b/backends/trtllm/src/backend.rs
@@ -2,8 +2,8 @@ use std::future::Future;
 use std::path::Path;
 use std::pin::{pin, Pin};
 use std::str::FromStr;
-use std::sync::{Arc, OnceLock};
 use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, OnceLock};
 use std::task::{Context, Poll};
 use std::time::Duration;
 
@@ -12,17 +12,17 @@ use cxx::UniquePtr;
 use log::{error, warn};
 use tokenizers::Tokenizer;
 use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
-use tokio::time::{Instant, sleep};
-use tokio_stream::{Stream, StreamExt};
+use tokio::time::{sleep, Instant};
 use tokio_stream::wrappers::UnboundedReceiverStream;
-use tracing::{instrument, Level, span};
+use tokio_stream::{Stream, StreamExt};
+use tracing::{instrument, span, Level};
 
 // use tokio::sync::RwLock;
 use parking_lot::RwLock;
-use text_generation_router::{FinishReason, Token};
 use text_generation_router::infer::{Backend, GeneratedText, InferError, InferStreamResponse};
-use text_generation_router::validation::{Chunk, ValidationError, ValidGenerateRequest};
 use text_generation_router::validation::ValidationError::UnsupportedModality;
+use text_generation_router::validation::{Chunk, ValidGenerateRequest, ValidationError};
+use text_generation_router::{FinishReason, Token};
 
 use crate::errors::TensorRtLlmBackendError;
 use crate::ffi::{create_tensorrt_llm_backend, GenerationStep, TensorRtLlmBackendImpl};
diff --git a/backends/trtllm/src/main.rs b/backends/trtllm/src/main.rs
index 9faa66a4..e0ba46c7 100644
--- a/backends/trtllm/src/main.rs
+++ b/backends/trtllm/src/main.rs
@@ -159,7 +159,7 @@ async fn main() -> Result<(), TensorRtLlmBackendError> {
         true,
         max_client_batch_size,
         false,
-        false
+        false,
     )
     .await?;
     Ok(())