continue refactoring

OlivierDehaene 2024-06-03 15:18:03 +02:00
parent dc07ad2691
commit 188c396b88
5 changed files with 144 additions and 128 deletions

View File

@@ -16,38 +16,18 @@ use minijinja::{Environment, ErrorKind, Template};
 use serde_json::{json, Map, Value};
 use std::collections::HashMap;
 use std::sync::{
-    atomic::{AtomicBool},
     Arc,
 };
-use text_generation_client::v2::{ShardedClient};
 use thiserror::Error;
-use tokio::sync::{mpsc, Notify, OwnedSemaphorePermit, Semaphore, TryAcquireError};
+use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};
 use tokio::time::Instant;
 use tokio_stream::wrappers::UnboundedReceiverStream;
 use tokio_stream::StreamExt;
-use tracing::{instrument, Span};
+use tracing::{instrument};
 
-/// Queue entry
-#[derive(Debug)]
-pub(crate) struct Entry {
-    /// Request
-    pub request: ValidGenerateRequest,
-    /// Response sender to communicate between the Infer struct and the batching_task
-    pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>,
-    /// Span that will live as long as entry
-    pub span: Span,
-    /// Temporary span used as a guard when logging inference, wait times...
-    pub temp_span: Option<Span>,
-    /// Instant when this entry was queued
-    pub queue_time: Instant,
-    /// Instant when this entry was added to a batch
-    pub batch_time: Option<Instant>,
-}
 
-pub(crate) trait InferQueue {
-    /// Append an entry to the queue
-    #[instrument(skip_all)]
-    fn append(&self, entry: Entry);
+pub(crate) trait Scheduler {
+    fn schedule(&self, request: ValidGenerateRequest, permit: OwnedSemaphorePermit) -> Result<GenerateStreamResponse, InferError>;
 }
@@ -56,10 +36,8 @@ pub(crate) trait InferQueue {
 pub struct Infer {
     /// Validation
     validation: Validation,
-    /// Request queue
-    queue: Arc<dyn InferQueue + Send + Sync>,
-    /// Notify batcher on queue appends
-    batching_task_notifier: Arc<Notify>,
+    /// Request scheduler
+    scheduler: Arc<dyn Scheduler + Send + Sync>,
     /// Chat template
     chat_template: Option<ChatTemplate>,
     /// Inference limit
@@ -71,37 +49,12 @@ pub struct Infer {
 impl Infer {
     #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
-        client: ShardedClient,
+        scheduler: Arc<dyn Scheduler + Send + Sync>,
         validation: Validation,
-        waiting_served_ratio: f32,
-        max_batch_prefill_tokens: u32,
-        max_batch_total_tokens: u32,
-        max_waiting_tokens: usize,
-        max_batch_size: Option<usize>,
         max_concurrent_requests: usize,
-        requires_padding: bool,
-        window_size: Option<u32>,
-        speculate: u32,
-        generation_health: Arc<AtomicBool>,
         tokenizer_config: HubTokenizerConfig,
         processor_config: HubProcessorConfig,
     ) -> Self {
-        let queue = v2::Queue::new(requires_padding, 16, window_size, speculate);
-        let batching_task_notifier = Arc::new(Notify::new());
-
-        // Spawn batching background task that contains all the inference logic
-        tokio::spawn(v2::batching_task(
-            client,
-            waiting_served_ratio,
-            max_batch_prefill_tokens,
-            max_batch_total_tokens,
-            max_waiting_tokens,
-            max_batch_size,
-            queue.clone(),
-            batching_task_notifier.clone(),
-            generation_health,
-        ));
-
         let chat_template = tokenizer_config
             .chat_template
             .or(processor_config.chat_template)
@@ -126,8 +79,7 @@ impl Infer {
         Self {
             validation,
-            queue: Arc::new(queue),
-            batching_task_notifier,
+            scheduler,
             chat_template,
             limit_concurrent_requests: semaphore,
         }
@@ -157,30 +109,7 @@ impl Infer {
                 err
             })?;
 
-        // MPSC channel to communicate with the background batching task
-        let (response_tx, response_rx) = mpsc::unbounded_channel();
-        let input_length = valid_request.input_length;
-
-        // Append the request to the queue
-        self.queue.append(Entry {
-            request: valid_request,
-            response_tx,
-            span: Span::current(),
-            temp_span: None,
-            queue_time: Instant::now(),
-            batch_time: None,
-        });
-
-        // Notify the background task that we have a new entry in the queue that needs
-        // to be batched
-        self.batching_task_notifier.notify_one();
-
-        // Return stream
-        Ok((
-            permit,
-            input_length,
-            UnboundedReceiverStream::new(response_rx),
-        ))
+        self.scheduler.schedule(valid_request, permit)
     }
 
     /// Tokenizer the input
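With this hunk, the `Scheduler` trait is the only surface `Infer` depends on, so any backend can be plugged in behind `Arc<dyn Scheduler + Send + Sync>`. As a rough sketch (not part of this commit), a hypothetical `NoopScheduler` could satisfy the trait for wiring `Infer` up without a model server; it assumes the crate-internal types above are in scope and that `GenerateStreamResponse` is the `(permit, input_length, stream)` tuple returned by `schedule` in the hunks above.

```rust
// Illustrative stub only: `NoopScheduler` is a hypothetical name, not TGI code.
struct NoopScheduler;

impl Scheduler for NoopScheduler {
    fn schedule(
        &self,
        request: ValidGenerateRequest,
        permit: OwnedSemaphorePermit,
    ) -> Result<GenerateStreamResponse, InferError> {
        // The sender is dropped when this function returns, so the returned
        // stream ends immediately without yielding any tokens.
        let (_response_tx, response_rx) =
            tokio::sync::mpsc::unbounded_channel::<Result<InferStreamResponse, InferError>>();
        Ok((
            permit,
            request.input_length,
            UnboundedReceiverStream::new(response_rx),
        ))
    }
}

// An Infer built this way would exercise validation and chat templating only:
// let infer = Infer::new(Arc::new(NoopScheduler), validation, 128, tokenizer_config, processor_config);
```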

View File

@@ -1,5 +1,4 @@
-mod batcher;
+mod scheduler;
 mod queue;
 
-pub(crate) use batcher::batching_task;
-pub(crate) use queue::Queue;
+pub(crate) use scheduler::SchedulerV2;

View File

@@ -1,7 +1,5 @@
-use crate::infer::{Entry, InferQueue};
-use crate::validation::{
-    ValidGrammar, ValidParameters, ValidStoppingParameters,
-};
+use crate::infer::{InferError, InferStreamResponse};
+use crate::validation::{ValidGenerateRequest, ValidGrammar, ValidParameters, ValidStoppingParameters};
 use nohash_hasher::{BuildNoHashHasher, IntMap};
 use std::cmp::min;
 use std::collections::VecDeque;
@@ -15,6 +13,23 @@ use tokio::sync::{mpsc, oneshot};
 use tokio::time::Instant;
 use tracing::{info_span, instrument, Span};
 
+/// Queue entry
+#[derive(Debug)]
+pub(crate) struct Entry {
+    /// Request
+    pub request: ValidGenerateRequest,
+    /// Response sender to communicate between the Infer struct and the batching_task
+    pub response_tx: mpsc::UnboundedSender<Result<InferStreamResponse, InferError>>,
+    /// Span that will live as long as entry
+    pub span: Span,
+    /// Temporary span used as a guard when logging inference, wait times...
+    pub temp_span: Option<Span>,
+    /// Instant when this entry was queued
+    pub queue_time: Instant,
+    /// Instant when this entry was added to a batch
+    pub batch_time: Option<Instant>,
+}
+
 /// Request Queue
 #[derive(Debug, Clone)]
 pub(crate) struct Queue {
@@ -22,19 +37,6 @@ pub(crate) struct Queue {
     queue_sender: mpsc::UnboundedSender<QueueCommand>,
 }
 
-impl InferQueue for Queue {
-    /// Append an entry to the queue
-    #[instrument(skip_all)]
-    fn append(&self, entry: Entry) {
-        // Send append command to the background task managing the state
-        // Unwrap is safe here
-        self.queue_sender
-            .send(QueueCommand::Append(Box::new(entry), Span::current()))
-            .unwrap();
-    }
-}
-
 impl Queue {
     pub(crate) fn new(
         requires_padding: bool,
@@ -57,6 +59,15 @@ impl Queue {
         Self { queue_sender }
     }
 
+    #[instrument(skip_all)]
+    pub(crate) fn append(&self, entry: Entry) {
+        // Send append command to the background task managing the state
+        // Unwrap is safe here
+        self.queue_sender
+            .send(QueueCommand::Append(Box::new(entry), Span::current()))
+            .unwrap();
+    }
+
     // Get the next batch
     #[instrument(skip(self))]
     pub(crate) async fn next_batch(
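`Queue::append` keeps the same shape as before: it never touches the queue state directly, it sends a `QueueCommand` over an unbounded channel to the background task spawned in `Queue::new`, and `next_batch` gets its answer back over a `oneshot`. The following is a self-contained sketch of that command-channel pattern, using made-up `Command`/`Handle` names rather than the real `QueueCommand`/`Queue` types.

```rust
// Standalone illustration of the pattern, not TGI code.
use tokio::sync::{mpsc, oneshot};

#[derive(Debug)]
enum Command {
    Append(String),
    Len(oneshot::Sender<usize>),
}

#[derive(Clone)]
struct Handle {
    sender: mpsc::UnboundedSender<Command>,
}

impl Handle {
    fn new() -> Self {
        let (sender, mut receiver) = mpsc::unbounded_channel();
        // The background task is the sole owner of the state, so no lock is needed.
        tokio::spawn(async move {
            let mut state: Vec<String> = Vec::new();
            while let Some(cmd) = receiver.recv().await {
                match cmd {
                    Command::Append(entry) => state.push(entry),
                    Command::Len(reply) => {
                        let _ = reply.send(state.len());
                    }
                }
            }
        });
        Self { sender }
    }

    fn append(&self, entry: String) {
        // Mirrors Queue::append: fire-and-forget send to the state task.
        self.sender.send(Command::Append(entry)).unwrap();
    }

    async fn len(&self) -> usize {
        // Mirrors Queue::next_batch: send a oneshot and await the reply.
        let (reply_tx, reply_rx) = oneshot::channel();
        self.sender.send(Command::Len(reply_tx)).unwrap();
        reply_rx.await.unwrap()
    }
}

#[tokio::main]
async fn main() {
    let queue = Handle::new();
    queue.append("request-1".to_string());
    queue.append("request-2".to_string());
    assert_eq!(queue.len().await, 2);
}
```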

View File

@@ -1,7 +1,6 @@
 /// Batching and inference logic
-use crate::infer::Entry;
-use crate::infer::v2::{Queue};
+use crate::infer::v2::queue::{Queue, Entry};
 use crate::{FinishReason, PrefillToken, Token};
 use nohash_hasher::IntMap;
 use std::sync::{
@@ -11,10 +10,86 @@ use std::sync::{
 use text_generation_client::v2::{Batch, CachedBatch, Generation, ShardedClient};
 use text_generation_client::{ClientError};
 use tokio::sync::mpsc::error::SendError;
-use tokio::sync::Notify;
+use tokio::sync::{mpsc, Notify, OwnedSemaphorePermit};
 use tokio::time::Instant;
-use tracing::{info_span, instrument, Instrument};
-use crate::infer::{GeneratedText, InferError, InferStreamResponse};
+use tokio_stream::wrappers::UnboundedReceiverStream;
+use tracing::{info_span, instrument, Instrument, Span};
+use crate::infer::{GeneratedText, GenerateStreamResponse, InferError, InferStreamResponse, Scheduler};
+use crate::validation::ValidGenerateRequest;
+
+pub(crate) struct SchedulerV2 {
+    /// Request queue
+    queue: Queue,
+    /// Notify batcher on queue appends
+    batching_task_notifier: Arc<Notify>,
+}
+
+impl SchedulerV2 {
+    pub(crate) fn new(
+        client: ShardedClient,
+        waiting_served_ratio: f32,
+        max_batch_prefill_tokens: u32,
+        max_batch_total_tokens: u32,
+        max_waiting_tokens: usize,
+        max_batch_size: Option<usize>,
+        requires_padding: bool,
+        window_size: Option<u32>,
+        speculate: u32,
+        generation_health: Arc<AtomicBool>,
+    ) -> Self {
+        let queue = Queue::new(requires_padding, 16, window_size, speculate);
+        let batching_task_notifier = Arc::new(Notify::new());
+
+        // Spawn batching background task that contains all the inference logic
+        tokio::spawn(batching_task(
+            client,
+            waiting_served_ratio,
+            max_batch_prefill_tokens,
+            max_batch_total_tokens,
+            max_waiting_tokens,
+            max_batch_size,
+            queue.clone(),
+            batching_task_notifier.clone(),
+            generation_health,
+        ));
+
+        Self {
+            queue,
+            batching_task_notifier,
+        }
+    }
+}
+
+impl Scheduler for SchedulerV2 {
+    #[instrument(skip_all)]
+    fn schedule(&self, request: ValidGenerateRequest, permit: OwnedSemaphorePermit) -> Result<GenerateStreamResponse, InferError> {
+        // MPSC channel to communicate with the background batching task
+        let (response_tx, response_rx) = mpsc::unbounded_channel();
+        let input_length = request.input_length;
+
+        // Append the request to the queue
+        self.queue.append(Entry {
+            request,
+            response_tx,
+            span: Span::current(),
+            temp_span: None,
+            queue_time: Instant::now(),
+            batch_time: None,
+        });
+
+        // Notify the background task that we have a new entry in the queue that needs
+        // to be batched
+        self.batching_task_notifier.notify_one();
+
+        // Return stream
+        Ok((
+            permit,
+            input_length,
+            UnboundedReceiverStream::new(response_rx),
+        ))
+    }
+}
+
 /// Batching logic
 /// Will be launched in a background Tokio task
@@ -692,10 +767,10 @@ mod tests {
             content: "You are a friendly chatbot who always responds in the style of a pirate"
                 .to_string(),
         }]
-            .iter()
-            .chain(&example_chat)
-            .cloned()
-            .collect::<Vec<_>>();
+        .iter()
+        .chain(&example_chat)
+        .cloned()
+        .collect::<Vec<_>>();
 
         let test_default_templates = vec![
             ChatTemplateTestItem {
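The scheduling handshake itself is unchanged, only relocated: `schedule` appends an `Entry` and calls `notify_one`, while `batching_task` wakes on the notifier and pulls the next batch. Below is a self-contained sketch of that producer/consumer shape, standing in a plain `Mutex<VecDeque>` and integer ids for the real `Queue` and `Entry` types.

```rust
// Standalone illustration of the notify-on-append handshake, not TGI code.
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
    let queue = Arc::new(Mutex::new(VecDeque::<u32>::new()));
    let notifier = Arc::new(Notify::new());

    // Consumer: a stand-in for batching_task, draining the queue in batches.
    let consumer_queue = queue.clone();
    let consumer_notifier = notifier.clone();
    let consumer = tokio::spawn(async move {
        loop {
            consumer_notifier.notified().await;
            let batch: Vec<u32> = consumer_queue.lock().unwrap().drain(..).collect();
            let shutdown = batch.contains(&0); // 0 is this sketch's shutdown marker
            let work: Vec<u32> = batch.into_iter().filter(|id| *id != 0).collect();
            if !work.is_empty() {
                println!("processing batch of {} requests: {:?}", work.len(), work);
            }
            if shutdown {
                break;
            }
        }
    });

    // Producer: a stand-in for schedule(), appending then notifying.
    for id in [1, 2, 3, 0] {
        queue.lock().unwrap().push_back(id);
        notifier.notify_one();
    }

    consumer.await.unwrap();
}
```

Because `Notify::notify_one` stores a permit even when no task is waiting, the consumer never misses an append that happens before it reaches `notified().await`.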

View File

@@ -46,6 +46,7 @@ use tracing::{info_span, instrument, Instrument};
 use utoipa::OpenApi;
 use utoipa_swagger_ui::SwaggerUi;
 use thiserror::Error;
+use crate::infer::v2::SchedulerV2;
 
 /// Generate tokens if `stream == false` or a stream of token if `stream == true`
 #[utoipa::path(
@@ -1472,8 +1473,10 @@ pub async fn run(
     )]
     struct ApiDoc;
 
+    // Create state
+
     // Open connection, get model info and warmup
-    let (infer, health_ext, shard_info, max_batch_total_tokens) = {
+    let (scheduler, health_ext, shard_info, max_batch_total_tokens) = {
         // Helper function to check both v2 and v3
         let check_max_batch_total_tokens = |max_supported_batch_total_tokens: Option<u32>| {
             match max_supported_batch_total_tokens {
@@ -1505,18 +1508,7 @@ pub async fn run(
             }
         };
 
-        // Create state
-        let validation = Validation::new(
-            validation_workers,
-            tokenizer,
-            config,
-            max_best_of,
-            max_stop_sequences,
-            max_top_n_tokens,
-            max_input_tokens,
-            max_total_tokens,
-            grammar_support,
-        );
-
         let generation_health = Arc::new(AtomicBool::new(false));
 
         // Try to open a v3 client
@@ -1546,26 +1538,36 @@ pub async fn run(
         tracing::info!("Setting max batch total tokens to {max_batch_total_tokens}");
 
         let health_ext = HealthCheck::new(Arc::new(sharded_client.clone()), generation_health.clone());
-        let infer = Infer::new(
+        let scheduler = SchedulerV2::new(
             sharded_client,
-            validation,
             waiting_served_ratio,
             max_batch_prefill_tokens,
             max_batch_total_tokens,
             max_waiting_tokens,
             max_batch_size,
-            max_concurrent_requests,
             shard_info.requires_padding,
             shard_info.window_size,
             shard_info.speculate,
             generation_health,
-            tokenizer_config,
-            processor_config,
         );
 
-        (infer, health_ext, shard_info, max_batch_total_tokens)
+        (scheduler, health_ext, shard_info, max_batch_total_tokens)
     };
 
+    let validation = Validation::new(
+        validation_workers,
+        tokenizer,
+        config,
+        max_best_of,
+        max_stop_sequences,
+        max_top_n_tokens,
+        max_input_tokens,
+        max_total_tokens,
+        grammar_support,
+    );
+
+    let infer = Infer::new(Arc::new(scheduler), validation, max_concurrent_requests, tokenizer_config, processor_config);
+
     // Duration buckets
     let duration_matcher = Matcher::Suffix(String::from("duration"));
     let n_duration_buckets = 35;