Commit 432566d931 by OlivierDehaene, 2023-01-27 19:46:58 +01:00
parent 54fec93193
14 changed files with 370 additions and 225 deletions

Cargo.lock

@@ -1829,6 +1829,7 @@ dependencies = [
name = "text-generation-router"
version = "0.1.0"
dependencies = [
+ "async-stream",
 "axum",
 "clap 4.0.22",
 "futures",
@@ -1841,6 +1842,7 @@ dependencies = [
 "thiserror",
 "tokenizers",
 "tokio",
+ "tokio-stream",
 "tracing",
 "tracing-subscriber",
]


@@ -7,10 +7,10 @@ service TextGenerationService {
    rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {}
    /// Empties batch cache
    rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
-   /// Generate tokens for a batch
-   rpc Generate (GenerateRequest) returns (GenerateResponse);
-   /// Generate tokens for a list of cached batches
-   rpc GenerateWithCache (GenerateWithCacheRequest) returns (GenerateWithCacheResponse);
+   /// Prefill batch and decode first token
+   rpc Prefill (PrefillRequest) returns (PrefillResponse);
+   /// Decode token for a list of prefilled batches
+   rpc Decode (DecodeRequest) returns (DecodeResponse);
}

/// Empty request
@@ -70,44 +70,60 @@ message Batch {
}

message GeneratedText {
-   /// Request
-   Request request = 1;
    /// Output
-   string output_text = 2;
+   string text = 1;
    /// Number of generated tokens
-   uint32 generated_tokens = 3;
+   uint32 generated_tokens = 2;
-   /// Tokens
-   repeated string tokens = 4;
-   /// Token IDs
-   repeated uint32 token_ids = 5;
-   /// Logprobs
-   repeated float logprobs = 6;
    /// Finish reason
-   string finish_reason = 7;
+   string finish_reason = 3;
    /// Seed
-   optional uint64 seed = 8;
+   optional uint64 seed = 4;
}

+message PrefillTokens {
+   /// Prefill Token IDs
+   repeated uint32 ids = 1;
+   /// Prefill Logprobs
+   repeated float logprobs = 2;
+   /// Prefill tokens
+   repeated string texts = 3;
+}
+
+message Generation {
+   /// Request ID
+   uint64 request_id = 1;
+   /// Prefill tokens (optional)
+   PrefillTokens prefill_tokens = 2;
+   /// Token ID
+   uint32 token_id = 3;
+   /// Logprob
+   float token_logprob = 4;
+   /// Text
+   string token_text = 5;
+   /// Complete generated text
+   GeneratedText generated_text = 6;
+}
+
-message GenerateRequest {
+message PrefillRequest {
    /// Batch
    Batch batch = 1;
}

-message GenerateResponse {
-   /// Finished requests
-   repeated GeneratedText generated_texts = 1;
+message PrefillResponse {
+   /// Generation
+   repeated Generation generations = 1;
    /// Next batch (cached)
    optional Batch batch = 2;
}

-message GenerateWithCacheRequest {
+message DecodeRequest {
    /// Cached batches
    repeated Batch batches = 1;
}

-message GenerateWithCacheResponse {
-   /// Finished requests
-   repeated GeneratedText generated_texts = 1;
+message DecodeResponse {
+   /// Decodes
+   repeated Generation generations = 1;
    /// Next batch (cached)
    optional Batch batch = 2;
}
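The Generate/GenerateWithCache pair is replaced by Prefill/Decode: Prefill pushes a fresh batch through the model and returns one Generation per request (the first decoded token, plus the optional prefill tokens), while Decode advances the already-cached batches by one token. Below is a rough, illustrative sketch of the call order a client follows against this split; FakeStub and its countdown logic are stand-ins invented for the example, not part of this commit.

# Illustrative only: a fresh batch goes through Prefill once; the cached batch
# it returns is then advanced with Decode until no request is left.
from typing import Dict, List, Optional, Tuple


class FakeStub:
    """Stand-in for the real gRPC stub: each request just counts down tokens."""

    def __init__(self, tokens_left: Dict[int, int]):
        self.tokens_left = tokens_left  # request_id -> tokens still to generate

    def _step(self) -> Tuple[List[Tuple[int, bool]], Optional[str]]:
        generations = []
        for request_id in list(self.tokens_left):
            self.tokens_left[request_id] -= 1
            finished = self.tokens_left[request_id] == 0
            generations.append((request_id, finished))
            if finished:
                del self.tokens_left[request_id]
        next_batch = "cached-batch" if self.tokens_left else None
        return generations, next_batch

    def Prefill(self, batch):   # decode the first token for a new batch
        return self._step()

    def Decode(self, batches):  # decode one more token for cached batches
        return self._step()


def drive(stub: FakeStub, batch: str) -> List[int]:
    finished_ids = []
    generations, cached = stub.Prefill(batch)
    while True:
        finished_ids += [rid for rid, done in generations if done]
        if cached is None:
            return finished_ids
        generations, cached = stub.Decode([cached])


print(drive(FakeStub({0: 2, 1: 3}), "new batch"))  # -> [0, 1]

In the router changes below, batching_task plays the role of drive(): it prefills new batches as they arrive and concatenates them into the cached batch between decode calls.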


@@ -13,6 +13,7 @@ name = "text-generation-router"
path = "src/main.rs"

[dependencies]
+async-stream = "0.3.3"
axum = { version = "0.5.16", features = ["json", "serde_json"] }
text-generation-client = { path = "client" }
clap = { version = "4.0.15", features = ["derive", "env"] }
@@ -25,6 +26,7 @@ serde_json = "1.0.85"
thiserror = "1.0.37"
tokenizers = "0.13.0"
tokio = { version = "1.21.1", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] }
+tokio-stream = "0.1.11"
tracing = "0.1.36"
tracing-subscriber = { version = "0.3.15", features = ["json"] }


@@ -73,15 +73,15 @@ impl Client {
    /// Returns a list of generated texts of request that met their stopping criteria
    /// and the next cached batch
    #[instrument(skip(self))]
-   pub async fn generate(&mut self, batch: Batch) -> Result<(Vec<GeneratedText>, Option<Batch>)> {
-       let request = tonic::Request::new(GenerateRequest { batch: Some(batch) });
+   pub async fn prefill(&mut self, batch: Batch) -> Result<(Vec<Generation>, Option<Batch>)> {
+       let request = tonic::Request::new(PrefillRequest { batch: Some(batch) });
        let response = self
            .stub
-           .generate(request)
-           .instrument(info_span!("generate"))
+           .prefill(request)
+           .instrument(info_span!("prefill"))
            .await?
            .into_inner();
-       Ok((response.generated_texts, response.batch))
+       Ok((response.generations, response.batch))
    }

    /// Generate one token for each request in the given cached batch
@@ -89,17 +89,17 @@ impl Client {
    /// Returns a list of generated texts of request that met their stopping criteria
    /// and the next cached batch
    #[instrument(skip(self))]
-   pub async fn generate_with_cache(
+   pub async fn decode(
        &mut self,
        batches: Vec<Batch>,
-   ) -> Result<(Vec<GeneratedText>, Option<Batch>)> {
-       let request = tonic::Request::new(GenerateWithCacheRequest { batches });
+   ) -> Result<(Vec<Generation>, Option<Batch>)> {
+       let request = tonic::Request::new(DecodeRequest { batches });
        let response = self
            .stub
-           .generate_with_cache(request)
-           .instrument(info_span!("generate_with_cache"))
+           .decode(request)
+           .instrument(info_span!("decode"))
            .await?
            .into_inner();
-       Ok((response.generated_texts, response.batch))
+       Ok((response.generations, response.batch))
    }
}


@@ -7,7 +7,8 @@ mod sharded_client;
pub use client::Client;
pub use pb::generate::v1::{
-   Batch, GeneratedText, NextTokenChooserParameters, Request, StoppingCriteriaParameters,
+   Batch, GeneratedText, Generation, NextTokenChooserParameters, Request,
+   StoppingCriteriaParameters,
};
pub use sharded_client::ShardedClient;
use thiserror::Error;


@@ -1,6 +1,6 @@
/// Multi shard Client
use crate::Result;
-use crate::{Batch, Client, GeneratedText};
+use crate::{Batch, Client, Generation};
use futures::future::join_all;
use futures::future::select_all;
use tonic::transport::Uri;
@@ -41,11 +41,11 @@ impl ShardedClient {
    ///
    /// Returns a list of generated texts of request that met their stopping criteria
    /// and the next cached batch
-   pub async fn generate(&mut self, batch: Batch) -> Result<(Vec<GeneratedText>, Option<Batch>)> {
+   pub async fn prefill(&mut self, batch: Batch) -> Result<(Vec<Generation>, Option<Batch>)> {
        let futures: Vec<_> = self
            .clients
            .iter_mut()
-           .map(|client| Box::pin(client.generate(batch.clone())))
+           .map(|client| Box::pin(client.prefill(batch.clone())))
            .collect();
        // As soon as we receive one response, we can return as all shards will return the same
        let (result, _, _) = select_all(futures).await;
@@ -56,14 +56,14 @@ impl ShardedClient {
    ///
    /// Returns a list of generated texts of request that met their stopping criteria
    /// and the next cached batch
-   pub async fn generate_with_cache(
+   pub async fn decode(
        &mut self,
        batches: Vec<Batch>,
-   ) -> Result<(Vec<GeneratedText>, Option<Batch>)> {
+   ) -> Result<(Vec<Generation>, Option<Batch>)> {
        let futures: Vec<_> = self
            .clients
            .iter_mut()
-           .map(|client| Box::pin(client.generate_with_cache(batches.clone())))
+           .map(|client| Box::pin(client.decode(batches.clone())))
            .collect();
        // As soon as we receive one response, we can return as all shards will return the same
        let (result, _, _) = select_all(futures).await;
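prefill() and decode() on ShardedClient issue the same call to every shard and keep only the first reply, since all shards return identical results. A rough asyncio analogue of that select_all pattern, with invented names, purely to show the first-completed-wins behaviour:

# Rough analogue of futures::future::select_all: fan the call out to every
# shard and return as soon as the first one answers. Names are illustrative.
import asyncio
from typing import Any, Awaitable, List


async def first_completed(calls: List[Awaitable[Any]]) -> Any:
    tasks = [asyncio.ensure_future(c) for c in calls]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    for task in pending:          # the remaining shard calls are not needed
        task.cancel()
    return next(iter(done)).result()


async def fake_shard(shard_id: int, delay: float) -> str:
    await asyncio.sleep(delay)
    return f"result from shard {shard_id}"


async def main() -> None:
    result = await first_completed([fake_shard(0, 0.05), fake_shard(1, 0.01)])
    print(result)                 # -> result from shard 1 (the fastest one)


asyncio.run(main())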


@@ -1,15 +1,17 @@
/// Batching and inference logic
-use crate::{Db, Entry};
+use crate::{Db, Entry, Token};
use crate::{ErrorResponse, GenerateRequest};
use axum::http::StatusCode;
use axum::Json;
use nohash_hasher::IntMap;
use std::future::Future;
use std::sync::Arc;
-use text_generation_client::{Batch, ClientError, GeneratedText, ShardedClient};
+use text_generation_client::{Batch, ClientError, GeneratedText, Generation, ShardedClient};
use thiserror::Error;
-use tokio::sync::{oneshot, Notify};
+use tokio::sync::{mpsc, Notify};
use tokio::time::Instant;
+use tokio_stream::wrappers::UnboundedReceiverStream;
+use tokio_stream::StreamExt;
use tracing::instrument;

/// Batcher
@@ -51,14 +53,14 @@ impl Batcher {
        Self { db, shared }
    }

-   /// Add a new request to the database and return a future that will generate the text
-   pub(crate) async fn infer(
+   /// Add a new request to the database and return a stream of tokens
+   pub(crate) fn infer_stream(
        &self,
        input_length: usize,
        request: GenerateRequest,
-   ) -> Result<InferResponse, InferError> {
-       // One shot channel to communicate with the background batching task
-       let (response_tx, response_rx) = oneshot::channel();
+   ) -> UnboundedReceiverStream<Result<InferStreamResponse, InferError>> {
+       // MPSC channel to communicate with the background batching task
+       let (response_tx, response_rx) = mpsc::unbounded_channel();

        // Try to append the request to the database
        self.db.append(Entry {
@@ -73,12 +75,45 @@ impl Batcher {
        // to be batched
        self.shared.batching_task.notify_one();

-       // Await on the response from the background task
-       // We can safely unwrap as the background task will never drop the sender
-       response_rx
-           .await
-           .unwrap()
-           .map_err(|err| InferError::GenerationError(err.to_string()))
+       // Return stream
+       UnboundedReceiverStream::new(response_rx)
+   }
+
+   pub(crate) async fn infer(
+       &self,
+       input_length: usize,
+       request: GenerateRequest,
+   ) -> Result<InferResponse, InferError> {
+       let mut stream = self.infer_stream(input_length, request);
+
+       let mut result_tokens = Vec::new();
+       let mut result_generated_text = None;
+       let mut result_start = None;
+       let mut result_queued = None;
+
+       while let Some(response) = stream.next().await {
+           match response? {
+               InferStreamResponse::Prefill(prefill_tokens) => {
+                   result_tokens.extend(prefill_tokens)
+               }
+               InferStreamResponse::Token(token) => result_tokens.push(token),
+               InferStreamResponse::End {
+                   generated_text,
+                   start,
+                   queued,
+               } => {
+                   result_generated_text = Some(generated_text);
+                   result_start = Some(start);
+                   result_queued = Some(queued)
+               }
+           }
+       }
+
+       Ok(InferResponse {
+           tokens: result_tokens,
+           generated_text: result_generated_text.unwrap(),
+           queued: result_queued.unwrap(),
+           start: result_start.unwrap(),
+       })
    }
}
@@ -106,7 +141,7 @@ async fn batching_task(
        // This batch might be smaller than the maximum batch size if there are not enough requests
        // waiting in the DB
        while let Some((mut entries, batch)) = db.next_batch(None, max_batch_size) {
-           let mut cached_batch = wrap_future(client.generate(batch), &mut entries).await;
+           let mut cached_batch = wrap_future(client.prefill(batch), &mut entries).await;
            let mut waiting_tokens = 1;

            // We loop until we do not receive any cached batch from the inference server (== until
@@ -132,7 +167,7 @@ async fn batching_task(
                    {
                        // Generate one token for this new batch to have the attention past in cache
                        let new_cached_batch =
-                           wrap_future(client.generate(new_batch), &mut new_entries).await;
+                           wrap_future(client.prefill(new_batch), &mut new_entries).await;
                        // Reset waiting counter
                        waiting_tokens = 1;
                        // Extend current batch with the new batch
@@ -143,7 +178,7 @@ async fn batching_task(
                    }
                }

-               cached_batch = wrap_future(client.generate_with_cache(batches), &mut entries).await;
+               cached_batch = wrap_future(client.decode(batches), &mut entries).await;
                waiting_tokens += 1;
            }
        }
@@ -152,12 +187,12 @@ async fn batching_task(
/// Wrap a future inside a match statement to handle errors and send the response to the Batcher
async fn wrap_future(
-   future: impl Future<Output = Result<(Vec<GeneratedText>, Option<Batch>), ClientError>>,
+   future: impl Future<Output = Result<(Vec<Generation>, Option<Batch>), ClientError>>,
    entries: &mut IntMap<u64, Entry>,
) -> Option<Batch> {
    match future.await {
-       Ok((generated_texts, next_batch)) => {
-           send_generated(generated_texts, entries);
+       Ok((generations, next_batch)) => {
+           send_generated(generations, entries);
            next_batch
        }
        // If we have an error, we discard the whole batch
@@ -172,47 +207,79 @@ async fn wrap_future(
fn send_error(error: ClientError, entries: &mut IntMap<u64, Entry>) {
    entries.drain().for_each(|(_, entry)| {
        // unwrap_or is valid here as we don't care if the receiver is gone.
-       entry.response_tx.send(Err(error.clone())).unwrap_or(());
+       entry
+           .response_tx
+           .send(Err(InferError::GenerationError(error.to_string())))
+           .unwrap_or(());
    });
}

/// Send `generated_text` to the Batcher for all `finished`
-fn send_generated(finished: Vec<GeneratedText>, entries: &mut IntMap<u64, Entry>) {
-   finished.into_iter().for_each(|output| {
+fn send_generated(generations: Vec<Generation>, entries: &mut IntMap<u64, Entry>) {
+   generations.into_iter().for_each(|generation| {
+       // We can `expect` here as the request id should always be in the entries
        let entry = entries
-           .remove(&output.request.unwrap().id)
+           .get(&generation.request_id)
            .expect("ID not found in entries. This is a bug.");
-       let response = InferResponse {
-           output_text: output.output_text,
-           generated_tokens: output.generated_tokens,
-           token_ids: output.token_ids,
-           tokens: output.tokens,
-           logprobs: output.logprobs,
-           finish_reason: output.finish_reason,
-           seed: output.seed,
-           queued: entry.time,
-           start: entry.batch_time.unwrap(), // unwrap is always valid
-           end: Instant::now(),
-       };
-       // unwrap_or is valid here as we don't care if the receiver is gone.
-       entry.response_tx.send(Ok(response)).unwrap_or(());
+
+       if let Some(prefill_tokens) = generation.prefill_tokens {
+           let tokens = prefill_tokens
+               .ids
+               .into_iter()
+               .zip(prefill_tokens.logprobs.into_iter())
+               .zip(prefill_tokens.texts.into_iter())
+               .map(|((id, logprob), text)| Token(id, text, logprob))
+               .collect();
+           entry
+               .response_tx
+               .send(Ok(InferStreamResponse::Prefill(tokens)))
+               .unwrap_or(());
+       }
+
+       let token = Token(
+           generation.token_id,
+           generation.token_text,
+           generation.token_logprob,
+       );
+       entry
+           .response_tx
+           .send(Ok(InferStreamResponse::Token(token)))
+           .unwrap_or(());
+
+       if let Some(generated_text) = generation.generated_text {
+           let entry = entries
+               .remove(&generation.request_id)
+               .expect("ID not found in entries. This is a bug.");
+           entry
+               .response_tx
+               .send(Ok(InferStreamResponse::End {
+                   generated_text,
+                   queued: entry.time,
+                   start: entry.batch_time.unwrap(),
+               }))
+               .unwrap_or(());
+       }
    });
}

+#[derive(Debug)]
+pub(crate) enum InferStreamResponse {
+   Prefill(Vec<Token>),
+   Token(Token),
+   End {
+       generated_text: GeneratedText,
+       start: Instant,
+       queued: Instant,
+   },
+}
+
#[derive(Debug)]
pub(crate) struct InferResponse {
-   pub(crate) output_text: String,
-   pub(crate) generated_tokens: u32,
-   pub(crate) token_ids: Vec<u32>,
-   pub(crate) tokens: Vec<String>,
-   pub(crate) logprobs: Vec<f32>,
-   pub(crate) finish_reason: String,
-   pub(crate) seed: Option<u64>,
+   pub(crate) tokens: Vec<Token>,
+   pub(crate) generated_text: GeneratedText,
+   pub(crate) seed: Option<u64>,
    pub(crate) queued: Instant,
    pub(crate) start: Instant,
-   pub(crate) end: Instant,
}

#[derive(Debug, Error)]
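The Batcher now exposes both shapes of the same result: infer_stream() hands back the mpsc receiver as a stream of Prefill / Token / End messages, and infer() is just a consumer that folds that stream back into a single response, as the old API did. A small asyncio sketch of that fold; the message classes below are simplified stand-ins for the Rust InferStreamResponse variants, not the project's types:

# Folding a stream of Prefill/Token/End messages into one response, the way the
# new infer() consumes infer_stream(). Everything here is a simplified stand-in.
import asyncio
from dataclasses import dataclass
from typing import AsyncIterator, List, Union


@dataclass
class Prefill:
    tokens: List[str]


@dataclass
class Token:
    text: str


@dataclass
class End:
    generated_text: str


Message = Union[Prefill, Token, End]


async def fake_stream() -> AsyncIterator[Message]:
    yield Prefill(tokens=["Hello", ","])   # prompt tokens arrive in one message
    yield Token(" world")                  # then one message per generated token
    yield Token("!")
    yield End(generated_text="Hello, world!")


async def aggregate(stream: AsyncIterator[Message]):
    tokens: List[str] = []
    generated_text = None
    async for message in stream:
        if isinstance(message, Prefill):
            tokens.extend(message.tokens)
        elif isinstance(message, Token):
            tokens.append(message.text)
        else:
            generated_text = message.generated_text
    return tokens, generated_text


print(asyncio.run(aggregate(fake_stream())))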


@@ -1,14 +1,15 @@
+use crate::batcher::InferError;
/// This code is massively inspired by Tokio mini-redis
-use crate::InferResponse;
+use crate::batcher::InferStreamResponse;
use crate::{GenerateParameters, GenerateRequest};
use nohash_hasher::{BuildNoHashHasher, IntMap};
use parking_lot::Mutex;
use std::collections::BTreeMap;
use std::sync::Arc;
use text_generation_client::{
-   Batch, ClientError, NextTokenChooserParameters, Request, StoppingCriteriaParameters,
+   Batch, NextTokenChooserParameters, Request, StoppingCriteriaParameters,
};
-use tokio::sync::oneshot::Sender;
+use tokio::sync::mpsc::UnboundedSender;
use tokio::time::Instant;

/// Database entry
@@ -17,7 +18,7 @@ pub(crate) struct Entry {
    /// Request
    pub request: GenerateRequest,
    /// Response sender to communicate between the Batcher and the batching_task
-   pub response_tx: Sender<Result<InferResponse, ClientError>>,
+   pub response_tx: UnboundedSender<Result<InferStreamResponse, InferError>>,
    /// Number of tokens in the input
    pub input_length: usize,
    /// Instant when this entry was created


@@ -4,7 +4,7 @@ mod db;
pub mod server;
mod validation;

-use batcher::{Batcher, InferResponse};
+use batcher::Batcher;
use db::{Db, Entry};
use serde::{Deserialize, Serialize};
use validation::Validation;
@@ -69,12 +69,15 @@ pub(crate) struct GenerateRequest {
    pub parameters: GenerateParameters,
}

+#[derive(Debug, Serialize)]
+pub struct Token(u32, String, f32);
+
#[derive(Serialize)]
pub(crate) struct Details {
    pub finish_reason: String,
    pub generated_tokens: u32,
    pub seed: Option<u64>,
-   pub tokens: Vec<(u32, String, f32)>,
+   pub tokens: Vec<Token>,
}

#[derive(Serialize)]
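Token is a tuple struct, so (assuming serde's default behaviour for tuple structs) each token serializes as a three-element JSON array rather than an object, and Details carries tokens as [id, text, logprob] triples. A short sketch of the resulting details payload, written in Python only to show the expected shape; all values are made up:

# Expected shape of `details` once Token(u32, String, f32) goes through serde
# (tuple structs serialize as JSON arrays). All values below are made up.
import json

details = {
    "finish_reason": "length",
    "generated_tokens": 3,
    "seed": None,
    "tokens": [
        [17, " world", -0.42],   # one [id, text, logprob] triple per token
        [0, "!", -1.10],
        [2, "</s>", -0.05],
    ],
}
print(json.dumps(details, indent=2))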


@@ -1,11 +1,14 @@
+use crate::batcher::InferStreamResponse;
use crate::{
    Batcher, Details, ErrorResponse, GenerateParameters, GenerateRequest, GeneratedText, Validation,
};
use axum::extract::Extension;
use axum::http::{HeaderMap, StatusCode};
+use axum::response::sse::{Event, KeepAlive, Sse};
use axum::response::IntoResponse;
use axum::routing::{get, post};
-use axum::{Json, Router};
+use axum::{BoxError, Json, Router};
+use futures::Stream;
use std::net::SocketAddr;
use std::sync::Arc;
use text_generation_client::ShardedClient;
@@ -13,6 +16,7 @@ use tokenizers::Tokenizer;
use tokio::signal;
use tokio::sync::Semaphore;
use tokio::time::Instant;
+use tokio_stream::StreamExt;
use tracing::instrument;

// Server shared state
@@ -111,21 +115,12 @@ async fn generate(
    // Token details
    let details = match details {
-       true => {
-           let tokens = response
-               .token_ids
-               .into_iter()
-               .zip(response.tokens.into_iter())
-               .zip(response.logprobs.into_iter())
-               .map(|((id, text), logprob)| (id, text, logprob))
-               .collect();
-           Some(Details {
-               seed: response.seed,
-               finish_reason: response.finish_reason,
-               generated_tokens: response.generated_tokens,
-               tokens,
-           })
-       }
+       true => Some(Details {
+           finish_reason: response.generated_text.finish_reason,
+           generated_tokens: response.generated_text.generated_tokens,
+           tokens: response.tokens,
+           seed: response.seed,
+       }),
        false => None,
    };
@@ -133,8 +128,8 @@ async fn generate(
    let total_time = start_time.elapsed();
    let validation_time = response.queued - start_time;
    let queue_time = response.start - response.queued;
-   let inference_time = response.end - response.start;
-   let time_per_token = inference_time / response.generated_tokens;
+   let inference_time = Instant::now() - response.start;
+   let time_per_token = inference_time / response.generated_text.generated_tokens;

    // Headers
    let mut headers = HeaderMap::new();
@@ -166,16 +161,57 @@ async fn generate(
    tracing::Span::current().record("inference_time", format!("{:?}", inference_time));
    tracing::Span::current().record("time_per_token", format!("{:?}", time_per_token));
    tracing::Span::current().record("seed", format!("{:?}", response.seed));
-   tracing::info!("Output: {}", response.output_text);
+   tracing::info!("Output: {}", response.generated_text.text);

    // Send response
    let response = vec![GeneratedText {
-       generated_text: response.output_text,
+       generated_text: response.generated_text.text,
        details,
    }];
    Ok((headers, Json(response)))
}

+async fn generate_stream(
+   state: Extension<ServerState>,
+   req: Json<GenerateRequest>,
+) -> Sse<impl Stream<Item = Result<Event, BoxError>>> {
+   let stream = async_stream::stream! {
+       // Limit concurrent requests by acquiring a permit from the semaphore
+       let _permit = state.limit_concurrent_requests.try_acquire().map_err(|err| {
+           tracing::error!("Model is overloaded");
+           err
+       })?;
+
+       // Validate request
+       let (input_length, validated_request) =
+           state.validation.validate(req.0).await.map_err(|err| {
+               tracing::error!("{}", err);
+               err
+           })?;
+
+       // Inference
+       let mut response_stream = state
+           .batcher
+           .infer_stream(input_length, validated_request);
+
+       while let Some(response) = response_stream.next().await {
+           match response {
+               Ok(response) => {
+                   if let InferStreamResponse::Token(token) = response {
+                       yield Ok(Event::default().json_data(token).unwrap());
+                   }
+               }
+               Err(err) => {
+                   tracing::error!("{}", err.to_string());
+                   yield Ok(Event::default().data(err.to_string()));
+               }
+           }
+       }
+   };
+
+   Sse::new(stream).keep_alive(KeepAlive::default())
+}
+
/// Serving method
#[allow(clippy::too_many_arguments)]
pub async fn run(
@@ -201,6 +237,7 @@ pub async fn run(
    let app = Router::new()
        .route("/", post(generate))
        .route("/generate", post(generate))
+       .route("/generate_stream", post(generate_stream))
        .route("/", get(health))
        .route("/health", get(health))
        .layer(Extension(shared_state.clone()));
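generate_stream returns a Server-Sent Events response: each generated token is emitted as a data: event holding the JSON-serialized Token, and errors are emitted as plain data: lines. A minimal, dependency-free sketch of parsing such an event stream on the client side; the hard-coded body and array payloads below are assumptions standing in for a real response from POST /generate_stream:

# Minimal SSE parsing sketch: an event is one or more "data:" lines followed by
# a blank line. The body below is a stand-in for a /generate_stream response.
import json
from typing import Iterator, List


def parse_sse(lines: Iterator[str]) -> Iterator[str]:
    data: List[str] = []
    for line in lines:
        line = line.rstrip("\n")
        if line.startswith("data:"):
            data.append(line[len("data:"):].strip())
        elif line == "" and data:      # a blank line terminates the event
            yield "\n".join(data)
            data = []
    if data:
        yield "\n".join(data)


body = (
    'data: [17, " world", -0.42]\n\n'
    'data: [0, "!", -1.10]\n\n'
)

for event in parse_sse(iter(body.splitlines(keepends=True))):
    token_id, text, logprob = json.loads(event)
    print(token_id, text, logprob)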


@@ -1,6 +1,6 @@
gen-server:
    # Compile protos
-   pip install grpcio-tools==1.49.1 --no-cache-dir
+   #pip install grpcio-tools==1.49.1 --no-cache-dir
    mkdir text_generation/pb || true
    python -m grpc_tools.protoc -I../proto --python_out=text_generation/pb --grpc_python_out=text_generation/pb ../proto/generate.proto
    find text_generation/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;


@@ -5,7 +5,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, PreTrainedTokenize
from typing import Optional, Tuple, List, Type

from text_generation.models import Model
-from text_generation.models.types import GeneratedText, Batch
+from text_generation.models.types import Batch, PrefillTokens, Generation, GeneratedText
from text_generation.pb import generate_pb2
from text_generation.utils import NextTokenChooser, StoppingCriteria, Sampling
@@ -23,7 +23,6 @@ class CausalLMBatch(Batch):
    # All tokens
    all_input_ids: List[torch.Tensor]
-   all_logprobs: List[Optional[torch.Tensor]]

    # Lengths of all generations present in the batch
    input_lengths: List[int]
@@ -48,16 +47,15 @@ class CausalLMBatch(Batch):
    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        device: torch.device,
    ) -> "CausalLMBatch":
        inputs = []
        next_token_choosers = []
        stopping_criterias = []
        input_lengths = []
-       all_logprobs = []

        # Parse batch
        for r in pb.requests:
@@ -67,7 +65,6 @@ class CausalLMBatch(Batch):
            stopping_criterias.append(
                StoppingCriteria.from_pb(r.stopping_parameters, tokenizer)
            )
-           all_logprobs.append(None)

        pad_to_multiple_of = 8 if device.type == "cuda" else None
        tokenized_inputs = tokenizer(
@@ -89,7 +86,6 @@ class CausalLMBatch(Batch):
            position_ids=position_ids,
            past_key_values=None,
            all_input_ids=all_input_ids,
-           all_logprobs=all_logprobs,
            input_lengths=input_lengths,
            next_token_choosers=next_token_choosers,
            stopping_criterias=stopping_criterias,
@@ -107,7 +103,6 @@ class CausalLMBatch(Batch):
        requests = []
        input_lengths = []
        all_input_ids = []
-       all_logprobs = []
        next_token_choosers = []
        stopping_criterias = []
@@ -124,7 +119,6 @@ class CausalLMBatch(Batch):
            requests.extend(batch.requests)
            input_lengths.extend(batch.input_lengths)
            all_input_ids.extend(batch.all_input_ids)
-           all_logprobs.extend(batch.all_logprobs)
            next_token_choosers.extend(batch.next_token_choosers)
            stopping_criterias.extend(batch.stopping_criterias)
@@ -151,8 +145,8 @@ class CausalLMBatch(Batch):
            # We need to slice the attention mask to remove padding from previous steps
            attention_mask[
-               start_index:end_index, -batch.max_sequence_length :
-           ] = batch.attention_mask[:, -batch.max_sequence_length :]
+               start_index:end_index, -batch.max_sequence_length:
+           ] = batch.attention_mask[:, -batch.max_sequence_length:]

            # Create empty tensor
            # position_ids is always of shape [batch_size, 1]
@@ -198,22 +192,22 @@ class CausalLMBatch(Batch):
                # We slice the past keys and values to remove the padding from previous batches
                if batch.keys_head_dim_last:
                    past_key_values[j][0][
                        start_index:end_index,
                        :,
-                       -(batch.max_sequence_length - 1) :,
+                       -(batch.max_sequence_length - 1):,
                        :,
-                   ] = past_keys[:, :, -(batch.max_sequence_length - 1) :, :]
+                   ] = past_keys[:, :, -(batch.max_sequence_length - 1):, :]
                else:
                    past_key_values[j][0][
                        start_index:end_index,
                        :,
                        :,
-                       -(batch.max_sequence_length - 1) :,
-                   ] = past_keys[:, :, :, -(batch.max_sequence_length - 1) :]
+                       -(batch.max_sequence_length - 1):,
+                   ] = past_keys[:, :, :, -(batch.max_sequence_length - 1):]

                past_key_values[j][1][
-                   start_index:end_index, :, -(batch.max_sequence_length - 1) :, :
-               ] = past_values[:, :, -(batch.max_sequence_length - 1) :, :]
+                   start_index:end_index, :, -(batch.max_sequence_length - 1):, :
+               ] = past_values[:, :, -(batch.max_sequence_length - 1):, :]

            start_index += batch.size
@@ -225,7 +219,6 @@ class CausalLMBatch(Batch):
            position_ids=position_ids,
            past_key_values=past_key_values,
            all_input_ids=all_input_ids,
-           all_logprobs=all_logprobs,
            input_lengths=input_lengths,
            next_token_choosers=next_token_choosers,
            stopping_criterias=stopping_criterias,
@@ -234,6 +227,9 @@ class CausalLMBatch(Batch):
            keys_head_dim_last=batches[0].keys_head_dim_last,
        )

+   def __len__(self):
+       return len(self.requests)
+

class CausalLM(Model):
    def __init__(self, model_name: str, quantize=False):
@@ -275,7 +271,7 @@ class CausalLM(Model):
        )

    def forward(
        self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
    ) -> Tuple[torch.Tensor, List[Tuple[torch.Tensor, torch.Tensor]]]:
        # Model Forward
        outputs = self.model.forward(
@@ -288,8 +284,8 @@ class CausalLM(Model):
        return outputs.logits, outputs.past_key_values

    def generate_token(
        self, batch: CausalLMBatch
-   ) -> Tuple[List[GeneratedText], Optional[CausalLMBatch]]:
+   ) -> Tuple[List[Generation], Optional[CausalLMBatch]]:
        # For some reason, inference_mode does not work well with GLOO which we use on CPU
        context_manager = (
            torch.no_grad if self.device.type == "cpu" else torch.inference_mode
@@ -309,14 +305,13 @@ class CausalLM(Model):
        next_batch_input_lengths = []
        next_batch_input_ids = []
        next_batch_all_input_ids = []
-       next_batch_all_logprobs = []

        # Metadata
        next_batch_size = 0
        next_batch_max_sequence_length = 0

-       # Finished requests
-       generated_texts: List[GeneratedText] = []
+       # Results
+       results = []

        # Zipped iterator
        iterator = zip(
@@ -326,55 +321,42 @@ class CausalLM(Model):
            batch.next_token_choosers,
            batch.stopping_criterias,
            batch.all_input_ids,
-           batch.all_logprobs,
        )

        # For each member of the batch
        for i, (
            request,
            input_length,
            logits,
            next_token_chooser,
            stopping_criteria,
            all_input_ids,
-           all_logprobs,
        ) in enumerate(iterator):
            # Select next token
            tokens, logprobs = next_token_chooser(all_input_ids, logits)
-           next_token = tokens[-1].view(1, 1)
+           next_token_id = tokens[-1].view(1, 1)

            # Append next token to all tokens
-           all_input_ids = torch.cat([all_input_ids, next_token])
+           all_input_ids = torch.cat([all_input_ids, next_token_id])
            new_input_length = input_length + 1

-           if all_logprobs is None:
-               # logprobs of all prompt tokens (except the first one) and the generated token
-               all_logprobs = logprobs.gather(1, all_input_ids[1:])
-           else:
-               # logprob of the generated token
-               next_token_logprob = logprobs[-1, next_token]
-               all_logprobs = torch.cat([all_logprobs, next_token_logprob])
+           # Generated token
+           next_token_logprob = logprobs[-1, next_token_id]
+           next_token_id_squeezed = next_token_id.squeeze()
+           next_token_text = self.decode(next_token_id.squeeze())

            # Evaluate stopping criteria
            stop, reason = stopping_criteria(
-               next_token.squeeze(),
-               self.tokenizer.decode(
-                   next_token.squeeze(), clean_up_tokenization_spaces=False
-               ),
+               next_token_id_squeezed,
+               next_token_text,
            )

            if stop:
                # Decode generated tokens
                generated_text = self.decode(
-                   all_input_ids[-stopping_criteria.current_tokens :, 0]
+                   all_input_ids[-stopping_criteria.current_tokens:, 0]
                )
                output_text = request.inputs + generated_text
-               # Slice with input_length to remove padding
-               token_ids = all_input_ids[-new_input_length:]
-               tokens = self.tokenizer.batch_decode(token_ids)
-               # Add NaN for the first prompt token
-               logprobs = [float("nan")] + all_logprobs[-input_length:].squeeze(
-                   1
-               ).tolist()

                # Get seed
                if isinstance(next_token_chooser.choice, Sampling):
@@ -382,39 +364,48 @@ class CausalLM(Model):
                else:
                    seed = None

-               # Add to the list of finished generations with the original request
-               generated_texts.append(
-                   GeneratedText(
-                       request=request,
-                       output_text=output_text,
-                       generated_tokens=stopping_criteria.current_tokens,
-                       tokens=tokens,
-                       token_ids=token_ids.squeeze(1).tolist(),
-                       logprobs=logprobs,
-                       reason=reason,
-                       seed=seed,
-                   )
-               )
-           # add to the next batch
+               generated_text = GeneratedText(
+                   output_text, stopping_criteria.current_tokens, reason, seed
+               )
            else:
+               # Keep request in the batch
+               generated_text = None
                next_batch_keep_indices.append(i)
-               next_batch_input_ids.append(next_token)
+               next_batch_input_ids.append(next_token_id)
                next_batch_all_input_ids.append(all_input_ids)
-               next_batch_all_logprobs.append(all_logprobs)
                next_batch_size += 1
                next_batch_input_lengths.append(new_input_length)
                next_batch_max_sequence_length = max(
                    next_batch_max_sequence_length, new_input_length
                )

+           # Prefill
+           if stopping_criteria.current_tokens == 0:
+               # Remove generated token to only have prefill and add nan for first prompt token
+               prefill_logprobs = [float("nan")] + logprobs[-new_input_length:-1].gather(
+                   1, all_input_ids[-new_input_length:-1]
+               ).squeeze(1).tolist()
+               prefill_token_ids = all_input_ids[-new_input_length:-1]
+               prefill_texts = self.tokenizer.batch_decode(
+                   prefill_token_ids,
+                   clean_up_tokenization_spaces=False,
+                   skip_special_tokens=False,
+               )
+               prefill_tokens = PrefillTokens(
+                   prefill_token_ids, prefill_logprobs, prefill_texts
+               )
+           else:
+               prefill_tokens = None
+
+           result = Generation(
+               request.id,
+               prefill_tokens,
+               next_token_id_squeezed,
+               next_token_logprob,
+               next_token_text,
+               generated_text,
+           )
+           results.append(result)
+
        # We finished all generations in the batch; there is no next batch
        if not next_batch_keep_indices:
-           return generated_texts, None
+           return results, None

        next_batch_input_ids = torch.cat(next_batch_input_ids, dim=0)
        # If we finished at least one generation, we need to evict the indices of the generations that finished
        # from the values of the next batch
-       if generated_texts:
+       if len(next_batch_keep_indices) != len(batch):
            # Apply indices to attention mask, past key values and other items that need to be cached
            next_batch_attention_mask = batch.attention_mask[next_batch_keep_indices]
            next_batch_position_ids = batch.position_ids[next_batch_keep_indices]
@@ -461,7 +452,6 @@ class CausalLM(Model):
            position_ids=next_batch_position_ids,
            past_key_values=next_batch_past_key_values,
            all_input_ids=next_batch_all_input_ids,
-           all_logprobs=next_batch_all_logprobs,
            input_lengths=next_batch_input_lengths,
            next_token_choosers=next_batch_next_token_choosers,
            stopping_criterias=next_batch_stopping_criterias,
@@ -469,4 +459,4 @@ class CausalLM(Model):
            max_sequence_length=next_batch_max_sequence_length,
            keys_head_dim_last=batch.keys_head_dim_last,
        )
-       return generated_texts, next_batch
+       return results, next_batch
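The new prefill branch reports a logprob for every prompt token, with NaN for the very first one (which has nothing before it to be scored from), by gathering prompt token ids against the logprob matrix. A standalone torch illustration of that gather pattern; shapes, names and the exact slicing are simplified here, while the real code slices logprobs and all_input_ids out of the running batch:

# Standalone illustration: logprobs has one row per prompt position, row i
# scores the token at position i + 1, so the first prompt token gets NaN.
import torch

vocab_size, prompt_len = 10, 4
torch.manual_seed(0)

# Log-probabilities produced while prefilling the prompt: [prompt_len, vocab]
logprobs = torch.log_softmax(torch.randn(prompt_len, vocab_size), dim=-1)

# Prompt token ids, shaped [prompt_len, 1] like all_input_ids in the batch
prompt_ids = torch.randint(vocab_size, (prompt_len, 1))

prefill_logprobs = [float("nan")] + logprobs[:-1].gather(
    1, prompt_ids[1:]
).squeeze(1).tolist()

print(len(prefill_logprobs) == prompt_len)  # True: one value per prompt token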


@@ -17,10 +17,10 @@ class Batch(ABC):
    @classmethod
    @abstractmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        device: torch.device,
    ) -> "Batch":
        raise NotImplementedError
@@ -32,23 +32,49 @@ class Batch(ABC):
@dataclass
class GeneratedText:
-   request: generate_pb2.Request
-   output_text: str
+   text: str
    generated_tokens: int
-   tokens: List[str]
-   token_ids: List[int]
-   logprobs: List[float]
-   reason: str
+   finish_reason: str
    seed: Optional[int]

    def to_pb(self) -> generate_pb2.GeneratedText:
        return generate_pb2.GeneratedText(
-           request=self.request,
-           output_text=self.output_text,
+           text=self.text,
            generated_tokens=self.generated_tokens,
-           tokens=self.tokens,
-           token_ids=self.token_ids,
-           logprobs=self.logprobs,
-           finish_reason=self.reason,
-           seed=self.seed,
+           finish_reason=self.finish_reason,
+           seed=self.seed,
        )

+
+@dataclass
+class PrefillTokens:
+   token_ids: List[int]
+   logprobs: List[float]
+   texts: List[str]
+
+   def to_pb(self) -> generate_pb2.PrefillTokens:
+       return generate_pb2.PrefillTokens(
+           ids=self.token_ids,
+           logprobs=self.logprobs,
+           texts=self.texts,
+       )
+
+
+@dataclass
+class Generation:
+   request_id: int
+   prefill_tokens: Optional[PrefillTokens]
+   token_id: int
+   token_logprob: float
+   token_text: str
+   generated_text: Optional[GeneratedText]
+
+   def to_pb(self) -> generate_pb2.Generation:
+       return generate_pb2.Generation(
+           request_id=self.request_id,
+           prefill_tokens=self.prefill_tokens.to_pb()
+           if self.prefill_tokens is not None
+           else None,
+           token_id=self.token_id,
+           token_logprob=self.token_logprob,
+           token_text=self.token_text,
+           generated_text=self.generated_text.to_pb()
+           if self.generated_text is not None
+           else None,
+       )


@@ -27,22 +27,22 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
        self.cache.clear()
        return generate_pb2.ClearCacheResponse()

-   async def Generate(self, request, context):
+   async def Prefill(self, request, context):
        batch = self.model.batch_type.from_pb(
            request.batch, self.model.tokenizer, self.model.device
        )

-       generated_texts, next_batch = self.model.generate_token(batch)
+       generations, next_batch = self.model.generate_token(batch)
        self.cache.set(next_batch)

-       return generate_pb2.GenerateResponse(
-           generated_texts=[
-               generated_text.to_pb() for generated_text in generated_texts
-           ],
+       return generate_pb2.PrefillResponse(
+           generations=[
+               generation.to_pb() for generation in generations
+           ],
            batch=next_batch.to_pb() if next_batch else None,
        )

-   async def GenerateWithCache(self, request, context):
+   async def Decode(self, request, context):
        if len(request.batches) == 0:
            raise ValueError("Must provide at least one batch")
@@ -58,12 +58,12 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
        else:
            batch = batches[0]

-       generated_texts, next_batch = self.model.generate_token(batch)
+       generations, next_batch = self.model.generate_token(batch)
        self.cache.set(next_batch)

-       return generate_pb2.GenerateWithCacheResponse(
-           generated_texts=[
-               generated_text.to_pb() for generated_text in generated_texts
-           ],
+       return generate_pb2.DecodeResponse(
+           generations=[
+               generation.to_pb() for generation in generations
+           ],
            batch=next_batch.to_pb() if next_batch else None,
        )
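Prefill stores the batch it produced in the server cache, and Decode later pulls the cached batches back, concatenating them when it is handed more than one (the concatenation itself sits outside this hunk). A toy sketch of that cache discipline, with a dict and strings standing in for the real Cache and Batch classes; none of the names below are the project's:

# Toy sketch: Prefill caches the batch it produced; Decode pops the cached
# batches it was asked to advance and re-caches the (possibly merged) result.
from typing import Dict, List, Optional


class ToyCache:
    def __init__(self) -> None:
        self._batches: Dict[int, str] = {}

    def set(self, batch_id: int, batch: Optional[str]) -> None:
        if batch is not None:
            self._batches[batch_id] = batch

    def pop(self, batch_id: int) -> str:
        return self._batches.pop(batch_id)


def prefill(cache: ToyCache, batch_id: int) -> None:
    cache.set(batch_id, f"state of batch {batch_id}")


def decode(cache: ToyCache, batch_ids: List[int]) -> str:
    batches = [cache.pop(i) for i in batch_ids]
    merged = " + ".join(batches)
    cache.set(batch_ids[0], merged)   # keep the merged batch under one id
    return merged


cache = ToyCache()
prefill(cache, 0)
prefill(cache, 1)
print(decode(cache, [0, 1]))   # -> state of batch 0 + state of batch 1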