text-generation-inference/router/client/src/sharded_client.rs

83 lines
3.0 KiB
Rust
Raw Normal View History

2022-10-18 13:19:03 +00:00
/// Multi shard Client
2022-10-08 10:30:12 +00:00
use crate::Result;
use crate::{Batch, Client, GeneratedText};
2022-10-08 10:30:12 +00:00
use futures::future::join_all;
2022-10-22 21:40:05 +00:00
use futures::future::select_all;
2022-10-08 10:30:12 +00:00
use tonic::transport::Uri;
2022-10-18 13:19:03 +00:00
/// Text Generation Inference gRPC multi client.
///
/// Fans requests out to one [`Client`] per model shard so callers can treat
/// the sharded deployment as a single endpoint.
pub struct ShardedClient {
    // One connected gRPC client per shard; all shards receive every request.
    clients: Vec<Client>,
}
impl ShardedClient {
2022-10-18 13:19:03 +00:00
fn new(clients: Vec<Client>) -> Self {
2022-10-27 12:25:29 +00:00
Self { clients }
2022-10-08 10:30:12 +00:00
}
2022-10-18 13:19:03 +00:00
/// Create a new ShardedClient from a master client. The master client will communicate with
/// the other shards and returns all uris/unix sockets with the `service_discovery` gRPC method.
2022-10-17 12:59:00 +00:00
async fn from_master_client(mut master_client: Client) -> Result<Self> {
2022-10-18 13:19:03 +00:00
// Get all uris/unix sockets from the master client
2022-10-08 10:30:12 +00:00
let uris = master_client.service_discovery().await.unwrap();
2022-10-18 13:19:03 +00:00
let futures = uris.into_iter().map(Client::connect_uds);
2022-10-17 12:59:00 +00:00
let clients: Result<Vec<Client>> = join_all(futures).await.into_iter().collect();
Ok(Self::new(clients?))
2022-10-08 10:30:12 +00:00
}
2022-10-18 13:19:03 +00:00
/// Returns a client connected to the given uri
2022-10-17 12:59:00 +00:00
pub async fn connect(uri: Uri) -> Result<Self> {
let master_client = Client::connect(uri).await?;
2022-10-08 10:30:12 +00:00
Self::from_master_client(master_client).await
}
2022-10-17 12:59:00 +00:00
/// Returns a client connected to the given unix socket
pub async fn connect_uds(path: String) -> Result<Self> {
let master_client = Client::connect_uds(path).await?;
2022-10-08 10:30:12 +00:00
Self::from_master_client(master_client).await
}
2022-10-18 13:19:03 +00:00
/// Generate one token for each request in the given batch
///
/// Returns a list of generated texts of request that met their stopping criteria
2022-10-18 13:19:03 +00:00
/// and the next cached batch
pub async fn generate(&mut self, batch: Batch) -> Result<(Vec<GeneratedText>, Option<Batch>)> {
2022-10-22 21:40:05 +00:00
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.generate(batch.clone())))
2022-10-22 21:40:05 +00:00
.collect();
2022-10-18 13:19:03 +00:00
// As soon as we receive one response, we can return as all shards will return the same
2022-10-22 21:40:05 +00:00
let (result, _, _) = select_all(futures).await;
result
2022-10-08 10:30:12 +00:00
}
/// Generate one token for each request in the given cached batch
2022-10-18 13:19:03 +00:00
///
/// Returns a list of generated texts of request that met their stopping criteria
2022-10-18 13:19:03 +00:00
/// and the next cached batch
pub async fn generate_with_cache(
2022-10-22 21:40:05 +00:00
&mut self,
batches: Vec<Batch>,
) -> Result<(Vec<GeneratedText>, Option<Batch>)> {
2022-10-22 21:40:05 +00:00
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| Box::pin(client.generate_with_cache(batches.clone())))
2022-10-22 21:40:05 +00:00
.collect();
2022-10-18 13:19:03 +00:00
// As soon as we receive one response, we can return as all shards will return the same
2022-10-22 21:40:05 +00:00
let (result, _, _) = select_all(futures).await;
result
}
/// Clear the past generations cache
pub async fn clear_cache(&mut self) -> Result<()> {
let futures: Vec<_> = self
.clients
.iter_mut()
.map(|client| client.clear_cache())
.collect();
join_all(futures).await.into_iter().collect()
}
2022-10-08 10:30:12 +00:00
}