Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-23 07:52:06 +00:00
- Refactor code to allow supporting multiple versions of the generate.proto at the same time
- Add v3/generate.proto (ISO to generate.proto for now, but allows future changes without impacting v2 backends)
- Add Schedule trait to abstract queuing and batching mechanisms that will differ in the future
- Add SchedulerV2/V3 impl
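The Schedule trait itself is not part of the file shown on this page. Below is a minimal sketch of what such an abstraction over queuing and batching could look like, with a v2 and a v3 scheduler behind the same interface. Every name, field, and signature here (Entry, append, next_batch, the FIFO logic) is an illustrative assumption, not the actual text-generation-inference API.

// Illustrative only: names and signatures are assumptions, not the real
// text-generation-inference code.
use std::collections::VecDeque;

/// A queued generation request (placeholder fields).
#[allow(dead_code)]
struct Entry {
    id: u64,
    input: String,
}

/// Abstracts queuing and batching so the router can drive a v2 or a v3
/// backend through the same interface.
trait Schedule {
    /// Queue a new request.
    fn append(&mut self, entry: Entry);
    /// Pop the next batch to forward to the shards, if any.
    fn next_batch(&mut self, max_size: usize) -> Option<Vec<Entry>>;
}

/// Scheduler targeting v2 backends (generate.proto).
#[derive(Default)]
struct SchedulerV2 {
    queue: VecDeque<Entry>,
}

/// Scheduler targeting v3 backends (v3/generate.proto).
#[derive(Default)]
struct SchedulerV3 {
    queue: VecDeque<Entry>,
}

impl Schedule for SchedulerV2 {
    fn append(&mut self, entry: Entry) {
        self.queue.push_back(entry);
    }
    fn next_batch(&mut self, max_size: usize) -> Option<Vec<Entry>> {
        if self.queue.is_empty() {
            return None;
        }
        let n = max_size.min(self.queue.len());
        Some(self.queue.drain(..n).collect())
    }
}

impl Schedule for SchedulerV3 {
    fn append(&mut self, entry: Entry) {
        self.queue.push_back(entry);
    }
    fn next_batch(&mut self, max_size: usize) -> Option<Vec<Entry>> {
        // A v3 scheduler could batch differently; this sketch reuses FIFO.
        if self.queue.is_empty() {
            return None;
        }
        let n = max_size.min(self.queue.len());
        Some(self.queue.drain(..n).collect())
    }
}

fn main() {
    // The router can hold either scheduler behind the same trait object.
    let mut scheduler: Box<dyn Schedule> = Box::new(SchedulerV3::default());
    scheduler.append(Entry { id: 0, input: "Hello".to_string() });
    if let Some(batch) = scheduler.next_batch(8) {
        println!("dispatching a batch of {} request(s)", batch.len());
    }
}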
14 lines
418 B
Rust
// Protobuf/gRPC bindings generated from the proto definitions.
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;

mod client;
mod sharded_client;

// Client for a single shard.
pub use client::Client;
// Re-export the v3 message types so callers do not depend on the generated
// `pb::generate::v3` path directly.
pub use pb::generate::v3::{
    input_chunk::Chunk, Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType,
    HealthResponse, Image, InfoResponse, Input, InputChunk, NextTokenChooserParameters, Request,
    StoppingCriteriaParameters, Tokens,
};
// Client that fans requests out across all shards.
pub use sharded_client::ShardedClient;
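The pub use lines above flatten the nested path of the generated bindings (presumably produced at build time from the proto files, e.g. by prost/tonic-build) so callers never have to spell out pb::generate::v3 themselves. A self-contained toy version of that re-export pattern, with every name below made up for illustration:

// Toy illustration of the re-export pattern; every name here is made up.
mod pb {
    pub mod generate {
        pub mod v3 {
            /// Stand-in for a generated message type.
            #[derive(Debug, Default)]
            pub struct HealthResponse {
                pub ok: bool,
            }
        }
    }
}

// Surface the nested type at the root, as the module above does.
pub use pb::generate::v3::HealthResponse;

fn main() {
    // Callers use the short, re-exported path...
    let flat = HealthResponse { ok: true };
    // ...which is exactly the same type as the nested one.
    let nested: pb::generate::v3::HealthResponse = flat;
    println!("{nested:?}");
}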