Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-25)
Add llamacpp backend (squashed commit history):

* Add llamacpp backend
* Get rid of llama_batch_get_one()
* Use max_batch_total_tokens
* Handle max_batch_size
* Add some input validation checks
* Handle ctx args & fix sampling
* Add GPU args
* Add --defrag-threshold
* Add a stupid batch mechanism
* Cleanup
* Add --numa
* Fix args
* Enable flash attention by default
* Add --offload-kqv
* Fix batch_pos
* backend(llama): add CUDA Dockerfile_llamacpp for now
* Only export the latest logits
* Output real logprobs
* Fix batching
* Fix seq iterations
* Auto-detect n_threads when not provided
* Clear request cache after completion
* Remove warmup
* Cleanup
* backend(llama): add CUDA architectures build argument for Dockerfile
* Add specific args for batch
* Add --type-v & --type-k
* Bump llamacpp to b4623
* Disable graceful shutdown in debug mode
* Update Dockerfile_llamacpp
* Cleanup Dockerfile
* Update Cargo.lock
* Update args
* Simplify batching logic
* Set TGI_LLAMA_PKG_CUDA from CUDA_VERSION
* Rename bindings
* Remove n_ctx
* Make max_batch_total_tokens optional
* Ensure all samplers are freed on error
* Initialize penalty_last_n with llamacpp default value
* Cleanup
* Improve default settings
* Add doc
* Update docs
* Thanks clippy
* Thanks cargo fmt
* Update docs
* Do not use HOSTNAME env
* Bump llama.cpp & cuda
* Fix requirements.txt
* Fix fmt
* Enable KQV offload by default
* Remove Ngrok tunneling
* Remove .cargo/config.toml
* Fix Dockerfile
* Add missing cuda prefix
* Handle custom llama.cpp dir
* Cleanup
* Add README.md
* Add HF transfer
* Fix bool args
* Update doc
* Update doc

Signed-off-by: Adrien Gallouët <angt@huggingface.co>
Co-authored-by: Morgan Funtowicz <funtowiczmo@gmail.com>
285 lines · 8.5 KiB · Rust
mod backend;

use backend::{
    BackendError, LlamacppBackend, LlamacppConfig, LlamacppGGMLType, LlamacppNuma,
    LlamacppSplitMode,
};
use clap::Parser;
use text_generation_router::{logging, server, usage_stats};
use thiserror::Error;
use tokenizers::{FromPretrainedParameters, Tokenizer};
use tokio::sync::oneshot::error::RecvError;
use tracing::{error, warn};
/// Backend Configuration
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
    /// Name of the model to load.
    #[clap(long, env)]
    model_id: String,

    /// Revision of the model.
    #[clap(default_value = "main", long, env)]
    revision: String,

    /// Path to the GGUF model file for inference.
    #[clap(long, env)]
    model_gguf: String, // TODO Option() with hf->gguf & quantize

    /// Number of threads to use for generation.
    #[clap(long, env)]
    n_threads: Option<usize>,

    /// Number of threads to use for batch processing.
    #[clap(long, env)]
    n_threads_batch: Option<usize>,

    /// Number of layers to store in VRAM.
    #[clap(default_value = "0", long, env)]
    n_gpu_layers: usize,

    /// Split the model across multiple GPUs.
    #[clap(default_value = "layer", long, env)]
    split_mode: LlamacppSplitMode,

    /// Defragment the KV cache if holes/size > threshold.
    #[clap(default_value = "-1.0", long, env)]
    defrag_threshold: f32,

    /// Enable NUMA optimizations.
    #[clap(default_value = "disabled", value_enum, long, env)]
    numa: LlamacppNuma,

    /// Use memory mapping for the model.
    #[clap(long, env)]
    use_mmap: bool,

    /// Use memory locking to prevent swapping.
    #[clap(long, env)]
    use_mlock: bool,

    /// Enable offloading of KQV operations to the GPU.
    #[clap(long, env)]
    offload_kqv: bool,

    /// Enable flash attention for faster inference. (EXPERIMENTAL)
    #[clap(long, env)]
    flash_attention: bool,

    /// Data type used for K cache.
    #[clap(default_value = "f16", value_enum, long, env)]
    type_k: LlamacppGGMLType,

    /// Data type used for V cache.
    #[clap(default_value = "f16", value_enum, long, env)]
    type_v: LlamacppGGMLType,

    /// Number of tokenizer workers used for payload validation and truncation.
    #[clap(default_value = "2", long, env)]
    validation_workers: usize,

    /// Maximum number of concurrent requests.
    #[clap(long, env)]
    max_concurrent_requests: Option<usize>,

    /// Maximum number of input tokens per request.
    #[clap(default_value = "1024", long, env)]
    max_input_tokens: usize,

    /// Maximum number of total tokens (input + output) per request.
    #[clap(default_value = "2048", long, env)]
    max_total_tokens: usize,

    /// Maximum number of tokens in a batch.
    #[clap(long, env)]
    max_batch_total_tokens: Option<usize>,

    /// Maximum number of tokens in a physical batch.
    #[clap(long, env)]
    max_physical_batch_total_tokens: Option<usize>,

    /// Maximum number of requests per batch.
    #[clap(long, env)]
    max_batch_size: Option<usize>,

    /// IP address to listen on.
    #[clap(default_value = "0.0.0.0", long)]
    hostname: String,

    /// Port to listen on.
    #[clap(default_value = "3000", long, short, env)]
    port: u16,

    /// Enable JSON output format.
    #[clap(long, env)]
    json_output: bool,

    /// OTLP endpoint for telemetry data.
    #[clap(long, env)]
    otlp_endpoint: Option<String>,

    /// Service name for OTLP telemetry.
    #[clap(default_value = "text-generation-inference.router", long, env)]
    otlp_service_name: String,

    /// Allowed origins for CORS.
    #[clap(long, env)]
    cors_allow_origin: Option<Vec<String>>,

    /// Path to the tokenizer configuration file.
    #[clap(long, env)]
    tokenizer_config_path: Option<String>,

    /// Disable grammar support.
    #[clap(long, env)]
    disable_grammar_support: bool,

    /// Maximum number of inputs per request.
    #[clap(default_value = "4", long, env)]
    max_client_batch_size: usize,

    /// Level of usage statistics collection.
    #[clap(default_value = "on", long, env)]
    usage_stats: usage_stats::UsageStatsLevel,

    /// Maximum payload size in bytes.
    #[clap(default_value = "2000000", long, env)]
    payload_limit: usize,
}
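
// An illustrative invocation, sketched from the flags declared above (the
// binary name and paths are placeholders; most fields are declared with
// `#[clap(long, env)]`, so they can also be set through matching
// SCREAMING_SNAKE_CASE environment variables):
//
//   text-generation-router-llamacpp \
//       --model-id <org>/<model> \
//       --model-gguf /path/to/model.gguf \
//       --n-gpu-layers 99 \
//       --port 3000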
#[tokio::main]
async fn main() -> Result<(), RouterError> {
    let args = Args::parse();

    logging::init_logging(args.otlp_endpoint, args.otlp_service_name, args.json_output);

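    // Resolve runtime defaults for anything left unset (or set to 0): the
    // thread count falls back to the number of logical CPUs, batch threads
    // fall back to `n_threads`, `max_batch_size` falls back to
    // `n_threads_batch`, and the token budgets are derived from
    // `max_total_tokens`, so a bare invocation still gets workable settings.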
    let n_threads = match args.n_threads {
        Some(0) | None => num_cpus::get(),
        Some(threads) => threads,
    };
    let n_threads_batch = match args.n_threads_batch {
        Some(0) | None => n_threads,
        Some(threads) => threads,
    };
    let max_batch_size = match args.max_batch_size {
        Some(0) | None => n_threads_batch,
        Some(threads) => threads,
    };
    let max_batch_total_tokens = match args.max_batch_total_tokens {
        None => max_batch_size * args.max_total_tokens,
        Some(size) => size,
    };
    let max_physical_batch_total_tokens = match args.max_physical_batch_total_tokens {
        None => max_batch_total_tokens,
        Some(size) => size,
    };
    let max_concurrent_requests = match args.max_concurrent_requests {
        None => max_batch_size * 2,
        Some(size) => size,
    };
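    // Enforce the token budget invariants up front: a request's input must fit
    // within its total, and one request (or a full batch of requests) must fit
    // within the batch-wide token budget.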
    if args.max_input_tokens >= args.max_total_tokens {
        return Err(RouterError::ArgumentValidation(
            "`max_input_tokens` must be < `max_total_tokens`".to_string(),
        ));
    }
    if args.max_total_tokens > max_batch_total_tokens {
        return Err(RouterError::ArgumentValidation(
            "`max_total_tokens` must be <= `max_batch_total_tokens`".to_string(),
        ));
    }
    if max_batch_size * args.max_total_tokens > max_batch_total_tokens {
        return Err(RouterError::ArgumentValidation(
            "`max_batch_size` * `max_total_tokens` must be <= `max_batch_total_tokens`"
                .to_string(),
        ));
    }

    // TODO: check if we use the same cache as the server,
    // and check if llamacpp is faster
    let tokenizer = {
        let token = std::env::var("HF_TOKEN")
            .or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
            .ok();
        let params = FromPretrainedParameters {
            revision: args.revision.clone(),
            token,
            ..Default::default()
        };
        Tokenizer::from_pretrained(args.model_id.clone(), Some(params))?
    };
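
    // Spawn the llama.cpp backend: `ok` resolves once the model has loaded (or
    // failed to), and `shutdown` lets the signal handler below stop it. The
    // hard-coded 5 ms `batch_timeout` bounds how long the backend waits to
    // group incoming requests into a single batch.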
    let (backend, ok, shutdown) = LlamacppBackend::new(
        LlamacppConfig {
            model_gguf: args.model_gguf,
            n_threads,
            n_threads_batch,
            n_gpu_layers: args.n_gpu_layers,
            split_mode: args.split_mode,
            defrag_threshold: args.defrag_threshold,
            numa: args.numa,
            use_mmap: args.use_mmap,
            use_mlock: args.use_mlock,
            flash_attention: args.flash_attention,
            type_k: args.type_k,
            type_v: args.type_v,
            offload_kqv: args.offload_kqv,
            max_batch_total_tokens,
            max_physical_batch_total_tokens,
            max_batch_size,
            batch_timeout: tokio::time::Duration::from_millis(5),
        },
        tokenizer,
    );
    ok.await??;
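
    // Debug builds skip graceful shutdown: the first Ctrl-C immediately
    // signals the backend to stop.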
    if cfg!(debug_assertions) {
        warn!("Graceful shutdown disabled!");
        let _ = tokio::task::spawn(async move {
            let _ = tokio::signal::ctrl_c().await;
            let _ = shutdown.send(true);
        });
    }
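
    // Hand the backend to the shared TGI router; features this backend does
    // not use (best-of, stop sequences, top-n tokens, ngrok tunneling) are
    // pinned off inline below.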
    server::run(
        backend,
        max_concurrent_requests,
        0, // max_best_of
        0, // max_stop_sequences
        0, // max_top_n_tokens
        args.max_input_tokens,
        args.max_total_tokens,
        args.validation_workers,
        None, // api_key
        args.model_id, // tokenizer_name
        args.tokenizer_config_path,
        Some(args.revision),
        false, // trust_remote_code
        args.hostname,
        args.port,
        args.cors_allow_origin,
        false, // ngrok
        None,  // ngrok_authtoken
        None,  // ngrok_edge
        args.disable_grammar_support,
        args.max_client_batch_size,
        args.usage_stats,
        args.payload_limit,
    )
    .await?;
    Ok(())
}

#[derive(Debug, Error)]
enum RouterError {
    #[error("Argument validation error: {0}")]
    ArgumentValidation(String),
    #[error("Tokenizer error: {0}")]
    Tokenizer(#[from] tokenizers::Error),
    #[error("Backend error: {0}")]
    Backend(#[from] BackendError),
    #[error("WebServer error: {0}")]
    WebServer(#[from] server::WebServerError),
    #[error("Recv error: {0}")]
    RecvError(#[from] RecvError),
}