mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-24 00:12:08 +00:00)
* Build faster
* Make --model-gguf optional
* Bump llama.cpp
* Enable mmap, offload_kqv & flash_attention by default
* Update doc
* Better error message
* Update doc
* Update installed packages
* Save gguf in models/MODEL_ID/model.gguf
* Fix build with Mach-O
* Quantize without llama-quantize
* Bump llama.cpp and switch to ggml-org
* Remove make-gguf.sh
* Update Cargo.lock
* Support HF_HUB_USER_AGENT_ORIGIN
* Bump llama.cpp
* Add --build-arg llamacpp_native & llamacpp_cpu_arm_arch

Signed-off-by: Adrien Gallouët <angt@huggingface.co>
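For illustration only, here is a minimal Rust sketch of two of the behaviours listed above: reading HF_HUB_USER_AGENT_ORIGIN from the environment, and deriving the models/MODEL_ID/model.gguf save path. The environment variable name and the path layout come from the commit messages; the helper names and the exact user-agent format are assumptions, not the router's actual code.

use std::env;
use std::path::PathBuf;

// Hypothetical helper: if HF_HUB_USER_AGENT_ORIGIN is set, append it to a
// base user-agent string (the exact format the router uses is an assumption).
fn user_agent(base: &str) -> String {
    match env::var("HF_HUB_USER_AGENT_ORIGIN") {
        Ok(origin) if !origin.is_empty() => format!("{base}; origin: {origin}"),
        _ => base.to_string(),
    }
}

// Per the commit above, downloaded GGUF files are saved as
// models/MODEL_ID/model.gguf.
fn gguf_path(model_id: &str) -> PathBuf {
    PathBuf::from("models").join(model_id).join("model.gguf")
}

fn main() {
    println!("{}", user_agent("text-generation-router-llamacpp"));
    println!("{}", gguf_path("some-org/some-model").display());
}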
23 lines · 512 B · TOML
[package]
name = "text-generation-router-llamacpp"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true

[build-dependencies]
bindgen = "0.71.1"
pkg-config = "0.3.31"

[dependencies]
async-trait = "0.1.85"
clap = "4.5.27"
hf-hub.workspace = true
num_cpus = "1.16.0"
text-generation-router = { path = "../../router" }
thiserror = "2.0.11"
tokenizers.workspace = true
tokio = { version = "1.43.0", features = ["process"] }
tokio-stream = "0.1.17"
tracing = "0.1.41"
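The bindgen and pkg-config build-dependencies imply a build script that locates llama.cpp and generates Rust bindings for its C API. The sketch below shows how such a build.rs might look under those assumptions; the pkg-config package name "llama", the wrapper.h header, and the output file name are all hypothetical, not taken from the repository.

// build.rs (hypothetical sketch)
use std::env;
use std::path::PathBuf;

fn main() {
    // Locate an installed llama.cpp via pkg-config; the package name
    // "llama" is an assumption.
    pkg_config::Config::new()
        .probe("llama")
        .expect("llama.cpp not found via pkg-config");

    // Generate Rust bindings for the C API with bindgen; "wrapper.h" is
    // a hypothetical header that would include llama.h.
    let bindings = bindgen::Builder::default()
        .header("wrapper.h")
        .generate()
        .expect("failed to generate bindings");

    // Write the generated bindings into OUT_DIR, where the crate can
    // include! them.
    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
    bindings
        .write_to_file(out_path.join("bindings.rs"))
        .expect("failed to write bindings");
}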