Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-05-20 17:22:09 +00:00
* Update to Torch 2.7.0
* Try to fix typer/click issue
* Pin click to fix incompatibility with typer
* Fix some test outputs with slight deviations
* Attempt again to sync with CI
* Mamba too
* Fixup mllama. Also switch to `unsloth/Llama-3.2-11B-Vision-Instruct` for testing from the EU :).
112 lines
2.6 KiB
TOML
[project]
name = "text-generation-server"
version = "2.0.5-dev0"
description = "Text Generation Inference Python gRPC Server"
readme = "README.md"
requires-python = ">=3.9"
authors = [
    {name = "Olivier Dehaene", email = "olivier@huggingface.co"},
    {name = "Nicolas Patry", email = "nicolas@huggingface.co"},
]
dependencies = [
    # Remove explicit click dependency once typer/click are compatible again.
    "click<8.2.0",
    "einops>=0.8.0",
    "grpc-interceptor>=0.15.4",
    "grpcio>=1.67.0",
    "grpcio-reflection>=1.67.0",
    "grpcio-status>=1.67.0",
    "kernels>=0.2.1",
    "hf-transfer>=0.1.8",
    "loguru>=0.7.3",
    "numpy>=1.26,<3",
    "opentelemetry-api>=1.27.0",
    "opentelemetry-exporter-otlp>=1.27.0",
    "opentelemetry-instrumentation-grpc>=0.50b0",
    "pillow>=11.1.0",
    "prometheus-client>=0.21.0",
    "protobuf>=5.28.3",
    "py-cpuinfo>=9.0.0",
    "rich>=13.8.1",
    "safetensors>=0.4.5",
    "scipy>=1.13.1",
    "sentencepiece>=0.2.0",
    "tokenizers>=0.20.3",
    "typer>=0.15.1",
    "transformers>=4.51.0",
    "huggingface-hub>=0.30.1",
    "hf-xet>=1.0.0",
]

[[tool.uv.index]]
name = "pytorch-cu128"
url = "https://download.pytorch.org/whl/cu128"
explicit = true

[tool.uv.sources]
torch = [
    { index = "pytorch-cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]
torchvision = [
    { index = "pytorch-cu128", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]

[build-system]
requires = ["kernels>=0.1.7", "setuptools"]
build-backend = "setuptools.build_meta"

[tool.kernels.dependencies]
"kernels-community/paged-attention" = ">=0.0.2"
"kernels-community/moe" = ">=0.1.1"
"kernels-community/quantization" = ">=0.0.3"
"kernels-community/quantization-eetq" = ">=0.0.1"
"kernels-community/rotary" = ">=0.0.1"

[project.scripts]
text-generation-server = "text_generation_server.cli:app"

[project.optional-dependencies]
accelerate = [
    "accelerate>=1.2.1,<2",
]
bnb = [
    "bitsandbytes>=0.45.0",
]
compressed-tensors = [
    "compressed-tensors>=0.9.0",
]
peft = [
    "peft>=0.14.0",
]
outlines = [
    "outlines>=0.1.13",
]
dev = [
    "grpcio-tools>=1.51.1,<2.0",
    "pytest>=7.3.0,<8"
]
quantize = [
    "texttable>=1.6.7,<2",
    "datasets>=2.21,<3",
]
gen = [
    "grpcio-tools>=1.69.0",
    "mypy-protobuf>=3.6.0",
]
torch = [
    "torch==2.7.0",
    "torchvision==0.22.0",
]

[tool.pytest.ini_options]
markers = ["private: marks tests as requiring an admin hf token (deselect with '-m \"not private\"')"]

[tool.isort]
profile = "black"

[tool.uv]
package = true

[tool.setuptools.packages.find]
include = ["text_generation_server*"]
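The `[project.scripts]` table above maps the `text-generation-server` console command to the object `app` exported by `text_generation_server.cli`, and `typer` is listed as a runtime dependency for that CLI. As a rough, hypothetical sketch of what an entry-point module of that shape looks like (the real `cli.py` is much larger and its commands are not reproduced here):

```python
# Hypothetical sketch of a Typer entry-point module of the shape referenced by
# [project.scripts]; the commands and options of the real
# text_generation_server.cli are not reproduced here.
import typer

app = typer.Typer()


@app.command()
def serve(model_id: str, port: int = 3000) -> None:
    """Illustrative command only: pretend to serve a model over gRPC."""
    typer.echo(f"serving {model_id} on port {port}")


if __name__ == "__main__":
    app()
```

Once the project is installed (for example with `uv sync` or `pip install -e .`), the setuptools build backend generates a `text-generation-server` executable that imports this module and invokes `app()`.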