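# pyproject.toml for text-generation-server, the Python gRPC server of
# Text Generation Inference. Dependencies are managed with uv (see the
# [tool.uv*] tables below) and the package is built with setuptools.
#
# Quickstart (a sketch, assuming uv is installed; not taken from the repo docs):
#   uv sync --extra torch
#   text-generation-server --help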
[project]
name = "text-generation-server"
version = "2.0.5-dev0"
description = "Text Generation Inference Python gRPC Server"
readme = "README.md"
requires-python = ">=3.9"
authors = [
    {name = "Olivier Dehaene", email = "olivier@huggingface.co"},
    {name = "Nicolas Patry", email = "nicolas@huggingface.co"},
]
dependencies = [
    "einops>=0.8.0",
    "grpc-interceptor>=0.15.4",
    "grpcio>=1.67.0",
    "grpcio-reflection>=1.67.0",
    "grpcio-status>=1.67.0",
    "kernels>=0.2.1",
    "hf-transfer>=0.1.8",
    "loguru>=0.7.3",
    "numpy>=1.26,<3",
    "opentelemetry-api>=1.27.0",
    "opentelemetry-exporter-otlp>=1.27.0",
    "opentelemetry-instrumentation-grpc>=0.50b0",
    "pillow>=11.1.0",
    "prometheus-client>=0.21.0",
    "protobuf>=5.28.3",
    "py-cpuinfo>=9.0.0",
    "rich>=13.8.1",
    "safetensors>=0.4.5",
    "scipy>=1.13.1",
    "sentencepiece>=0.2.0",
    "tokenizers>=0.20.3",
    "typer>=0.15.1",
    "transformers>=4.51.0",
    "huggingface-hub>=0.30.1",
    "hf-xet>=1.0.0",
]

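# Extra package index serving CUDA 12.4 builds of PyTorch. `explicit = true`
# means uv only consults this index for packages explicitly pinned to it in
# [tool.uv.sources] below, never during general resolution.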
[[tool.uv.index]]
name = "pytorch-cu124"
url = "https://download.pytorch.org/whl/cu124"
explicit = true

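# On Linux and Windows, torch and torchvision resolve from the pytorch-cu124
# index above; on other platforms (e.g. macOS) they fall back to the default
# index. The pinned versions themselves live in the `torch` extra under
# [project.optional-dependencies].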
[tool.uv.sources]
torch = [
    { index = "pytorch-cu124", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]
torchvision = [
    { index = "pytorch-cu124", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
]

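# setuptools builds the wheel; `kernels` is additionally required at build
# time, presumably so the Hub kernels declared in [tool.kernels.dependencies]
# can be resolved when the package is built.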
[build-system]
requires = ["kernels>=0.1.7", "setuptools"]
build-backend = "setuptools.build_meta"

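# Pre-compiled kernels fetched from the Hugging Face Hub by the `kernels`
# package: keys are Hub repository ids, values are version constraints.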
[tool.kernels.dependencies]
"kernels-community/paged-attention" = ">=0.0.2"
"kernels-community/moe" = ">=0.1.1"
"kernels-community/quantization" = ">=0.0.3"
"kernels-community/quantization-eetq" = ">=0.0.1"
"kernels-community/rotary" = ">=0.0.1"

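# Console entry point: installing the package exposes a `text-generation-server`
# command that dispatches to the `app` object in text_generation_server/cli.py
# (a Typer application, given the `typer` dependency above).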
[project.scripts]
text-generation-server = "text_generation_server.cli:app"

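# Optional feature sets. Example installs (a sketch, not from the repo docs):
#   pip install ".[accelerate,quantize]"
#   uv sync --extra bnb --extra peft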
[project.optional-dependencies]
accelerate = [
    "accelerate>=1.2.1,<2",
]
bnb = [
    "bitsandbytes>=0.45.0",
]
compressed-tensors = [
    "compressed-tensors>=0.9.0",
]
peft = [
    "peft>=0.14.0",
]
outlines = [
    "outlines>=0.1.13",
]
dev = [
    "grpcio-tools>=1.51.1,<2.0",
    "pytest>=7.3.0,<8"
]
quantize = [
    "texttable>=1.6.7,<2",
    "datasets>=2.21,<3",
]
gen = [
    "grpcio-tools>=1.69.0",
    "mypy-protobuf>=3.6.0",
]
torch = [
    "torch==2.6.0",
    "torchvision==0.21.0",
]

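# Tests marked `private` need an admin Hugging Face token; deselect them with
# `pytest -m "not private"`.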
[tool.pytest.ini_options]
markers = ["private: marks tests as requiring an admin hf token (deselect with '-m \"not private\"')"]

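# Keep import sorting compatible with Black's formatting.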
[tool.isort]
profile = "black"

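# `package = true` tells uv to build and install the project itself into the
# environment, not only its dependencies.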
[tool.uv]
package = true

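# Only ship the `text_generation_server` package (and its subpackages) in the
# built distribution.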
[tool.setuptools.packages.find]
include = ["text_generation_server*"]