From 042791fbd5742b1644d42c493db6bec669df6537 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 10 Dec 2024 01:12:42 +0530
Subject: [PATCH] Prep new version (#2810)

* New version.
* Link fixup.
* Update docs.
* FIxup.
---
 Cargo.lock | 14 +++++++-------
 Cargo.toml | 2 +-
 README.md | 6 +++---
 docs/openapi.json | 2 +-
 docs/source/basic_tutorials/gated_model_access.md | 2 +-
 docs/source/conceptual/chunking.md | 2 +-
 docs/source/conceptual/quantization.md | 6 +++---
 docs/source/installation_amd.md | 2 +-
 docs/source/installation_intel.md | 4 ++--
 docs/source/installation_nvidia.md | 2 +-
 docs/source/quicktour.md | 4 ++--
 docs/source/reference/api_reference.md | 2 +-
 12 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 72f70fdc5..9551ae2d9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4367,7 +4367,7 @@ dependencies = [
[[package]]
name = "text-generation-backends-trtllm"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"async-stream",
"async-trait",
@@ -4391,7 +4391,7 @@ dependencies = [
[[package]]
name = "text-generation-benchmark"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"average",
"clap 4.5.21",
@@ -4411,7 +4411,7 @@ dependencies = [
[[package]]
name = "text-generation-client"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"async-trait",
"base64 0.22.1",
@@ -4429,7 +4429,7 @@ dependencies = [
[[package]]
name = "text-generation-launcher"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"clap 4.5.21",
"ctrlc",
@@ -4450,7 +4450,7 @@ dependencies = [
[[package]]
name = "text-generation-router"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"anyhow",
"async-stream",
@@ -4501,7 +4501,7 @@ dependencies = [
[[package]]
name = "text-generation-router-v2"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"async-stream",
"async-trait",
@@ -4550,7 +4550,7 @@ dependencies = [
[[package]]
name = "text-generation-router-v3"
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
dependencies = [
"async-stream",
"async-trait",
diff --git a/Cargo.toml b/Cargo.toml
index 0a12b3143..806c94a04 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,7 +20,7 @@ default-members = [
resolver = "2"

[workspace.package]
-version = "2.4.2-dev0"
+version = "3.0.1-dev0"
edition = "2021"
authors = ["Olivier Dehaene"]
homepage = "https://github.com/huggingface/text-generation-inference"
diff --git a/README.md b/README.md
index 85137c1ce..631a97a2d 100644
--- a/README.md
+++ b/README.md
@@ -84,7 +84,7 @@ model=HuggingFaceH4/zephyr-7b-beta
volume=$PWD/data

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model
+    ghcr.io/huggingface/text-generation-inference:3.0.0 --model-id $model
```

And then you can make requests like
@@ -121,7 +121,7 @@ curl localhost:8080/v1/chat/completions \
**Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`, please note CPU is not the intended platform for this project, so performance might be subpar.

-**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1-rocm --model-id $model` instead of the command above.
+**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.0.0-rocm --model-id $model` instead of the command above.

To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
```
text-generation-launcher --help
```
@@ -151,7 +151,7 @@ model=meta-llama/Meta-Llama-3.1-8B-Instruct
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
token=
-docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model
+docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.0.0 --model-id $model
```

### A note on Shared Memory (shm)
diff --git a/docs/openapi.json b/docs/openapi.json
index f552ee08e..1caf67525 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -10,7 +10,7 @@
"name": "Apache 2.0",
"url": "https://www.apache.org/licenses/LICENSE-2.0"
},
- "version": "2.4.2-dev0"
+ "version": "3.0.1-dev0"
},
"paths": {
"/": {
diff --git a/docs/source/basic_tutorials/gated_model_access.md b/docs/source/basic_tutorials/gated_model_access.md
index bb35db9d4..60b347b13 100644
--- a/docs/source/basic_tutorials/gated_model_access.md
+++ b/docs/source/basic_tutorials/gated_model_access.md
@@ -19,6 +19,6 @@ docker run --gpus all \
--shm-size 1g \
-e HF_TOKEN=$token \
-p 8080:80 \
- -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 \
+ -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.0.0 \
--model-id $model
```
diff --git a/docs/source/conceptual/chunking.md b/docs/source/conceptual/chunking.md
index 110d60ab8..f6489afd2 100644
--- a/docs/source/conceptual/chunking.md
+++ b/docs/source/conceptual/chunking.md
@@ -72,7 +72,7 @@ Long: `MODEL_ID=$MODEL_ID HOST=localhost:8000 k6 run load_tests/long.js`

### Results

-![benchmarks_v3](https://github.com/huggingface/text-generation-inference/blob/main/assets/benchmarks_v3.png)
+![benchmarks_v3](https://github.com/huggingface/text-generation-inference/blob/main/assets/v3_benchmarks.png)

Our benchmarking results show significant performance gains, with a 13x speedup over vLLM with prefix caching, and up to 30x speedup without prefix caching. These results are consistent with our production data and demonstrate the effectiveness of our optimized LLM architecture.
diff --git a/docs/source/conceptual/quantization.md b/docs/source/conceptual/quantization.md
index 6edfa3e75..953e36837 100644
--- a/docs/source/conceptual/quantization.md
+++ b/docs/source/conceptual/quantization.md
@@ -19,7 +19,7 @@ bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models.
In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇

```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize bitsandbytes
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.0.0 --model-id $model --quantize bitsandbytes
```

4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
@@ -27,7 +27,7 @@ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingf
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇

```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize bitsandbytes-nf4
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.0.0 --model-id $model --quantize bitsandbytes-nf4
```

You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
@@ -48,7 +48,7 @@ $$({\hat{W}_{l}}^{*} = argmin_{\hat{W_{l}}} ||W_{l}X-\hat{W}_{l}X||^{2}_{2})$$
TGI allows you to both run an already GPTQ quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using quantization script. You can run a quantized model by simply passing --quantize like below 👇

```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize gptq
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.0.0 --model-id $model --quantize gptq
```

Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.
diff --git a/docs/source/installation_amd.md b/docs/source/installation_amd.md index e55c78278..3c9c0eecf 100644 --- a/docs/source/installation_amd.md +++ b/docs/source/installation_amd.md @@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \ --device=/dev/kfd --device=/dev/dri --group-add video \ --ipc=host --shm-size 256g --net host -v $volume:/data \ - ghcr.io/huggingface/text-generation-inference:2.4.1-rocm \ + ghcr.io/huggingface/text-generation-inference:3.0.0-rocm \ --model-id $model ``` diff --git a/docs/source/installation_intel.md b/docs/source/installation_intel.md index 649f8e332..a5578e769 100644 --- a/docs/source/installation_intel.md +++ b/docs/source/installation_intel.md @@ -12,7 +12,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading docker run --rm --privileged --cap-add=sys_nice \ --device=/dev/dri \ --ipc=host --shm-size 1g --net host -v $volume:/data \ - ghcr.io/huggingface/text-generation-inference:2.4.1-intel-xpu \ + ghcr.io/huggingface/text-generation-inference:3.0.0-intel-xpu \ --model-id $model --cuda-graphs 0 ``` @@ -29,7 +29,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading docker run --rm --privileged --cap-add=sys_nice \ --device=/dev/dri \ --ipc=host --shm-size 1g --net host -v $volume:/data \ - ghcr.io/huggingface/text-generation-inference:2.4.1-intel-cpu \ + ghcr.io/huggingface/text-generation-inference:3.0.0-intel-cpu \ --model-id $model --cuda-graphs 0 ``` diff --git a/docs/source/installation_nvidia.md b/docs/source/installation_nvidia.md index 9b8df0876..d954481ed 100644 --- a/docs/source/installation_nvidia.md +++ b/docs/source/installation_nvidia.md @@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \ - ghcr.io/huggingface/text-generation-inference:2.4.1 \ + ghcr.io/huggingface/text-generation-inference:3.0.0 \ --model-id $model ``` diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md index 2c6d950c0..d1f3efa4f 100644 --- a/docs/source/quicktour.md +++ b/docs/source/quicktour.md @@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \ - ghcr.io/huggingface/text-generation-inference:2.4.1 \ + ghcr.io/huggingface/text-generation-inference:3.0.0 \ --model-id $model ``` @@ -96,7 +96,7 @@ curl 127.0.0.1:8080/generate \ To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more. ```bash -docker run ghcr.io/huggingface/text-generation-inference:2.4.1 --help +docker run ghcr.io/huggingface/text-generation-inference:3.0.0 --help ``` diff --git a/docs/source/reference/api_reference.md b/docs/source/reference/api_reference.md index 345328f81..42a777039 100644 --- a/docs/source/reference/api_reference.md +++ b/docs/source/reference/api_reference.md @@ -163,7 +163,7 @@ hub = { # create Hugging Face Model Class huggingface_model = HuggingFaceModel( - image_uri=get_huggingface_llm_image_uri("huggingface",version="2.4.1"), + image_uri=get_huggingface_llm_image_uri("huggingface",version="3.0.0"), env=hub, role=role, )