updated release version to 2.0.6

Thanaji 2024-10-31 23:54:34 +02:00
parent 8d84ffabf2
commit f428f5fc8a
4 changed files with 21 additions and 21 deletions

Cargo.lock (generated)

@@ -1528,7 +1528,7 @@ dependencies = [

 [[package]]
 name = "indoc"
-version = "2.0.5"
+version = "2.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5"

@@ -3552,7 +3552,7 @@ dependencies = [

 [[package]]
 name = "text-generation-benchmark"
-version = "2.0.4"
+version = "2.0.6"
 dependencies = [
  "average",
  "clap",
@@ -3573,7 +3573,7 @@ dependencies = [

 [[package]]
 name = "text-generation-client"
-version = "2.0.4"
+version = "2.0.6"
 dependencies = [
  "futures",
  "grpc-metadata",
@@ -3590,7 +3590,7 @@ dependencies = [

 [[package]]
 name = "text-generation-launcher"
-version = "2.0.4"
+version = "2.0.6"
 dependencies = [
  "clap",
  "ctrlc",
@@ -3609,7 +3609,7 @@ dependencies = [

 [[package]]
 name = "text-generation-router"
-version = "2.0.4"
+version = "2.0.6"
 dependencies = [
  "async-stream",
  "axum",

Cargo.toml

@@ -9,7 +9,7 @@ members = [
 resolver = "2"

 [workspace.package]
-version = "2.0.4"
+version = "2.0.6"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"
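
The single `version` entry under `[workspace.package]` is what drives the matching bumps in the member crates listed in `Cargo.lock` above (`text-generation-benchmark`, `text-generation-client`, `text-generation-launcher`, `text-generation-router`), assuming those crates inherit the field via Cargo's workspace inheritance (`version.workspace = true`). A minimal, illustrative sketch of a post-bump consistency check; the patterns and file globs are assumptions, not part of this commit:

```bash
# Illustrative post-bump check (not part of this commit): empty output means
# no manifest version field or README image tag still carries a pre-bump string.
grep -rnE 'version = "2\.0\.[45]"' --include='*.toml' .
grep -rn 'tgi-gaudi:2\.0\.5' README.md
```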

README.md

@@ -62,7 +62,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
 1. Pull the official Docker image with:
 ```bash
-docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
+docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
 ```
 > [!NOTE]
 > Alternatively, you can build the Docker image using the `Dockerfile` located in this folder with:
@@ -83,7 +83,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
 -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HF_TOKEN=$hf_token \
 -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true \
 -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 --model-id $model --max-input-tokens 1024 \
 --max-total-tokens 2048
 ```
@@ -97,7 +97,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
 -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none \
 -e HF_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true \
 -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice \
---ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --sharded true \
+--ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.6 --model-id $model --sharded true \
 --num-shard 8 --max-input-tokens 1024 --max-total-tokens 2048
 ```
 3. Wait for the TGI-Gaudi server to come online. You will see something like so:
@@ -140,7 +140,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --max-input-length 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -172,7 +172,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --sharded true --num-shard 8 \
 --max-input-length 1024 --max-total-tokens 2048 \
@@ -204,7 +204,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --max-input-length 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -236,7 +236,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --sharded true --num-shard 8 \
 --max-input-length 1024 --max-total-tokens 2048 \
@@ -268,7 +268,7 @@ docker run -p 8080:80 \
 -e BATCH_BUCKET_SIZE=1 \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
 --max-total-tokens 8192 --max-batch-total-tokens 32768
@@ -319,7 +319,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --max-input-length 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -354,7 +354,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --sharded true --num-shard 8 \
 --max-input-length 1024 --max-total-tokens 2048 \
@@ -390,7 +390,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --max-input-length 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -425,7 +425,7 @@ docker run -p 8080:80 \
 -e FLASH_ATTENTION_RECOMPUTE=true \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --sharded true --num-shard 8 \
 --max-input-length 1024 --max-total-tokens 2048 \
@@ -458,7 +458,7 @@ docker run -p 8080:80 \
 -e BATCH_BUCKET_SIZE=1 \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
 --max-total-tokens 8192 --max-batch-total-tokens 32768
@@ -489,7 +489,7 @@ docker run -p 8080:80 \
 -e BATCH_BUCKET_SIZE=1 \
 --cap-add=sys_nice \
 --ipc=host \
-ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+ghcr.io/huggingface/tgi-gaudi:2.0.6 \
 --model-id $model \
 --sharded true --num-shard 8 \
 --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \

server/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "text-generation-server"
-version = "2.0.4"
+version = "2.0.6"
 description = "Text Generation Inference Python gRPC Server"
 authors = ["Olivier Dehaene <olivier@huggingface.co>"]
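
Once the `2.0.6` image tag is published, the README commands above resolve to the new release. A minimal sketch for pulling the retagged image and confirming the packaged launcher reports the bumped version; it assumes the container entrypoint is `text-generation-launcher` and that the launcher exposes clap's `--version` flag:

```bash
# Pull the newly tagged image referenced throughout the README.
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6

# Print the launcher's self-reported version without starting a server;
# assumes the entrypoint is text-generation-launcher with clap's --version.
docker run --rm ghcr.io/huggingface/tgi-gaudi:2.0.6 --version
```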