From 6ba3d1d6e58cd44e44f23d5e46d601648fea9442 Mon Sep 17 00:00:00 2001
From: Thanaji Rao Thakkalapelli
Date: Thu, 31 Oct 2024 15:44:16 -0700
Subject: [PATCH] updated release docker image version in readme to 2.0.6 (#242)

---
 README.md | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/README.md b/README.md
index 7f418d17..fc5be00d 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene

 1. Pull the official Docker image with:
    ```bash
-   docker pull ghcr.io/huggingface/tgi-gaudi:2.0.5
+   docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
    ```
    > [!NOTE]
    > Alternatively, you can build the Docker image using the `Dockerfile` located in this folder with:
@@ -83,7 +83,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
      -e OMPI_MCA_btl_vader_single_copy_mechanism=none -e HF_TOKEN=$hf_token \
      -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true -e USE_FLASH_ATTENTION=true \
      -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice --ipc=host \
-     ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --max-input-tokens 1024 \
+     ghcr.io/huggingface/tgi-gaudi:2.0.6 --model-id $model --max-input-tokens 1024 \
      --max-total-tokens 2048
    ```

@@ -97,7 +97,7 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
      -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none \
      -e HF_TOKEN=$hf_token -e ENABLE_HPU_GRAPH=true -e LIMIT_HPU_GRAPH=true \
      -e USE_FLASH_ATTENTION=true -e FLASH_ATTENTION_RECOMPUTE=true --cap-add=sys_nice \
-     --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.5 --model-id $model --sharded true \
+     --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.6 --model-id $model --sharded true \
      --num-shard 8 --max-input-tokens 1024 --max-total-tokens 2048
    ```
 3. Wait for the TGI-Gaudi server to come online. You will see something like so:
@@ -140,7 +140,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -172,7 +172,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -204,7 +204,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -236,7 +236,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -268,7 +268,7 @@ docker run -p 8080:80 \
    -e BATCH_BUCKET_SIZE=1 \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
    --max-total-tokens 8192 --max-batch-total-tokens 32768
@@ -319,7 +319,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -354,7 +354,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -390,7 +390,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --max-input-length 1024 --max-total-tokens 2048 \
    --max-batch-prefill-tokens 2048 --max-batch-total-tokens 65536 \
@@ -425,7 +425,7 @@ docker run -p 8080:80 \
    -e FLASH_ATTENTION_RECOMPUTE=true \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-length 1024 --max-total-tokens 2048 \
@@ -458,7 +458,7 @@ docker run -p 8080:80 \
    -e BATCH_BUCKET_SIZE=1 \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
    --max-total-tokens 8192 --max-batch-total-tokens 32768
@@ -489,7 +489,7 @@ docker run -p 8080:80 \
    -e BATCH_BUCKET_SIZE=1 \
    --cap-add=sys_nice \
    --ipc=host \
-   ghcr.io/huggingface/tgi-gaudi:2.0.5 \
+   ghcr.io/huggingface/tgi-gaudi:2.0.6 \
    --model-id $model \
    --sharded true --num-shard 8 \
    --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
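After applying the patch, a quick way to verify the bump is to confirm that no stale `2.0.5` references remain in the README and that the new tag actually resolves. A minimal sketch, assuming the patch has been applied in the repository root and Docker can reach `ghcr.io`:

```bash
# Verify the README no longer references the old image tag
# (grep exits non-zero when nothing matches, so `!` makes this succeed).
! grep -n 'tgi-gaudi:2.0.5' README.md

# Pull the tag the README now documents and confirm it resolves locally.
docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
docker image inspect ghcr.io/huggingface/tgi-gaudi:2.0.6 --format '{{.Id}}'
```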