commit db5053fc86
parent 79f268f95a
Author:    OlivierDehaene  2023-12-11 14:55:03 +01:00
Committer: Karol Damaszke

7 changed files with 12 additions and 12 deletions

Cargo.lock (generated)

@@ -2798,7 +2798,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-benchmark"
-version = "1.2.0"
+version = "1.3.0"
 dependencies = [
  "average",
  "clap",
@@ -2819,7 +2819,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-client"
-version = "1.2.0"
+version = "1.3.0"
 dependencies = [
  "futures",
  "grpc-metadata",
@@ -2836,7 +2836,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-launcher"
-version = "1.2.0"
+version = "1.3.0"
 dependencies = [
  "clap",
  "ctrlc",
@@ -2852,7 +2852,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-router"
-version = "1.2.0"
+version = "1.3.0"
 dependencies = [
  "async-stream",
  "axum",


@@ -9,7 +9,7 @@ members = [
 resolver = "2"
 
 [workspace.package]
-version = "1.2.0"
+version = "1.3.0"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"


@@ -10,7 +10,7 @@
       "name": "Apache 2.0",
       "url": "https://www.apache.org/licenses/LICENSE-2.0"
     },
-    "version": "1.2.0"
+    "version": "1.3.0"
   },
   "paths": {
     "/": {


@@ -19,6 +19,6 @@ docker run --gpus all \
     --shm-size 1g \
     -e HUGGING_FACE_HUB_TOKEN=$token \
     -p 8080:80 \
-    -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2 \
+    -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 \
     --model-id $model
 ```


@@ -8,7 +8,7 @@ Let's say you want to deploy [Falcon-7B Instruct](https://huggingface.co/tiiuae/
 model=tiiuae/falcon-7b-instruct
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2 --model-id $model
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3 --model-id $model
 ```
 
 <Tip warning={true}>
@@ -20,7 +20,7 @@ To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://d
 TGI also supports ROCm-enabled AMD GPUs (only MI210 and MI250 are tested), details are available in the [Supported Hardware section](./supported_models#supported-hardware) and [AMD documentation](https://rocm.docs.amd.com/en/latest/deploy/docker.html). To launch TGI on ROCm GPUs, please use instead:
 
 ```bash
-docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2-rocm --model-id $model
+docker run --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --device=/dev/kfd --device=/dev/dri --group-add video --ipc=host --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.3-rocm --model-id $model
 ```
 
 Once TGI is running, you can use the `generate` endpoint by doing requests. To learn more about how to query the endpoints, check the [Consuming TGI](./basic_tutorials/consuming_tgi) section, where we show examples with utility libraries and UIs. Below you can see a simple snippet to query the endpoint.
@@ -91,7 +91,7 @@ curl 127.0.0.1:8080/generate \
 To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
 
 ```bash
-docker run ghcr.io/huggingface/text-generation-inference:1.2 --help
+docker run ghcr.io/huggingface/text-generation-inference:1.3 --help
 ```
 
 </Tip>


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "text-generation-integration-tests"
-version = "1.2.0"
+version = "1.3.0"
 description = "Text Generation Inference integration tests"
 authors = ["Nicolas Patry <nicolas@huggingface.co>"]
 


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "text-generation-server"
-version = "1.2.0"
+version = "1.3.0"
 description = "Text Generation Inference Python gRPC Server"
 authors = ["Olivier Dehaene <olivier@huggingface.co>"]
 