Preparing for release. (#2540)

* Preparing for release.

* Upgrade version in docs.
Nicolas Patry 2024-09-20 17:42:04 +02:00 committed by yuanwu
parent bd9675c8c7
commit 514a5a737d
7 changed files with 36 additions and 2191 deletions

File diff suppressed because it is too large

docs/openapi.json.rej (new file, 10 lines)

@@ -0,0 +1,10 @@
diff a/docs/openapi.json b/docs/openapi.json	(rejected hunks)
@@ -10,7 +10,7 @@
       "name": "Apache 2.0",
       "url": "https://www.apache.org/licenses/LICENSE-2.0"
     },
-    "version": "2.2.1-dev0"
+    "version": "2.3.1-dev0"
   },
   "paths": {
     "/": {


@@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading

docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
    --device=/dev/kfd --device=/dev/dri --group-add video \
    --ipc=host --shm-size 256g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.2.0-rocm \
+    ghcr.io/huggingface/text-generation-inference:2.3.0-rocm \
    --model-id $model
```


@@ -12,7 +12,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading

docker run --rm --privileged --cap-add=sys_nice \
    --device=/dev/dri \
    --ipc=host --shm-size 1g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.2.0-intel-xpu \
+    ghcr.io/huggingface/text-generation-inference:2.3.0-intel-xpu \
    --model-id $model --cuda-graphs 0
```

@@ -29,7 +29,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading

docker run --rm --privileged --cap-add=sys_nice \
    --device=/dev/dri \
    --ipc=host --shm-size 1g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.2.0-intel-cpu \
+    ghcr.io/huggingface/text-generation-inference:2.3.0-intel-cpu \
    --model-id $model --cuda-graphs 0
```


@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.2.0 \
+    ghcr.io/huggingface/text-generation-inference:2.3.0 \
    --model-id $model
```


@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.2.0 \
+    ghcr.io/huggingface/text-generation-inference:2.3.0 \
    --model-id $model
```
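
All four documentation hunks above bump the published container tag from 2.2.0 to 2.3.0. As a quick sanity check of the release (a sketch; the image name and tag come straight from the diffs above):

```
# Pull the 2.3.0 image referenced by the updated docs.
docker pull ghcr.io/huggingface/text-generation-inference:2.3.0
```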

nix/client.nix (new file, 21 lines)

@@ -0,0 +1,21 @@
{
  buildPythonPackage,
  poetry-core,
  huggingface-hub,
  pydantic,
}:

buildPythonPackage {
  name = "text-generation";

  src = ../clients/python;

  pyproject = true;

  build-system = [ poetry-core ];

  dependencies = [
    huggingface-hub
    pydantic
  ];
}
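
nix/client.nix packages the Python client that lives in clients/python as a standard buildPythonPackage derivation. A minimal sketch of how such a derivation could be built standalone, assuming a nixpkgs checkout is on NIX_PATH and running from the repository root (the callPackage wiring is an assumption; this commit does not show how the rest of the nix setup consumes the file):

```
# Let python3Packages.callPackage supply buildPythonPackage, poetry-core,
# huggingface-hub, and pydantic from nixpkgs' Python package set.
nix-build -E 'with import <nixpkgs> { }; python3Packages.callPackage ./nix/client.nix { }'
```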