From 659bd67fec0a874e325fc2a2afd0c2ed2af692f0 Mon Sep 17 00:00:00 2001
From: fxmarty <9808326+fxmarty@users.noreply.github.com>
Date: Fri, 31 May 2024 07:03:24 -0700
Subject: [PATCH] Update documentation version to 2.0.4 (#1980)

As per title

cc @Narsil
---
 docs/README.md                                    | 10 ++++++++++
 docs/source/basic_tutorials/gated_model_access.md |  2 +-
 docs/source/installation_amd.md                   |  2 +-
 docs/source/installation_nvidia.md                |  2 +-
 docs/source/quicktour.md                          |  4 ++--
 5 files changed, 15 insertions(+), 5 deletions(-)
 create mode 100644 docs/README.md

diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 000000000..fb2ff1986
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,10 @@
+Documentation available at: https://huggingface.co/docs/text-generation-inference
+
+## Release
+
+When making a release, please update the latest version in the documentation with:
+```
+export OLD_VERSION="2\.0\.3"
+export NEW_VERSION="2\.0\.4"
+find . -name '*.md' -exec sed -i -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \;
+```
diff --git a/docs/source/basic_tutorials/gated_model_access.md b/docs/source/basic_tutorials/gated_model_access.md
index 970afa0e1..b49c59c92 100644
--- a/docs/source/basic_tutorials/gated_model_access.md
+++ b/docs/source/basic_tutorials/gated_model_access.md
@@ -19,6 +19,6 @@ docker run --gpus all \
     --shm-size 1g \
     -e HUGGING_FACE_HUB_TOKEN=$token \
     -p 8080:80 \
-    -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.0.3 \
+    -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.0.4 \
     --model-id $model
 ```
diff --git a/docs/source/installation_amd.md b/docs/source/installation_amd.md
index 636d301c0..d70953ae7 100644
--- a/docs/source/installation_amd.md
+++ b/docs/source/installation_amd.md
@@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
     --device=/dev/kfd --device=/dev/dri --group-add video \
     --ipc=host --shm-size 256g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.0.3-rocm \
+    ghcr.io/huggingface/text-generation-inference:2.0.4-rocm \
     --model-id $model
 ```
diff --git a/docs/source/installation_nvidia.md b/docs/source/installation_nvidia.md
index 62e1a3d69..9077f7fd9 100644
--- a/docs/source/installation_nvidia.md
+++ b/docs/source/installation_nvidia.md
@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
 docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.0.3 \
+    ghcr.io/huggingface/text-generation-inference:2.0.4 \
     --model-id $model
 ```
diff --git a/docs/source/quicktour.md b/docs/source/quicktour.md
index 6137c6f6f..b84de85d5 100644
--- a/docs/source/quicktour.md
+++ b/docs/source/quicktour.md
@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.0.3 \
+    ghcr.io/huggingface/text-generation-inference:2.0.4 \
     --model-id $model
 ```
@@ -88,7 +88,7 @@ curl 127.0.0.1:8080/generate \
 To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
 
 ```bash
-docker run ghcr.io/huggingface/text-generation-inference:2.0.3 --help
+docker run ghcr.io/huggingface/text-generation-inference:2.0.4 --help
 ```
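
For reference, a minimal sketch of how the release step introduced in docs/README.md might be exercised from the repository root; the trailing `grep` verification is an assumption added for illustration, not part of the patch:

```bash
# Bump the documented version everywhere, as docs/README.md describes
# (dots are escaped so sed treats them literally, not as regex wildcards).
export OLD_VERSION="2\.0\.3"
export NEW_VERSION="2\.0\.4"
find . -name '*.md' -exec sed -i -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \;

# Assumed sanity check: confirm no stale references remain
# (prints nothing if the replacement covered every file).
grep -rn "$OLD_VERSION" docs/ || true
```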