Preparing for release. (#2285)

* Preparing for release.

* Updating docs.

* Fixing token within the docker image for the launcher.
Authored by Nicolas Patry on 2024-07-23 16:20:17 +02:00; committed by yuanwu
parent 69b67b7add
commit 5390973c09
5 changed files with 16 additions and 7 deletions

@@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
     --device=/dev/kfd --device=/dev/dri --group-add video \
     --ipc=host --shm-size 256g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.1.1-rocm \
+    ghcr.io/huggingface/text-generation-inference:2.2.0-rocm \
     --model-id $model
 ```

@@ -12,7 +12,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm --privileged --cap-add=sys_nice \
     --device=/dev/dri \
     --ipc=host --shm-size 1g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:latest-intel \
+    ghcr.io/huggingface/text-generation-inference:2.2.0-intel \
     --model-id $model --cuda-graphs 0
 ```

@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.1.1 \
+    ghcr.io/huggingface/text-generation-inference:2.2.0 \
     --model-id $model
 ```

@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.1.1 \
+    ghcr.io/huggingface/text-generation-inference:2.2.0 \
     --model-id $model
 ```
@@ -88,7 +88,7 @@ curl 127.0.0.1:8080/generate \
 To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
 ```bash
-docker run ghcr.io/huggingface/text-generation-inference:2.1.1 --help
+docker run ghcr.io/huggingface/text-generation-inference:2.2.0 --help
 ```
 </Tip>
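
For context on the options that the `--help` hunk above alludes to, here is a hypothetical invocation (not part of this commit) combining shard count, quantization, and a generation limit. `--num-shard`, `--quantize`, and `--max-total-tokens` are existing launcher flags; the values are illustrative only:

```bash
# Illustrative only: run the 2.2.0 image with two shards, bitsandbytes
# quantization, and a cap on total tokens per request.
model=teknium/OpenHermes-2.5-Mistral-7B
volume=$PWD/data

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:2.2.0 \
    --model-id $model --num-shard 2 --quantize bitsandbytes \
    --max-total-tokens 4096
```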

@@ -1,7 +1,10 @@
 /// Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
 use clap::{Parser, ValueEnum};
-use hf_hub::{api::sync::Api, Repo, RepoType};
+use hf_hub::{
+    api::sync::{Api, ApiBuilder},
+    Repo, RepoType,
+};
 use nix::sys::signal::{self, Signal};
 use nix::unistd::Pid;
 use serde::Deserialize;
@@ -1415,7 +1418,13 @@ fn main() -> Result<(), LauncherError> {
     let mut path = std::path::Path::new(&args.model_id).to_path_buf();
     let filename = if !path.exists() {
         // Assume it's a hub id
-        let api = Api::new()?;
+        let api = if let Ok(token) = std::env::var("HF_TOKEN") {
+            // env variable has precedence over on file token.
+            ApiBuilder::new().with_token(Some(token)).build()?
+        } else {
+            Api::new()?
+        };
         let repo = if let Some(ref revision) = args.revision {
             api.repo(Repo::with_revision(
                 model_id,
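
This launcher change is the substance of the "fixing token" bullet in the commit message: inside the Docker image there is typically no `~/.cache/huggingface/token` file, so an explicitly passed `HF_TOKEN` environment variable is now honored ahead of any on-disk token. A minimal sketch of how a user would exercise this path (the `hf_xxx` value is a placeholder, and the gated model is only an example of why a token is needed):

```bash
# Pass a Hub token into the container; the launcher's new HF_TOKEN branch
# builds the API client from this variable instead of reading a token file.
model=meta-llama/Llama-2-7b-chat-hf   # example gated model that requires a token
volume=$PWD/data

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
    -e HF_TOKEN=hf_xxx \
    ghcr.io/huggingface/text-generation-inference:2.2.0 \
    --model-id $model
```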