Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-27 21:12:07 +00:00)
Merge branch 'huggingface:main' into feature/get-trace-id-from-req-headers
commit ba72c188d0
Cargo.lock (generated), 837 changes: file diff suppressed because it is too large.
@@ -20,7 +20,7 @@ default-members = [
 resolver = "2"
 
 [workspace.package]
-version = "2.4.1-dev0"
+version = "2.4.2-dev0"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"
@@ -84,7 +84,7 @@ model=HuggingFaceH4/zephyr-7b-beta
 volume=$PWD/data
 
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model
+    ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model
 ```
 
 And then you can make requests like
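
The README's actual request example is outside the context captured above; only the endpoint appears in the next hunk's header. As a rough illustration of such a call (the payload below is a generic OpenAI-style chat request with placeholder values, not text taken from the README):

```bash
# Illustrative request against the OpenAI-compatible endpoint referenced in the
# following hunk header; the model name and message content are placeholders.
curl localhost:8080/v1/chat/completions \
    -X POST \
    -H 'Content-Type: application/json' \
    -d '{
      "model": "tgi",
      "messages": [{"role": "user", "content": "What is Deep Learning?"}],
      "stream": false
    }'
```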
@@ -121,7 +121,7 @@ curl localhost:8080/v1/chat/completions \
 
 **Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`, please note CPU is not the intended platform for this project, so performance might be subpar.
 
-**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0-rocm --model-id $model` instead of the command above.
+**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1-rocm --model-id $model` instead of the command above.
 
 To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
 ```
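
The NVIDIA note above describes a CPU-only fallback (drop `--gpus all`, add `--disable-custom-kernels`) but no complete command is shown in the diff. A minimal sketch, assuming the same `$model` and `$volume` variables as the quickstart and the image tag introduced by this change:

```bash
# CPU-only fallback assembled from the note above; CPU is not the intended
# platform for TGI, so expect reduced performance.
docker run --shm-size 1g -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:2.4.1 \
    --model-id $model --disable-custom-kernels
```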
@@ -151,7 +151,7 @@ model=meta-llama/Meta-Llama-3.1-8B-Instruct
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 token=<your cli READ token>
 
-docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model
+docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model
 ```
 
 ### A note on Shared Memory (shm)
@@ -147,7 +147,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     tracing::info!("Downloading tokenizer");
 
     // Parse Huggingface hub token
-    let auth_token = std::env::var("HF_TOKEN")
+    let token = std::env::var("HF_TOKEN")
         .or_else(|_| std::env::var("HUGGING_FACE_HUB_TOKEN"))
         .ok();
 
@@ -155,7 +155,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // We need to download it outside of the Tokio runtime
     let params = FromPretrainedParameters {
         revision,
-        auth_token,
+        token,
         ..Default::default()
     };
     Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap()
@@ -10,7 +10,7 @@
       "name": "Apache 2.0",
       "url": "https://www.apache.org/licenses/LICENSE-2.0"
     },
-    "version": "2.4.1-dev0"
+    "version": "2.4.2-dev0"
   },
   "paths": {
     "/": {
@@ -19,6 +19,6 @@ docker run --gpus all \
     --shm-size 1g \
     -e HF_TOKEN=$token \
     -p 8080:80 \
-    -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 \
+    -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 \
     --model-id $model
 ```
@@ -19,7 +19,7 @@ bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models.
 In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇
 
 ```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model --quantize bitsandbytes
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize bitsandbytes
 ```
 
 4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
@@ -27,7 +27,7 @@ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingf
 In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇
 
 ```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model --quantize bitsandbytes-nf4
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize bitsandbytes-nf4
 ```
 
 You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
@@ -48,7 +48,7 @@ $$({\hat{W}_{l}}^{*} = argmin_{\hat{W_{l}}} ||W_{l}X-\hat{W}_{l}X||^{2}_{2})$$
 TGI allows you to both run an already GPTQ quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using quantization script. You can run a quantized model by simply passing --quantize like below 👇
 
 ```bash
-docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.0 --model-id $model --quantize gptq
+docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:2.4.1 --model-id $model --quantize gptq
 ```
 
 Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.
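
The paragraph above also mentions quantizing a model of your own with TGI's quantization script, which is not shown in this hunk. Assuming the `text-generation-server quantize` subcommand described in the TGI quantization docs (the model id and output directory below are placeholders), an invocation might look like:

```bash
# Assumed usage of TGI's quantization script; verify the subcommand and its
# arguments against the installed text-generation-server CLI before relying on it.
text-generation-server quantize $model /data/quantized-model
```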
@@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
     --device=/dev/kfd --device=/dev/dri --group-add video \
     --ipc=host --shm-size 256g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.0-rocm \
+    ghcr.io/huggingface/text-generation-inference:2.4.1-rocm \
     --model-id $model
 ```
 
@@ -12,7 +12,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm --privileged --cap-add=sys_nice \
     --device=/dev/dri \
     --ipc=host --shm-size 1g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.0-intel-xpu \
+    ghcr.io/huggingface/text-generation-inference:2.4.1-intel-xpu \
     --model-id $model --cuda-graphs 0
 ```
 
@@ -29,7 +29,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 docker run --rm --privileged --cap-add=sys_nice \
     --device=/dev/dri \
     --ipc=host --shm-size 1g --net host -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu \
+    ghcr.io/huggingface/text-generation-inference:2.4.1-intel-cpu \
     --model-id $model --cuda-graphs 0
 ```
 
@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
 docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.0 \
+    ghcr.io/huggingface/text-generation-inference:2.4.1 \
     --model-id $model
 ```
 
@@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-    ghcr.io/huggingface/text-generation-inference:2.4.0 \
+    ghcr.io/huggingface/text-generation-inference:2.4.1 \
     --model-id $model
 ```
 
@@ -96,7 +96,7 @@ curl 127.0.0.1:8080/generate \
 To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
 
 ```bash
-docker run ghcr.io/huggingface/text-generation-inference:2.4.0 --help
+docker run ghcr.io/huggingface/text-generation-inference:2.4.1 --help
 ```
 
 </Tip>
@@ -163,7 +163,7 @@ hub = {
 
 # create Hugging Face Model Class
 huggingface_model = HuggingFaceModel(
-    image_uri=get_huggingface_llm_image_uri("huggingface",version="2.4.0"),
+    image_uri=get_huggingface_llm_image_uri("huggingface",version="2.4.1"),
     env=hub,
     role=role,
 )
flake.lock, 30 changes:
@@ -108,11 +108,11 @@
       "pre-commit-hooks": "pre-commit-hooks_3"
     },
     "locked": {
-      "lastModified": 1730277369,
-      "narHash": "sha256-yvQbeJbnnwCB68yv7uZXdGb+P7NMn5JMGBw0aBHymDI=",
+      "lastModified": 1732039290,
+      "narHash": "sha256-LQKY7bShf2H9kJouxa9ZspfdrulnZF9o4kLTqGqCDYM=",
       "owner": "nix-community",
       "repo": "crate2nix",
-      "rev": "151122427d030874ebef3517cda766a6984e6ed6",
+      "rev": "9ff208ce7f5a482272b1bcefbe363c772d7ff914",
       "type": "github"
     },
     "original": {
@@ -479,11 +479,11 @@
       "systems": "systems_6"
     },
     "locked": {
-      "lastModified": 1726560853,
-      "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
+      "lastModified": 1731533236,
+      "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
+      "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
       "type": "github"
     },
     "original": {
@@ -581,11 +581,11 @@
     },
     "nix-filter": {
       "locked": {
-        "lastModified": 1730207686,
-        "narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=",
+        "lastModified": 1731533336,
+        "narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=",
         "owner": "numtide",
         "repo": "nix-filter",
-        "rev": "776e68c1d014c3adde193a18db9d738458cd2ba4",
+        "rev": "f7653272fd234696ae94229839a99b73c9ab7de0",
         "type": "github"
       },
       "original": {
@@ -853,11 +853,11 @@
       ]
     },
     "locked": {
-      "lastModified": 1730687492,
-      "narHash": "sha256-xQVadjquBA/tFxDt5A55LJ1D1AvkVWsnrKC2o+pr8F4=",
+      "lastModified": 1732242723,
+      "narHash": "sha256-NWI8csIK0ujFlFuEXKnoc+7hWoCiEtINK9r48LUUMeU=",
       "owner": "oxalica",
       "repo": "rust-overlay",
-      "rev": "41814763a2c597755b0755dbe3e721367a5e420f",
+      "rev": "a229311fcb45b88a95fdfa5cecd8349c809a272a",
       "type": "github"
     },
     "original": {
@@ -978,11 +978,11 @@
       "nixpkgs": "nixpkgs_6"
     },
     "locked": {
-      "lastModified": 1732187990,
-      "narHash": "sha256-93xEH3aUs6+D5Kab9DGBUX9vrEpwhm839wdp2yCg9hI=",
+      "lastModified": 1732218602,
+      "narHash": "sha256-BElslL34KjOJCFMPkNtilOz6S/7iY7Vd72FNbRRWKDY=",
       "owner": "huggingface",
       "repo": "text-generation-inference-nix",
-      "rev": "f25a1cd889a6ae49c1e204232500005f82241a8b",
+      "rev": "f79638ac4e420e661321261744e745a3a747e182",
       "type": "github"
     },
     "original": {
@@ -17,7 +17,7 @@
   "id": "",
   "model": "Qwen/Qwen2-VL-7B-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 58,
     "prompt_tokens": 349,
@@ -15,6 +15,6 @@
   "id": "",
   "model": "Qwen/Qwen2-VL-7B-Instruct",
   "object": "chat.completion.chunk",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": null
 }
@@ -18,7 +18,7 @@
   "id": "",
   "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 10,
     "prompt_tokens": 50,
@@ -44,7 +44,7 @@
   "id": "",
   "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 10,
     "prompt_tokens": 50,
@@ -70,7 +70,7 @@
   "id": "",
   "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 10,
     "prompt_tokens": 50,
@@ -96,7 +96,7 @@
   "id": "",
   "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 10,
     "prompt_tokens": 50,
@@ -17,7 +17,7 @@
   "id": "",
   "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 10,
     "prompt_tokens": 50,
@@ -17,7 +17,7 @@
   "id": "",
   "model": "meta-llama/Llama-3.1-8B-Instruct",
   "object": "chat.completion",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": {
     "completion_tokens": 23,
     "prompt_tokens": 604,
@@ -15,6 +15,6 @@
   "id": "",
   "model": "meta-llama/Llama-3.1-8B-Instruct",
   "object": "chat.completion.chunk",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": null
 }
@@ -15,6 +15,6 @@
   "id": "",
   "model": "meta-llama/Llama-3.1-8B-Instruct",
   "object": "chat.completion.chunk",
-  "system_fingerprint": "2.4.1-dev0-native",
+  "system_fingerprint": "2.4.2-dev0-native",
   "usage": null
 }
@@ -24,10 +24,12 @@ class InferenceEngineRunner:
 
 
 class TGIDockerRunner(InferenceEngineRunner):
-    def __init__(self,
-                 model: str,
-                 image: str = "ghcr.io/huggingface/text-generation-inference:latest",
-                 volumes=None):
+    def __init__(
+        self,
+        model: str,
+        image: str = "ghcr.io/huggingface/text-generation-inference:latest",
+        volumes=None,
+    ):
         super().__init__(model)
         if volumes is None:
             volumes = []
@@ -43,13 +45,15 @@ class TGIDockerRunner(InferenceEngineRunner):
         volumes = {}
         for v in self.volumes:
             volumes[v[0]] = {"bind": v[1], "mode": "rw"}
-        self.container = run_docker(self.image, params,
-                                    "Connected",
-                                    "ERROR",
-                                    volumes=volumes,
-                                    gpus=gpus,
-                                    ports={"8080/tcp": 8080}
-                                    )
+        self.container = run_docker(
+            self.image,
+            params,
+            "Connected",
+            "ERROR",
+            volumes=volumes,
+            gpus=gpus,
+            ports={"8080/tcp": 8080},
+        )
 
     def stop(self):
         if self.container:
@@ -57,9 +61,11 @@ class TGIDockerRunner(InferenceEngineRunner):
 
 
 class BenchmarkRunner:
-    def __init__(self,
-                 image: str = "ghcr.io/huggingface/text-generation-inference-benchmark:latest",
-                 volumes: List[Tuple[str, str]] = None):
+    def __init__(
+        self,
+        image: str = "ghcr.io/huggingface/text-generation-inference-benchmark:latest",
+        volumes: List[Tuple[str, str]] = None,
+    ):
         if volumes is None:
             volumes = []
         self.container = None
@@ -70,26 +76,41 @@ class BenchmarkRunner:
         params = "text-generation-inference-benchmark"
         for p in parameters:
             params += f" --{p[0]} {str(p[1])}" if p[1] is not None else f" --{p[0]}"
-        logger.info(f"Running text-generation-inference-benchmarks with parameters: {params}")
+        logger.info(
+            f"Running text-generation-inference-benchmarks with parameters: {params}"
+        )
         volumes = {}
         for v in self.volumes:
             volumes[v[0]] = {"bind": v[1], "mode": "rw"}
-        self.container = run_docker(self.image, params,
-                                    "Benchmark finished",
-                                    "Fatal:",
-                                    volumes=volumes,
-                                    extra_env={"RUST_LOG": "text_generation_inference_benchmark=info",
-                                               "RUST_BACKTRACE": "full"},
-                                    network_mode=network_mode)
+        self.container = run_docker(
+            self.image,
+            params,
+            "Benchmark finished",
+            "Fatal:",
+            volumes=volumes,
+            extra_env={
+                "RUST_LOG": "text_generation_inference_benchmark=info",
+                "RUST_BACKTRACE": "full",
+            },
+            network_mode=network_mode,
+        )
 
     def stop(self):
         if self.container:
             self.container.stop()
 
 
-def run_docker(image: str, args: str, success_sentinel: str,
-               error_sentinel: str, ports: Dict[str, int] = None, volumes=None, network_mode: str = "bridge",
-               gpus: int = 0, extra_env: Dict[str, str] = None) -> Container:
+def run_docker(
+    image: str,
+    args: str,
+    success_sentinel: str,
+    error_sentinel: str,
+    ports: Dict[str, int] = None,
+    volumes=None,
+    network_mode: str = "bridge",
+    gpus: int = 0,
+    extra_env: Dict[str, str] = None,
+) -> Container:
     if ports is None:
         ports = {}
     if volumes is None:
@@ -98,21 +119,24 @@ def run_docker(image: str, args: str, success_sentinel: str,
         extra_env = {}
     client = docker.from_env(timeout=300)
     # retrieve the GPU devices from CUDA_VISIBLE_DEVICES
-    devices = [f"{i}" for i in
-               range(get_num_gpus())][:gpus]
+    devices = [f"{i}" for i in range(get_num_gpus())][:gpus]
    environment = {"HF_TOKEN": os.environ.get("HF_TOKEN")}
    environment.update(extra_env)
-    container = client.containers.run(image, args,
-                                      detach=True,
-                                      device_requests=[
-                                          docker.types.DeviceRequest(device_ids=devices,
-                                                                     capabilities=[['gpu']])
-                                      ] if gpus > 0 else None,
-                                      volumes=volumes,
-                                      shm_size="1g",
-                                      ports=ports,
-                                      network_mode=network_mode,
-                                      environment=environment, )
+    container = client.containers.run(
+        image,
+        args,
+        detach=True,
+        device_requests=(
+            [docker.types.DeviceRequest(device_ids=devices, capabilities=[["gpu"]])]
+            if gpus > 0
+            else None
+        ),
+        volumes=volumes,
+        shm_size="1g",
+        ports=ports,
+        network_mode=network_mode,
+        environment=environment,
+    )
     for line in container.logs(stream=True):
         print(line.decode("utf-8"), end="")
         if success_sentinel.encode("utf-8") in line:
@@ -126,14 +150,14 @@ def run_docker(image: str, args: str, success_sentinel: str,
 def get_gpu_names() -> str:
     gpus = GPUtil.getGPUs()
     if len(gpus) == 0:
-        return ''
+        return ""
     return f'{len(gpus)}x{gpus[0].name if gpus else "No GPU available"}'
 
 
 def get_gpu_name() -> str:
     gpus = GPUtil.getGPUs()
     if len(gpus) == 0:
-        return ''
+        return ""
     return gpus[0].name
 
 
@@ -147,29 +171,29 @@ def build_df(model: str, data_files: dict[str, str]) -> pd.DataFrame:
     created_at = now.isoformat()  # '2024-10-02T11:53:17.026215+00:00'
     # Load the results
     for key, filename in data_files.items():
-        with open(filename, 'r') as f:
+        with open(filename, "r") as f:
             data = json.load(f)
-            for result in data['results']:
+            for result in data["results"]:
                 entry = result
-                [config] = pd.json_normalize(result['config']).to_dict(orient='records')
+                [config] = pd.json_normalize(result["config"]).to_dict(orient="records")
                 entry.update(config)
-                entry['engine'] = data['config']['meta']['engine']
-                entry['tp'] = data['config']['meta']['tp']
-                entry['version'] = data['config']['meta']['version']
-                entry['model'] = model
-                entry['created_at'] = created_at
-                del entry['config']
+                entry["engine"] = data["config"]["meta"]["engine"]
+                entry["tp"] = data["config"]["meta"]["tp"]
+                entry["version"] = data["config"]["meta"]["version"]
+                entry["model"] = model
+                entry["created_at"] = created_at
+                del entry["config"]
                 df = pd.concat([df, pd.DataFrame(entry, index=[0])])
     return df
 
 
 def main(sha, results_file):
-    results_dir = 'results'
+    results_dir = "results"
     # get absolute path
     results_dir = os.path.join(os.path.dirname(__file__), results_dir)
-    logger.info('Starting benchmark')
+    logger.info("Starting benchmark")
     models = [
-        ('meta-llama/Llama-3.1-8B-Instruct', 1),
+        ("meta-llama/Llama-3.1-8B-Instruct", 1),
         # ('meta-llama/Llama-3.1-70B-Instruct', 4),
         # ('mistralai/Mixtral-8x7B-Instruct-v0.1', 2),
     ]
@@ -177,31 +201,42 @@ def main(sha, results_file):
     for model in models:
         tgi_runner = TGIDockerRunner(model[0])
         # create results directory
-        model_dir = os.path.join(results_dir, f'{model[0].replace("/", "_").replace(".", "_")}')
+        model_dir = os.path.join(
+            results_dir, f'{model[0].replace("/", "_").replace(".", "_")}'
+        )
         os.makedirs(model_dir, exist_ok=True)
         runner = BenchmarkRunner(
-            volumes=[(model_dir, '/opt/text-generation-inference-benchmark/results')]
+            volumes=[(model_dir, "/opt/text-generation-inference-benchmark/results")]
         )
         try:
-            tgi_runner.run([('max-concurrent-requests', 512)], gpus=model[1])
-            logger.info(f'TGI started for model {model[0]}')
+            tgi_runner.run([("max-concurrent-requests", 512)], gpus=model[1])
+            logger.info(f"TGI started for model {model[0]}")
             parameters = [
-                ('tokenizer-name', model[0]),
-                ('max-vus', 800),
-                ('url', 'http://localhost:8080'),
-                ('duration', '120s'),
-                ('warmup', '30s'),
-                ('benchmark-kind', 'rate'),
-                ('prompt-options', 'num_tokens=200,max_tokens=220,min_tokens=180,variance=10'),
-                ('decode-options', 'num_tokens=200,max_tokens=220,min_tokens=180,variance=10'),
-                ('extra-meta', f'"engine=TGI,tp={model[1]},version={sha},gpu={get_gpu_name()}"'),
-                ('no-console', None)
+                ("tokenizer-name", model[0]),
+                ("max-vus", 800),
+                ("url", "http://localhost:8080"),
+                ("duration", "120s"),
+                ("warmup", "30s"),
+                ("benchmark-kind", "rate"),
+                (
+                    "prompt-options",
+                    "num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
+                ),
+                (
+                    "decode-options",
+                    "num_tokens=200,max_tokens=220,min_tokens=180,variance=10",
+                ),
+                (
+                    "extra-meta",
+                    f'"engine=TGI,tp={model[1]},version={sha},gpu={get_gpu_name()}"',
+                ),
+                ("no-console", None),
             ]
-            rates = [('rates', f'{r / 10.}') for r in list(range(8, 248, 8))]
+            rates = [("rates", f"{r / 10.}") for r in list(range(8, 248, 8))]
             parameters.extend(rates)
-            runner.run(parameters, f'container:{tgi_runner.container.id}')
+            runner.run(parameters, f"container:{tgi_runner.container.id}")
         except Exception as e:
-            logger.error(f'Error running benchmark for model {model[0]}: {e}')
+            logger.error(f"Error running benchmark for model {model[0]}: {e}")
             # print the stack trace
             print(traceback.format_exc())
             success = False
@@ -209,33 +244,45 @@ def main(sha, results_file):
             tgi_runner.stop()
             runner.stop()
     if not success:
-        logger.error('Some benchmarks failed')
+        logger.error("Some benchmarks failed")
         exit(1)
 
     df = pd.DataFrame()
     # list recursively directories
-    directories = [f'{results_dir}/{d}' for d in os.listdir(results_dir) if os.path.isdir(f'{results_dir}/{d}')]
-    logger.info(f'Found result directories: {directories}')
+    directories = [
+        f"{results_dir}/{d}"
+        for d in os.listdir(results_dir)
+        if os.path.isdir(f"{results_dir}/{d}")
+    ]
+    logger.info(f"Found result directories: {directories}")
     for directory in directories:
         data_files = {}
         for filename in os.listdir(directory):
-            if filename.endswith('.json'):
-                data_files[filename.split('.')[-2]] = f'{directory}/{filename}'
-        logger.info(f'Processing directory {directory}')
-        df = pd.concat([df, build_df(directory.split('/')[-1], data_files)])
-    df['device'] = get_gpu_name()
-    df['error_rate'] = df['failed_requests'] / (df['failed_requests'] + df['successful_requests']) * 100.0
+            if filename.endswith(".json"):
+                data_files[filename.split(".")[-2]] = f"{directory}/{filename}"
+        logger.info(f"Processing directory {directory}")
+        df = pd.concat([df, build_df(directory.split("/")[-1], data_files)])
+    df["device"] = get_gpu_name()
+    df["error_rate"] = (
+        df["failed_requests"]
+        / (df["failed_requests"] + df["successful_requests"])
+        * 100.0
+    )
     df.to_parquet(results_file)
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--sha", help="SHA of the commit to add to the results", required=True)
-    parser.add_argument("--results-file",
-                        help="The file where to store the results, can be a local file or a s3 path")
+    parser.add_argument(
+        "--sha", help="SHA of the commit to add to the results", required=True
+    )
+    parser.add_argument(
+        "--results-file",
+        help="The file where to store the results, can be a local file or a s3 path",
+    )
     args = parser.parse_args()
     if args.results_file is None:
-        results_file = f'{args.sha}.parquet'
+        results_file = f"{args.sha}.parquet"
     else:
         results_file = args.results_file
 
@@ -16,4 +16,4 @@ pyarrow = "^17.0.0"
 
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
@@ -803,7 +803,7 @@ mod tests {
         let tools: Vec<Tool> = serde_json::from_str(&tools_string).unwrap();
         let tool_prompt = "This default prompt will be used".to_string();
         let tools_and_prompt = Some((tools, tool_prompt));
-        let result = ct.apply(None, msgs, tools_and_prompt);
+        let result = ct.apply(msgs, tools_and_prompt);
         let expected = "<s>[INST] I'd like to show off how chat templating works! [/INST]Great! How can I help you today?</s> [INST] Just testing\n---\n[{\"type\":\"function\",\"function\":{\"description\":\"Get the current weather\",\"name\":\"get_current_weather\",\"arguments\":{\"properties\":{\"format\":{\"description\":\"The temperature unit to use. Infer this from the users location.\",\"enum\":[\"celsius\",\"fahrenheit\"],\"type\":\"string\"},\"location\":{\"description\":\"The city and state, e.g. San Francisco, CA\",\"type\":\"string\"}},\"required\":[\"location\",\"format\"],\"type\":\"object\"}}}]\nThis default prompt will be used [/INST]".to_string();
         assert_eq!(result.unwrap(), expected);
     }
@@ -837,7 +837,7 @@ mod tests {
         let tools: Vec<Tool> = serde_json::from_str(&tools_string).unwrap();
         let tool_prompt = "This default prompt will be used".to_string();
         let tools_and_prompt = Some((tools, tool_prompt));
-        let result = ct.apply(None, msgs, tools_and_prompt);
+        let result = ct.apply(msgs, tools_and_prompt);
         let expected = "<s><|start_header_id|>system<|end_header_id|>\n\nEnvironment: ipython\nCutting Knowledge Date: December 2023\nToday Date: 26 Jul 2024\n\nYoure a helpful assistant! Answer the users question best you can.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nGiven the following functions, please respond with a JSON for a function call with its proper arguments that best answers the given prompt.\n\nRespond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.Do not use variables.\n\n{\n \"function\": {\n \"arguments\": {\n \"properties\": {\n \"format\": {\n \"description\": \"The temperature unit to use. Infer this from the users location.\",\n \"enum\": [\n \"celsius\",\n \"fahrenheit\"\n ],\n \"type\": \"string\"\n },\n \"location\": {\n \"description\": \"The city and state, e.g. San Francisco, CA\",\n \"type\": \"string\"\n }\n },\n \"required\": [\n \"location\",\n \"format\"\n ],\n \"type\": \"object\"\n },\n \"description\": \"Get the current weather\",\n \"name\": \"get_current_weather\"\n },\n \"type\": \"function\"\n}\n\nWhat is the weather like in Brooklyn, New York?\n---\nThis default prompt will be used<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n".to_string();
         assert_eq!(result.unwrap(), expected);
     }
server/poetry.lock (generated), 26 changes:
@@ -1288,12 +1288,12 @@ files = [
 
 [[package]]
 name = "marlin-kernels"
-version = "0.3.5"
+version = "0.3.6"
 description = "Marlin quantization kernels"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "marlin_kernels-0.3.5+cu123torch2.4-cp310-cp310-linux_x86_64.whl", hash = "sha256:58d4bf0aa1a9533acc05f1e5bf50f727ed0129848d1fa1feb2c5c3fa482518d4"},
+    {file = "marlin_kernels-0.3.6+cu123torch2.4-cp310-cp310-linux_x86_64.whl", hash = "sha256:afedaa9a15e8991442bc8c81f62833fbf5c1556ae9d7a5a9e13b747ce97beef9"},
 ]
 
 [package.dependencies]
@@ -1301,16 +1301,16 @@ torch = "*"
 
 [package.source]
 type = "url"
-url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp310-cp310-linux_x86_64.whl"
+url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp310-cp310-linux_x86_64.whl"
 
 [[package]]
 name = "marlin-kernels"
-version = "0.3.5"
+version = "0.3.6"
 description = "Marlin quantization kernels"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "marlin_kernels-0.3.5+cu123torch2.4-cp311-cp311-linux_x86_64.whl", hash = "sha256:a3a3653e6908db013ca96979a5ee1f6a8bb590ee7506a129e06b87d4a8cbb87d"},
+    {file = "marlin_kernels-0.3.6+cu123torch2.4-cp311-cp311-linux_x86_64.whl", hash = "sha256:c0c05621d5e87144415d8a6e439072bd844d5f3cb55e4c4c69eabdc4c94610f4"},
 ]
 
 [package.dependencies]
@@ -1318,16 +1318,16 @@ torch = "*"
 
 [package.source]
 type = "url"
-url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp311-cp311-linux_x86_64.whl"
+url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp311-cp311-linux_x86_64.whl"
 
 [[package]]
 name = "marlin-kernels"
-version = "0.3.5"
+version = "0.3.6"
 description = "Marlin quantization kernels"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "marlin_kernels-0.3.5+cu123torch2.4-cp312-cp312-linux_x86_64.whl", hash = "sha256:967b4765a591530a4b9160ae32f3f352a89ae4c71daf43220c99976987d76723"},
+    {file = "marlin_kernels-0.3.6+cu123torch2.4-cp312-cp312-linux_x86_64.whl", hash = "sha256:3be4662c8d25a3cdb1793dafe0e2e76dd600913a69a468e2c68d1fed4e149255"},
 ]
 
 [package.dependencies]
@@ -1335,16 +1335,16 @@ torch = "*"
 
 [package.source]
 type = "url"
-url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp312-cp312-linux_x86_64.whl"
+url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp312-cp312-linux_x86_64.whl"
 
 [[package]]
 name = "marlin-kernels"
-version = "0.3.5"
+version = "0.3.6"
 description = "Marlin quantization kernels"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "marlin_kernels-0.3.5+cu123torch2.4-cp39-cp39-linux_x86_64.whl", hash = "sha256:fbe607d5afd1e1fca6e294c3594a0ec279d1f9ea6a2fdf7f34ccb6180d15e195"},
+    {file = "marlin_kernels-0.3.6+cu123torch2.4-cp39-cp39-linux_x86_64.whl", hash = "sha256:89eac9d46bc084a256b538afda6053683eb7e505db0e0d4f6dbeca32368caac6"},
 ]
 
 [package.dependencies]
@@ -1352,7 +1352,7 @@ torch = "*"
 
 [package.source]
 type = "url"
-url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp39-cp39-linux_x86_64.whl"
+url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp39-cp39-linux_x86_64.whl"
 
 [[package]]
 name = "mdurl"
@@ -4066,4 +4066,4 @@ torch = ["torch"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.9,<3.13"
-content-hash = "7082f1983403ff58a1f0304e8bbf1197715b5156ddeea0f3e8287334d52c2617"
+content-hash = "995bb2a29e3e8cfa6ee8ddb139ec623c2cba4d7ad047066eb4952ccbab5579d5"
@@ -48,10 +48,10 @@ attention-kernels = [
     { url = "https://github.com/danieldk/attention-kernels/releases/download/v0.1.1/attention_kernels-0.1.1+cu123torch2.4-cp312-cp312-linux_x86_64.whl", python = "~3.12", optional = true },
 ]
 marlin-kernels = [
-    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp39-cp39-linux_x86_64.whl", python = "~3.9", optional = true },
-    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp310-cp310-linux_x86_64.whl", python = "~3.10", optional = true },
-    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp311-cp311-linux_x86_64.whl", python = "~3.11", optional = true },
-    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.5/marlin_kernels-0.3.5+cu123torch2.4-cp312-cp312-linux_x86_64.whl", python = "~3.12", optional = true },
+    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp39-cp39-linux_x86_64.whl", python = "~3.9", optional = true },
+    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp310-cp310-linux_x86_64.whl", python = "~3.10", optional = true },
+    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp311-cp311-linux_x86_64.whl", python = "~3.11", optional = true },
+    { url = "https://github.com/danieldk/marlin-kernels/releases/download/v0.3.6/marlin_kernels-0.3.6+cu123torch2.4-cp312-cp312-linux_x86_64.whl", python = "~3.12", optional = true },
 ]
 moe-kernels = [
     { url = "https://github.com/danieldk/moe-kernels/releases/download/v0.7.0/moe_kernels-0.7.0+cu123torch2.4-cp39-cp39-linux_x86_64.whl", python = "~3.9", optional = true },