Mirror of https://github.com/huggingface/text-generation-inference.git

Merge branch 'main' into flash_decoding

Commit 068520749c
@@ -118,8 +118,8 @@ ENV CCL_ZE_IPC_EXCHANGE=sockets
 #ENV TORCH_LLM_ALLREDUCE=1
 #ENV CCL_TOPO_FABRIC_VERTEX_CONNECTION_CHECK=0

-RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout c3e14caf792ad04824dd921e2fc3f16fca0d462e
-RUN cd intel-extension-for-pytorch && git submodule update --init --recursive && USE_AOT_DEVLIST='pvc' BUILD_SEPARATE_OPS=OFF BUILD_WITH_CPU=OFF USE_XETLA=ON python setup.py install && rm -rf /usr/src/intel-extension-for-pytorch
+RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout 033af6f63745ac748cccdadee5c6140c7971edf6
+RUN cd intel-extension-for-pytorch && git submodule update --init --recursive && USE_AOT_DEVLIST='pvc,ats-m150' BUILD_SEPARATE_OPS=OFF BUILD_WITH_CPU=OFF USE_XETLA=ON python setup.py install && rm -rf /usr/src/intel-extension-for-pytorch

 # Install benchmarker
 COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
README.md (12 changed lines)
@@ -1,7 +1,7 @@
 <div align="center">

 <a href="https://www.youtube.com/watch?v=jlMAX2Oaht0">
-<img width=560 width=315 alt="Making TGI deployment optimal" src="https://huggingface.co/datasets/Narsil/tgi_assets/resolve/main/thumbnail.png">
+<img width=560 alt="Making TGI deployment optimal" src="https://huggingface.co/datasets/Narsil/tgi_assets/resolve/main/thumbnail.png">
 </a>

 # Text Generation Inference
@@ -141,8 +141,8 @@ You have the option to utilize the `HF_TOKEN` environment variable for configuri
 For example, if you want to serve the gated Llama V2 model variants:

 1. Go to https://huggingface.co/settings/tokens
-2. Copy your cli READ token
-3. Export `HF_TOKEN=<your cli READ token>`
+2. Copy your CLI READ token
+3. Export `HF_TOKEN=<your CLI READ token>`

 or with Docker:

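A minimal sketch of what these steps lead to, assuming a locally installed `text-generation-launcher`; the model ID is only an illustrative gated Llama V2 variant and is not taken from this diff:

```shell
# Hedged sketch: export the READ token copied from the settings page,
# then point the launcher at a gated model (model ID is illustrative).
export HF_TOKEN=<your CLI READ token>
text-generation-launcher --model-id meta-llama/Llama-2-7b-chat-hf
```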
@@ -157,7 +157,7 @@ docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/da
 ### A note on Shared Memory (shm)

 [`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by
-`PyTorch` to do distributed training/inference. `text-generation-inference` make
+`PyTorch` to do distributed training/inference. `text-generation-inference` makes
 use of `NCCL` to enable Tensor Parallelism to dramatically speed up inference for large language models.

 In order to share data between the different devices of a `NCCL` group, `NCCL` might fall back to using the host memory if
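A hedged sketch of the Docker flags this note refers to; the image name, `/data` mount path, and model ID are assumptions, and `NCCL_SHM_DISABLE=1` is a standard NCCL environment variable whose use here is a suggestion rather than something this diff states:

```shell
# Hedged sketch: give the container enough shared memory for NCCL
# (image name, /data mount path, and model ID are assumptions).
docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:latest \
    --model-id meta-llama/Llama-2-7b-chat-hf

# If shared memory cannot be enlarged, NCCL can be told not to use it,
# at a potential performance cost.
docker run --gpus all -e NCCL_SHM_DISABLE=1 -e HF_TOKEN=$token -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:latest \
    --model-id meta-llama/Llama-2-7b-chat-hf
```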
@@ -196,7 +196,7 @@ Detailed blogpost by Adyen on TGI inner workings: [LLM inference at scale with T

 You can also opt to install `text-generation-inference` locally.

-First clone the repository and change directoy into it:
+First clone the repository and change directory into it:

 ```shell
 git clone https://github.com/huggingface/text-generation-inference
@@ -213,7 +213,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
 conda create -n text-generation-inference python=3.11
 conda activate text-generation-inference

-#using pyton venv
+#using python venv
 python3 -m venv .venv
 source .venv/bin/activate
 ```
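Putting the local-install hunks together, a hedged sketch of the whole flow; the final `make install` step is an assumption and does not appear in this diff:

```shell
# Hedged sketch of the local install path (Rust toolchain + Python venv).
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

git clone https://github.com/huggingface/text-generation-inference
cd text-generation-inference

# using python venv
python3 -m venv .venv
source .venv/bin/activate

# Build and install the server and router (assumed step, not shown in this diff).
make install
```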
@@ -205,6 +205,7 @@ pub async fn kserve_model_infer(
 let generate_request = GenerateRequest {
     inputs: str_input.to_string(),
     parameters: payload.parameters.clone(),
+    add_special_tokens: true,
 };
 let infer = infer.clone();
 let compute_type = compute_type.clone();
@@ -212,7 +213,7 @@ pub async fn kserve_model_infer(
 async move {
     generate_internal(infer, compute_type, Json(generate_request), span)
         .await
-        .map(|(_, Json(generation))| {
+        .map(|(_, _, Json(generation))| {
             let generation_as_bytes = generation.generated_text.as_bytes().to_vec();
             OutputChunk {
                 name: output.name.clone(),
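For context, a hedged sketch of the kind of request `kserve_model_infer` handles; the route and JSON field names follow the KServe open inference protocol (v2) and are assumptions based on that protocol, not confirmed by this diff:

```shell
# Hedged sketch of a KServe-v2-style inference call; port, model name,
# and payload shape are assumptions based on the open inference protocol.
curl -s http://localhost:8080/v2/models/tgi/infer \
    -H 'Content-Type: application/json' \
    -d '{
          "id": "1",
          "inputs": [
            {"name": "input-0", "shape": [1], "datatype": "BYTES", "data": ["What is Deep Learning?"]}
          ]
        }'
```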