Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-07-27 10:20:17 +00:00

Compare commits: 11 commits (SHA1)
24c2bff659, fc2405c549, ebb26f0ccd, 778b61c0da, 3d2e7c8fce, f6005d6813, 429dcd9c64, 9f38d93051, 719907410b, 238fbd4d50, 14ee6e7804
.github/workflows/build.yaml (vendored): 4 changed lines

@@ -129,9 +129,9 @@ jobs:
 export label_extension="-gaudi"
 export docker_volume="/mnt/cache"
 export docker_devices=""
-export runs_on="ubuntu-latest"
+export runs_on="itac-bm-emr-gaudi3-dell-2gaudi"
 export platform=""
-export extra_pytest=""
+export extra_pytest="--gaudi"
 export target=""
 esac
 echo $dockerfile
Cargo.lock (generated): 16 changed lines

@@ -4650,7 +4650,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-backends-trtllm"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "async-trait",
 "clap 4.5.32",
@@ -4671,7 +4671,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-benchmark"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "average",
 "clap 4.5.32",
@@ -4691,7 +4691,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-client"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "async-trait",
 "base64 0.22.1",
@@ -4709,7 +4709,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-launcher"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "clap 4.5.32",
 "ctrlc",
@@ -4730,7 +4730,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-router"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "anyhow",
 "async-stream",
@@ -4782,7 +4782,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-router-llamacpp"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "async-trait",
 "bindgen 0.71.1",
@@ -4800,7 +4800,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-router-v2"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "async-stream",
 "async-trait",
@@ -4849,7 +4849,7 @@ dependencies = [
 
 [[package]]
 name = "text-generation-router-v3"
-version = "3.3.3"
+version = "3.3.4-dev0"
 dependencies = [
 "async-stream",
 "async-trait",
@@ -21,7 +21,7 @@ default-members = [
 resolver = "2"
 
 [workspace.package]
-version = "3.3.3"
+version = "3.3.4-dev0"
 edition = "2021"
 authors = ["Olivier Dehaene"]
 homepage = "https://github.com/huggingface/text-generation-inference"
@@ -5,7 +5,7 @@ RUN mkdir -p /tgi
 # Fetch the optimum-neuron sources directly to avoid relying on pypi deployments
 FROM alpine AS optimum-neuron
 RUN mkdir -p /optimum-neuron
-ADD https://github.com/huggingface/optimum-neuron/archive/refs/tags/v0.2.0.tar.gz /optimum-neuron/sources.tar.gz
+ADD https://github.com/huggingface/optimum-neuron/archive/refs/tags/v0.2.2.tar.gz /optimum-neuron/sources.tar.gz
 RUN tar -C /optimum-neuron -xf /optimum-neuron/sources.tar.gz --strip-components=1
 
 # Build cargo components (adapted from TGI original Dockerfile)
@@ -118,9 +118,9 @@ ENTRYPOINT ["./entrypoint.sh"]
 # Final image
 FROM base
 
-ENV HF_HUB_ENABLE_HF_TRANSFER 1
-ENV HABANA_VISIBLE_DEVICES all
-ENV OMPI_MCA_btl_vader_single_copy_mechanism NONE
+ENV HF_HUB_ENABLE_HF_TRANSFER=1
+ENV HABANA_VISIBLE_DEVICES=all
+ENV OMPI_MCA_btl_vader_single_copy_mechanism=NONE
 
 COPY backends/gaudi/tgi-entrypoint.sh /tgi-entrypoint.sh
 RUN chmod +x /tgi-entrypoint.sh
@@ -84,7 +84,7 @@ model=HuggingFaceH4/zephyr-7b-beta
 volume=$PWD/data
 
 docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:3.3.3 --model-id $model
+ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model
 ```
 
 And then you can make requests like
@@ -121,7 +121,7 @@ curl localhost:8080/v1/chat/completions \
 
 **Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 12.2 or higher. For running the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`, please note CPU is not the intended platform for this project, so performance might be subpar.
 
-**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/installation_amd#using-tgi-with-amd-gpus). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.3-rocm --model-id $model` instead of the command above.
+**Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/installation_amd#using-tgi-with-amd-gpus). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4-rocm --model-id $model` instead of the command above.
 
 To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the cli):
 ```
@@ -152,7 +152,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
 token=<your cli READ token>
 
 docker run --gpus all --shm-size 1g -e HF_TOKEN=$token -p 8080:80 -v $volume:/data \
-ghcr.io/huggingface/text-generation-inference:3.3.3 --model-id $model
+ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model
 ```
 
 ### A note on Shared Memory (shm)
@@ -50,11 +50,14 @@ local-dev-install: install-dependencies
 
 # In order to run the integration tests, you need to first build the image (make -C backends/gaudi image)
 run-integration-tests:
-pip install -U pip uv
-uv pip install -r ${root_dir}/backends/gaudi/server/integration-tests/requirements.txt
 DOCKER_VOLUME=${root_dir}/data \
 HF_TOKEN=`cat ${HOME}/.cache/huggingface/token` \
-uv run pytest --durations=0 -sv ${root_dir}/backends/gaudi/server/integration-tests
+pytest --durations=0 -s -vv ${root_dir}/integration-tests --gaudi
 
+run-integration-tests-with-all-models:
+DOCKER_VOLUME=${root_dir}/data \
+HF_TOKEN=`cat ${HOME}/.cache/huggingface/token` \
+pytest --durations=0 -s -vv ${root_dir}/integration-tests --gaudi --gaudi-all-models
+
 # This is used to capture the expected outputs for the integration tests offering an easy way to add more models to the integration tests
 capture-expected-outputs-for-integration-tests:
@@ -99,16 +99,26 @@ curl 127.0.0.1:8080/generate \
 
 ### Integration tests
 
+Install the dependencies:
+```bash
+pip install -r integration-tests/requirements.txt
+```
+
 To run the integration tests, you need to first build the image:
 ```bash
 make -C backends/gaudi image
 ```
 
-Then run the following command to run the integration tests:
+Then run the following command to run the integration tests (CI tests):
 ```bash
 make -C backends/gaudi run-integration-tests
 ```
 
+To run the integration tests with all models, you can run the following command:
+```bash
+make -C backends/gaudi run-integration-tests-with-all-models
+```
+
 To capture the expected outputs for the integration tests, you can run the following command:
 ```bash
 make -C backends/gaudi capture-expected-outputs-for-integration-tests
@@ -19,11 +19,7 @@ docker run -p 8080:80 \
 --ipc=host \
 -v $volume:/data \
 -e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e PREFILL_BATCH_BUCKET_SIZE=2 \
--e BATCH_BUCKET_SIZE=32 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=256 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
+ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
 --model-id $model \
 --max-input-tokens 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 2048 --max-batch-size 32 \
@@ -43,60 +39,7 @@ docker run -p 8080:80 \
 --ipc=host \
 -v $volume:/data \
 -e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e BATCH_BUCKET_SIZE=256 \
--e PREFILL_BATCH_BUCKET_SIZE=4 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=64 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
+ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
---model-id $model \
---sharded true --num-shard 8 \
---max-input-tokens 1024 --max-total-tokens 2048 \
---max-batch-prefill-tokens 4096 --max-batch-size 256 \
---max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 512
-```
-
-### Llama2-7B on 1 Card (BF16)
-
-```bash
-model=meta-llama/Llama-2-7b-chat-hf
-hf_token=YOUR_ACCESS_TOKEN
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
---runtime=habana \
---cap-add=sys_nice \
---ipc=host \
--v $volume:/data \
--e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e PREFILL_BATCH_BUCKET_SIZE=2 \
--e BATCH_BUCKET_SIZE=32 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=256 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
---model-id $model \
---max-input-tokens 1024 --max-total-tokens 2048 \
---max-batch-prefill-tokens 2048 --max-batch-size 32 \
---max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 64
-```
-
-### Llama2-70B on 8 cards (BF16)
-
-```bash
-model=meta-llama/Llama-2-70b-chat-hf
-hf_token=YOUR_ACCESS_TOKEN
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
---runtime=habana \
---cap-add=sys_nice \
---ipc=host \
--v $volume:/data \
--e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e BATCH_BUCKET_SIZE=256 \
--e PREFILL_BATCH_BUCKET_SIZE=4 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=64 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
 --model-id $model \
 --sharded true --num-shard 8 \
 --max-input-tokens 1024 --max-total-tokens 2048 \
@@ -115,9 +58,7 @@ docker run -p 8080:80 \
 --cap-add=sys_nice \
 --ipc=host \
 -v $volume:/data \
--e PREFILL_BATCH_BUCKET_SIZE=1 \
--e BATCH_BUCKET_SIZE=1 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
+ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
 --model-id $model \
 --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
 --max-total-tokens 8192 --max-batch-size 4
@@ -125,12 +66,12 @@ docker run -p 8080:80 \
 
 ## FP8 Precision
 
-Please refer to the [FP8 Precision](https://huggingface.co/docs/text-generation-inference/backends/gaudi_new#how-to-use-different-precision-formats) section for more details. You need to measure the statistics of the model first before running the model in FP8 precision.
+You could also set kv cache dtype to FP8 when launching the server, fp8_e4m3fn is supported in Gaudi
 
-## Llama3.1-8B on 1 Card (FP8)
+## Llama3-8B on 1 Card (FP8)
 
 ```bash
-model=meta-llama/Meta-Llama-3.1-8B-Instruct
+model=RedHatAI/Meta-Llama-3-8B-Instruct-FP8-KV
 hf_token=YOUR_ACCESS_TOKEN
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
@@ -139,25 +80,19 @@ docker run -p 8080:80 \
 --cap-add=sys_nice \
 --ipc=host \
 -v $volume:/data \
--v $PWD/quantization_config:/usr/src/quantization_config \
--v $PWD/hqt_output:/usr/src/hqt_output \
--e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
 -e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e PREFILL_BATCH_BUCKET_SIZE=2 \
--e BATCH_BUCKET_SIZE=32 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=256 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
+ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
 --model-id $model \
+--kv-cache-dtype fp8_e4m3fn \
 --max-input-tokens 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 2048 --max-batch-size 32 \
 --max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 64
 ```
 
-## Llama3.1-70B on 8 cards (FP8)
+## Llama3-70B on 8 cards (FP8)
 
 ```bash
-model=meta-llama/Meta-Llama-3.1-70B-Instruct
+model=RedHatAI/Meta-Llama-3-70B-Instruct-FP8
 hf_token=YOUR_ACCESS_TOKEN
 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
 
@@ -166,118 +101,12 @@ docker run -p 8080:80 \
 --cap-add=sys_nice \
 --ipc=host \
 -v $volume:/data \
--v $PWD/quantization_config:/usr/src/quantization_config \
--v $PWD/hqt_output:/usr/src/hqt_output \
--e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
 -e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e BATCH_BUCKET_SIZE=256 \
--e PREFILL_BATCH_BUCKET_SIZE=4 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=64 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
+ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
 --model-id $model \
+--kv-cache-dtype fp8_e4m3fn \
 --sharded true --num-shard 8 \
 --max-input-tokens 1024 --max-total-tokens 2048 \
 --max-batch-prefill-tokens 4096 --max-batch-size 256 \
 --max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 512
 ```
-
-## Llama2-7B on 1 Card (FP8)
-
-```bash
-model=meta-llama/Llama-2-7b-chat-hf
-hf_token=YOUR_ACCESS_TOKEN
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
---runtime=habana \
---cap-add=sys_nice \
---ipc=host \
--v $volume:/data \
--v $PWD/quantization_config:/usr/src/quantization_config \
--v $PWD/hqt_output:/usr/src/hqt_output \
--e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
--e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e PREFILL_BATCH_BUCKET_SIZE=2 \
--e BATCH_BUCKET_SIZE=32 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=256 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
---model-id $model \
---max-input-tokens 1024 --max-total-tokens 2048 \
---max-batch-prefill-tokens 2048 --max-batch-size 32 \
---max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 64
-```
-
-## Llama2-70B on 8 Cards (FP8)
-
-```bash
-model=meta-llama/Llama-2-70b-chat-hf
-hf_token=YOUR_ACCESS_TOKEN
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
---runtime=habana \
---cap-add=sys_nice \
---ipc=host \
--v $volume:/data \
--v $PWD/quantization_config:/usr/src/quantization_config \
--v $PWD/hqt_output:/usr/src/hqt_output \
--e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
--e HF_TOKEN=$hf_token \
--e MAX_TOTAL_TOKENS=2048 \
--e BATCH_BUCKET_SIZE=256 \
--e PREFILL_BATCH_BUCKET_SIZE=4 \
--e PAD_SEQUENCE_TO_MULTIPLE_OF=64 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
---model-id $model \
---sharded true --num-shard 8 \
---max-input-tokens 1024 --max-total-tokens 2048 \
---max-batch-prefill-tokens 4096 --max-batch-size 256 \
---max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 512
-```
-
-## Llava-v1.6-Mistral-7B on 1 Card (FP8)
-
-```bash
-model=llava-hf/llava-v1.6-mistral-7b-hf
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
---runtime=habana \
---cap-add=sys_nice \
---ipc=host \
--v $volume:/data \
--v $PWD/quantization_config:/usr/src/quantization_config \
--v $PWD/hqt_output:/usr/src/hqt_output \
--e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
--e PREFILL_BATCH_BUCKET_SIZE=1 \
--e BATCH_BUCKET_SIZE=1 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
---model-id $model \
---max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
---max-total-tokens 8192 --max-batch-size 4
-```
-
-## Llava-v1.6-Mistral-7B on 8 Cards (FP8)
-
-```bash
-model=llava-hf/llava-v1.6-mistral-7b-hf
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
---runtime=habana \
---cap-add=sys_nice \
---ipc=host \
--v $volume:/data \
--v $PWD/quantization_config:/usr/src/quantization_config \
--v $PWD/hqt_output:/usr/src/hqt_output \
--e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
--e PREFILL_BATCH_BUCKET_SIZE=1 \
--e BATCH_BUCKET_SIZE=1 \
-ghcr.io/huggingface/text-generation-inference:3.1.1-gaudi \
---model-id $model \
---sharded true --num-shard 8 \
---max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
---max-total-tokens 8192 --max-batch-size 4
-```
@@ -1,2 +0,0 @@
-[pytest]
-asyncio_mode = auto

@@ -1,7 +0,0 @@
-pytest >= 8.3.5
-pytest-asyncio >= 0.26.0
-docker >= 7.1.0
-Levenshtein >= 0.27.1
-loguru >= 0.7.3
-aiohttp >= 3.11.14
-text-generation
@@ -2,6 +2,7 @@ from dataclasses import dataclass
 import torch
 from typing import Optional, List, Dict
 import collections
+import torch.nn.functional as F
 
 _TYPE_CACHE = {}
 
@@ -15,6 +16,12 @@ class HPUPagedAttentionMetadata:
     block_usage: Optional[torch.Tensor]
     block_groups: Optional[torch.Tensor]
     attn_bias: Optional[torch.Tensor]
+    slots_in_window_mask: Optional[torch.Tensor] = None
+    block_list_in_window: Optional[torch.Tensor] = None
+    block_mapping_in_window: Optional[torch.Tensor] = None
+    block_usage_in_window: Optional[torch.Tensor] = None
+    block_groups_in_window: Optional[torch.Tensor] = None
+    attn_bias_in_window: Optional[torch.Tensor] = None
 
 
 def subtuple(
@@ -67,6 +74,12 @@ def trim_attn_metadata(metadata: HPUPagedAttentionMetadata) -> object:
             "block_usage",
             "block_groups",
             "attn_bias",
+            "slots_in_window_mask",
+            "block_list_in_window",
+            "block_mapping_in_window",
+            "block_usage_in_window",
+            "block_groups_in_window",
+            "attn_bias_in_window",
         ],
     )
     return attention_metadata
@@ -75,6 +88,7 @@ def trim_attn_metadata(metadata: HPUPagedAttentionMetadata) -> object:
 @dataclass
 class Seqlen:
     input_lengths: torch.Tensor
+    attn_mask: Optional[torch.Tensor] = None
 
     def __init__(
         self,
@@ -86,6 +100,48 @@ class Seqlen:
         # Flash decoding doesn't need to clamp
         return self
 
+    def make_sliding_window_bias(
+        self,
+        seq_lens: List[int],
+        window_size: Optional[int],
+        dtype: torch.dtype,
+        padded_input_len: Optional[int],
+        padded_bs: Optional[int],
+    ) -> List[torch.Tensor]:
+        attn_biases = []
+        for seq_len in seq_lens:
+            if seq_len != 0:
+                tensor = torch.full(
+                    (1, seq_len, seq_len),
+                    dtype=dtype,
+                    fill_value=1,
+                )
+                shift = 0
+                mask = torch.tril(tensor, diagonal=shift).to(dtype)  # type: ignore
+                if window_size is not None:
+                    mask = torch.triu(mask, diagonal=shift - window_size + 1)
+                mask = F.pad(
+                    mask,
+                    (
+                        padded_input_len - seq_len,
+                        0,
+                        padded_input_len - seq_len,
+                        0,
+                        0,
+                        0,
+                    ),
+                    value=0,
+                )
+            else:
+                mask = torch.full(
+                    (1, padded_input_len, padded_input_len),
+                    dtype=dtype,
+                    fill_value=0,
+                )
+            attn_biases.append(mask)
+        attn_biases = torch.stack(attn_biases, dim=0)
+        return attn_biases.to(torch.bool)
+
 
 def _async_h2d_tensor_copy(source, device="hpu"):
     if source is None:
@@ -124,6 +180,7 @@ def trim_seqlen_metadata(metadata: Seqlen) -> object:
         "TrimmedSeqlen",
         [
             "input_lengths",
+            "attn_mask",
         ],
     )
     return attention_metadata
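For intuition, here is a small standalone sketch of the masking trick used by the new `make_sliding_window_bias` helper above: a causal `tril` mask is intersected with a `triu` band of width `window_size`, then left-padded to the padded input length. The shapes and values below are toy examples, not the server's real buffers.

```python
import torch
import torch.nn.functional as F

# Toy reproduction of the sliding-window bias construction added above.
seq_len, window_size, padded_len = 5, 3, 6
ones = torch.full((1, seq_len, seq_len), fill_value=1.0)
mask = torch.tril(ones)                                # causal: key position <= query position
mask = torch.triu(mask, diagonal=-(window_size - 1))   # keep only the last `window_size` keys
mask = F.pad(mask, (padded_len - seq_len, 0, padded_len - seq_len, 0, 0, 0), value=0)
print(mask.to(torch.bool)[0])
# Each query row attends to itself and at most window_size - 1 previous positions.
```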
@@ -94,13 +94,13 @@ def attention(
         query,
         key,
         value,
-        attn_mask=None,
+        attn_mask=seqlen.attn_mask if window_size_left != -1 else None,
         dropout_p=0.0,
-        is_causal=causal,
+        is_causal=causal if window_size_left == -1 else False,
         scale=softmax_scale,
         softmax_mode="None",
         recompute_mode=None,
-        valid_sequence_lengths=seqlen.input_lengths,
+        valid_sequence_lengths=seqlen.input_lengths if window_size_left == -1 else None,
         padding_side="left",
     )
     attn_output = attn_output.transpose(1, 2).squeeze(0).contiguous()
@@ -119,6 +119,15 @@ def set_block_mapping(hpu_attention_meta: HPUPagedAttentionMetadata, batch_size)
     hpu_attention_meta = hpu_attention_meta._replace(
         attn_bias=attn_bias, block_mapping=block_mapping.to(dtype)
     )
+    if hpu_attention_meta.block_groups_in_window is not None:
+        block_mapping = torch.nn.functional.one_hot(
+            hpu_attention_meta.block_groups_in_window, num_classes=batch_size
+        )
+        attn_bias = torch.log(hpu_attention_meta.slots_in_window_mask.float())
+        hpu_attention_meta = hpu_attention_meta._replace(
+            attn_bias_in_window=attn_bias,
+            block_mapping_in_window=block_mapping.to(dtype),
+        )
     return hpu_attention_meta
 
 
@@ -132,6 +141,7 @@ def paged_attention(
     kv_scales: KVScales,
     softcap: Optional[float] = None,
     hpu_attention_meta: HPUPagedAttentionMetadata,
+    window_size_left: int = -1,
 ):
     batch_size, head_num, head_size = query.shape
     fp8_kv = kv_cache.dtype == torch.float8_e4m3fn
@@ -139,10 +149,26 @@ def paged_attention(
         query=query.view(batch_size, 1, head_num * head_size),
         key_cache=kv_cache.key,
         value_cache=kv_cache.value,
-        block_list=hpu_attention_meta.block_list,
-        block_mapping=hpu_attention_meta.block_mapping,
-        block_bias=hpu_attention_meta.attn_bias,
-        block_groups=hpu_attention_meta.block_groups,
+        block_list=(
+            hpu_attention_meta.block_list
+            if window_size_left == -1
+            else hpu_attention_meta.block_list_in_window
+        ),
+        block_mapping=(
+            hpu_attention_meta.block_mapping
+            if window_size_left == -1
+            else hpu_attention_meta.block_mapping_in_window
+        ),
+        block_bias=(
+            hpu_attention_meta.attn_bias
+            if window_size_left == -1
+            else hpu_attention_meta.attn_bias_in_window
+        ),
+        block_groups=(
+            hpu_attention_meta.block_groups
+            if window_size_left == -1
+            else hpu_attention_meta.block_groups_in_window
+        ),
         block_size=BLOCK_SIZE,
         scale=softmax_scale,
         matmul_qk_op=FP8Matmul(kv_scales.key_scale) if fp8_kv else Matmul(),
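One detail worth calling out in the `set_block_mapping` change above is `torch.log` applied to the 0/1 window mask: log turns allowed positions into 0.0 and masked positions into -inf, so the result can be added directly to attention scores before the softmax. A minimal illustration with toy tensors (not the server's real shapes):

```python
import torch

slots_in_window_mask = torch.tensor([[True, True, False],
                                     [False, True, True]])
bias = torch.log(slots_in_window_mask.float())   # 0.0 where allowed, -inf where masked
scores = torch.zeros(2, 3)
probs = torch.softmax(scores + bias, dim=-1)     # masked entries end up with probability 0
print(bias)
print(probs)
```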
@@ -89,13 +89,31 @@ class QuantLinear(nn.Module):
         g_idx_trivial = torch.tensor(
             g_idx_trivial, dtype=torch.int32, device=self.g_idx.device
         )
-        assert torch.equal(
-            self.g_idx, g_idx_trivial
-        ), "Non-trivial tensor g_idx is not supported"
+        sort_zeros = not (torch.equal(self.g_idx, g_idx_trivial))
         self.qzeros = self.qzeros.cpu()
         zeros = self.unpack_zeros_from_cuda_old_format()
-        new_qzeros = pack_tensor(zeros)
-        self.qzeros = new_qzeros.to(orig_device)
+        if sort_zeros:
+            zeros_group_1 = torch.zeros(
+                (self.infeatures, self.outfeatures),
+                dtype=zeros.dtype,
+                device=zeros.device,
+            )
+            scales = self.scales.cpu()
+            scale_group_1 = torch.zeros(
+                (self.infeatures, self.outfeatures),
+                dtype=scales.dtype,
+                device=scales.device,
+            )
+            for i in range(self.infeatures):
+                zeros_group_1[i] = zeros[self.g_idx[i]]
+                scale_group_1[i] = self.scales[self.g_idx[i]]
+            self.qzeros = pack_tensor(zeros_group_1).to(orig_device)
+            self.scales = scale_group_1.to(orig_device)
+            self.groupsize = 1
+            self.g_idx = None
+        else:
+            new_qzeros = pack_tensor(zeros)
+            self.qzeros = new_qzeros.to(orig_device)
 
     @classmethod
     def new(cls, bits, groupsize, infeatures, outfeatures, bias):
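The `sort_zeros` branch above handles GPTQ checkpoints with a non-trivial `g_idx` (act-order style group assignment) by expanding the per-group zero points and scales to one row per input feature, after which the layer behaves as if `groupsize` were 1 and `g_idx` can be dropped. A toy sketch of that expansion (illustrative tensors, not the packed GPTQ buffers):

```python
import torch

infeatures, outfeatures, n_groups = 6, 4, 3
g_idx = torch.tensor([2, 0, 1, 1, 0, 2])       # non-trivial group assignment per input row
zeros = torch.randn(n_groups, outfeatures)     # per-group zero points (already unpacked)
scales = torch.randn(n_groups, outfeatures)    # per-group scales

# Same result as the per-row loop in the diff, written with fancy indexing:
zeros_per_row = zeros[g_idx]                   # (infeatures, outfeatures)
scales_per_row = scales[g_idx]
assert zeros_per_row.shape == (infeatures, outfeatures)
# After this, every input row carries its own zero/scale, i.e. groupsize == 1.
```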
@@ -51,10 +51,12 @@ class FP8SparseMoELayer(nn.Module):
         self.rank = weights.process_group.rank()
         self.ep_rank = self.rank
         self.use_ep = os.getenv("USE_EXPERT_PARALLEL", "true").lower() == "true"
+        if (n_experts + self.world_size - 1) // self.world_size < 4:
+            self.use_ep = False
         if self.use_ep:
-            n_experts = (n_experts + self.world_size - 1) // self.world_size
-            self.ep_offset = self.ep_rank * n_experts
+            n_experts_per_rank = (n_experts + self.world_size - 1) // self.world_size
+            self.ep_offset = self.ep_rank * n_experts_per_rank
+            n_experts = min(n_experts_per_rank, n_experts - self.ep_offset)
         else:
             self.ep_offset = 0
 
@@ -7,6 +7,7 @@ from text_generation_server.utils.weights import UnquantizedWeight, Weights
 from vllm_hpu_extension.ops import VllmMixtureOfExpertsOp
 import habana_frameworks.torch as htorch
 import torch.nn.functional as F
+import os
 
 
 class UnquantizedSparseMoELayer(nn.Module):
@@ -39,6 +40,21 @@ class UnquantizedSparseMoELayer(nn.Module):
         self.weight_block_size = weights.weights_loader.weight_block_size
         self.scoring_func = scoring_func
         self.e_score_correction_bias = e_score_correction_bias
+        self.rank = weights.process_group.rank()
+        self.world_size = weights.process_group.size()
+        self.use_ep = os.getenv("USE_EXPERT_PARALLEL", "true").lower() == "true"
+        if (n_experts + self.world_size - 1) // self.world_size < 4:
+            self.use_ep = False
+        if self.use_ep:
+            n_experts_per_rank = (n_experts + self.world_size - 1) // self.world_size
+            self.ep_offset = self.rank * n_experts_per_rank
+            n_experts = min(n_experts_per_rank, n_experts - self.ep_offset)
+            experts_min = self.ep_offset
+            experts_max = self.ep_offset + n_experts - 1
+        else:
+            self.ep_offset = 0
+            experts_min = 0
+            experts_max = n_experts - 1
 
         self.gate_up_proj = _load_expert_multi_weights_col(
             prefix=prefix,
@@ -46,6 +62,8 @@ class UnquantizedSparseMoELayer(nn.Module):
             gate_proj_name=gate_proj_name,
             up_proj_name=up_proj_name,
             weights=weights,
+            use_ep=self.use_ep,
+            ep_offset=self.ep_offset,
         )
 
         self.down_proj = _load_expert_weights_row(
@@ -53,9 +71,11 @@ class UnquantizedSparseMoELayer(nn.Module):
             n_experts=n_experts,
             name=down_proj_name,
             weights=weights,
+            use_ep=self.use_ep,
+            ep_offset=self.ep_offset,
         )
 
-        self.MoeOp = VllmMixtureOfExpertsOp(n_experts, 0, n_experts - 1)
+        self.MoeOp = VllmMixtureOfExpertsOp(n_experts, experts_min, experts_max)
         for i in range(n_experts):
             self.MoeOp.w13_list[i].set_weight(self.gate_up_proj[i])
             self.MoeOp.w2_list[i].set_weight(self.down_proj[i])
@@ -87,12 +107,23 @@ def _load_expert_multi_weights_col(
     gate_proj_name: str,
     up_proj_name: str,
     weights: Weights,
+    use_ep: bool = False,
+    ep_offset: int = 0,
 ) -> torch.Tensor:
     all_weight = None
     for i in range(n_experts):
-        weight = weights.get_multi_weights_col(
-            [f"{prefix}.{i}.{gate_proj_name}", f"{prefix}.{i}.{up_proj_name}"], 0
-        )
+        if not use_ep:
+            weight = weights.get_multi_weights_col(
+                [f"{prefix}.{i}.{gate_proj_name}", f"{prefix}.{i}.{up_proj_name}"], 0
+            )
+        else:
+            weight = weights.get_multi_weights(
+                [
+                    f"{prefix}.{i+ep_offset}.{gate_proj_name}",
+                    f"{prefix}.{i+ep_offset}.{up_proj_name}",
+                ],
+                0,
+            )
 
         assert isinstance(weight, UnquantizedWeight)
 
@@ -116,12 +147,19 @@ def _load_expert_weights_row(
     n_experts: int,
     name: str,
     weights: Weights,
+    use_ep: bool = False,
+    ep_offset: int = 0,
 ) -> torch.Tensor:
     all_weight = None
     for i in range(n_experts):
-        weight = weights.get_weights_row(
-            f"{prefix}.{i}.{name}",
-        )
+        if not use_ep:
+            weight = weights.get_weights_row(
+                f"{prefix}.{i}.{name}",
+            )
+        else:
+            weight = weights.get_weights(
+                f"{prefix}.{i+ep_offset}.{name}",
+            )
 
         assert isinstance(weight, UnquantizedWeight)
 
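Both MoE layers above now partition experts across ranks with the same arithmetic: ceil-divide the expert count by the world size, offset by rank, and give the last rank whatever remains; expert parallelism is also disabled when the per-rank share would be fewer than 4 experts. A small standalone sketch of that partitioning (hypothetical helper, mirroring the diff):

```python
def partition_experts(n_experts, world_size, rank):
    """Return (ep_offset, local_expert_count) for this rank."""
    per_rank = (n_experts + world_size - 1) // world_size  # ceil division
    offset = rank * per_rank
    local = min(per_rank, n_experts - offset)              # last rank may hold fewer
    return offset, local

# Example: 10 experts over 3 ranks -> (0, 4), (4, 4), (8, 2)
for r in range(3):
    print(r, partition_experts(10, 3, r))
```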
@@ -36,7 +36,9 @@ class PositionRotaryEmbedding(nn.Module):
         self._sin_k_cached = None
         self.scaling_factor = scaling_factor
         self.dynamic_args = None
-        self.max_position_embeddings = max_position_embeddings
+        self._update_cos_sin_cache(
+            torch.float32, inv_freq.device, max_position_embeddings
+        )
 
     def forward(
         self,
@@ -268,9 +270,7 @@ class PositionRotaryEmbedding(nn.Module):
         self._sin_cached = torch.sin(freqs).to(dtype)
 
     def get_cos_sin(self, position_ids: torch.Tensor):
-        self._update_cos_sin_cache(
-            torch.float32, position_ids.device, seqlen=self.max_position_embeddings
-        )
         cos = torch.index_select(self._cos_cached, 0, position_ids)
         sin = torch.index_select(self._sin_cached, 0, position_ids)
 
@@ -298,6 +298,9 @@ class SuRotaryEmbedding(PositionRotaryEmbedding):
         self._cos_k_cached = None
         self._sin_k_cached = None
         self.dynamic_args = None
+        self._update_cos_sin_cache(
+            torch.float32, short_inv_freq.device, max_position_embeddings
+        )
 
     def _update_cos_sin_cache(self, dtype, device, seqlen):
         # Reset the tables if the sequence length has changed,
@@ -351,6 +354,9 @@ class Phi3LongRoPEScaledRotaryEmbedding(PositionRotaryEmbedding):
         self._cos_k_cached = None
         self._sin_k_cached = None
         self.dynamic_args = None
+        self._update_cos_sin_cache(
+            torch.float32, short_inv_freq.device, max_position_embeddings
+        )
 
     def _update_cos_sin_cache(self, dtype, device, seqlen):
         if (
@@ -592,9 +598,6 @@ class RotaryPositionEmbeddingMultimodalSections(PositionRotaryEmbedding):
         position_ids: torch.Tensor,
     ):
         slen = position_ids.shape[0]
-        self._update_cos_sin_cache(
-            torch.float32, position_ids.device, seqlen=self.max_position_embeddings
-        )
 
         cos = self._cos_cached[position_ids].gather(1, self._sections[:slen])
         sin = self._sin_cached[position_ids].gather(1, self._sections[:slen])
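The rotary changes above move `_update_cos_sin_cache` into the constructors, so the cos/sin tables are built once for `max_position_embeddings` and `get_cos_sin` becomes a pure table lookup. A rough sketch of that precompute-then-index pattern (toy dimensions; the real classes derive `inv_freq` from the model config):

```python
import torch

dim, max_pos = 8, 32
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
freqs = torch.outer(torch.arange(max_pos, dtype=torch.float32), inv_freq)
cos_cached, sin_cached = torch.cos(freqs), torch.sin(freqs)  # built once, up front

position_ids = torch.tensor([0, 1, 5, 7])
cos = torch.index_select(cos_cached, 0, position_ids)  # what get_cos_sin now does
sin = torch.index_select(sin_cached, 0, position_ids)
```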
@@ -67,6 +67,10 @@ try:
     from text_generation_server.models.custom_modeling.flash_gemma2_modeling import (
         FlashGemma2ForCausalLM,
     )
+    from text_generation_server.models.custom_modeling.flash_gemma3_modeling import (
+        Gemma3ForConditionalGeneration,
+        FlashGemma3ForCausalLM,
+    )
     from text_generation_server.models.custom_modeling.flash_dbrx_modeling import (
         FlashDbrxForCausalLM,
         DbrxConfig,
@@ -220,6 +224,16 @@ class ModelType(enum.Enum):
         "name": "Gemma2",
         "url": "https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315",
     }
+    GEMMA3 = {
+        "type": "gemma3",
+        "name": "Gemma3",
+        "url": "https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d",
+    }
+    GEMMA3_TEXT = {
+        "type": "gemma3_text",
+        "name": "Gemma3 Text",
+        "url": "https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d",
+    }
     COHERE = {
         "type": "cohere",
         "name": "Cohere",
@@ -630,6 +644,7 @@ def get_model(
             quantize=quantize,
             speculator=speculator,
             dtype=dtype,
+            kv_cache_dtype=kv_cache_dtype,
             default_dtype=torch.bfloat16,
             trust_remote_code=trust_remote_code,
             lora_adapter_ids=lora_adapter_ids,
@@ -675,6 +690,34 @@ def get_model(
             trust_remote_code=trust_remote_code,
             lora_adapter_ids=lora_adapter_ids,
         )
+    elif model_type == GEMMA3:
+        return FlashVlmCausalLM(
+            model_id=model_id,
+            model_class=Gemma3ForConditionalGeneration,
+            revision=revision,
+            quantize=quantize,
+            speculator=speculator,
+            dtype=dtype,
+            kv_cache_dtype=kv_cache_dtype,
+            default_dtype=torch.bfloat16,
+            trust_remote_code=trust_remote_code,
+            lora_adapter_ids=lora_adapter_ids,
+            support_chunking=False,
+        )
+    elif model_type == GEMMA3_TEXT:
+        return FlashCausalLM(
+            model_id=model_id,
+            model_class=FlashGemma3ForCausalLM,
+            revision=revision,
+            quantize=quantize,
+            speculator=speculator,
+            dtype=dtype,
+            kv_cache_dtype=kv_cache_dtype,
+            # Works better for these models
+            default_dtype=torch.bfloat16,
+            trust_remote_code=trust_remote_code,
+            lora_adapter_ids=lora_adapter_ids,
+        )
     elif model_type == COHERE:
         return FlashCausalLM(
             model_id=model_id,
@@ -864,6 +907,7 @@ def get_model(
             quantize=quantize,
             speculator=speculator,
             dtype=dtype,
+            kv_cache_dtype=kv_cache_dtype,
             default_dtype=torch.bfloat16,
             trust_remote_code=trust_remote_code,
             lora_adapter_ids=lora_adapter_ids,
|
|||||||
prefix: str,
|
prefix: str,
|
||||||
config,
|
config,
|
||||||
weights,
|
weights,
|
||||||
|
rotary_emb,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.num_heads = config.num_attention_heads
|
self.num_heads = config.num_attention_heads
|
||||||
self.hidden_size = config.hidden_size
|
self.hidden_size = config.hidden_size
|
||||||
self.head_size = self.hidden_size // self.num_heads
|
self.head_size = self.hidden_size // self.num_heads
|
||||||
|
|
||||||
self.rotary_emb = CohereRotary.static(
|
self.rotary_emb = rotary_emb
|
||||||
config=config,
|
|
||||||
dim=self.head_size,
|
|
||||||
base=config.rope_theta,
|
|
||||||
device=weights.device,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.softmax_scale = self.head_size**-0.5
|
self.softmax_scale = self.head_size**-0.5
|
||||||
|
|
||||||
@ -325,11 +321,14 @@ class CohereMLP(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class FlashCohereLayer(nn.Module):
|
class FlashCohereLayer(nn.Module):
|
||||||
def __init__(self, prefix: str, layer_id, config, weights):
|
def __init__(self, prefix: str, layer_id, config, weights, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
prefix = f"{prefix}.layers.{layer_id}"
|
prefix = f"{prefix}.layers.{layer_id}"
|
||||||
self.self_attn = FlashCohereAttention(
|
self.self_attn = FlashCohereAttention(
|
||||||
prefix=f"{prefix}.self_attn", config=config, weights=weights
|
prefix=f"{prefix}.self_attn",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
self.mlp = CohereMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
self.mlp = CohereMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
||||||
|
|
||||||
@ -385,6 +384,12 @@ class FlashCohereModel(torch.nn.Module):
|
|||||||
self.embed_tokens = TensorParallelEmbedding(
|
self.embed_tokens = TensorParallelEmbedding(
|
||||||
prefix=f"{prefix}.embed_tokens", weights=weights
|
prefix=f"{prefix}.embed_tokens", weights=weights
|
||||||
)
|
)
|
||||||
|
rotary_emb = CohereRotary.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.hidden_size // config.num_attention_heads,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
self.layers = nn.ModuleList(
|
self.layers = nn.ModuleList(
|
||||||
[
|
[
|
||||||
FlashCohereLayer(
|
FlashCohereLayer(
|
||||||
@ -392,6 +397,7 @@ class FlashCohereModel(torch.nn.Module):
|
|||||||
layer_id,
|
layer_id,
|
||||||
config,
|
config,
|
||||||
weights,
|
weights,
|
||||||
|
rotary_emb,
|
||||||
)
|
)
|
||||||
for layer_id in range(config.num_hidden_layers)
|
for layer_id in range(config.num_hidden_layers)
|
||||||
]
|
]
|
||||||
|
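The Cohere refactor above, like the DBRX change that follows, builds the rotary embedding once in the model and hands the same instance to every layer instead of re-creating it inside each attention module. A minimal skeleton of that sharing pattern (hypothetical classes, not the real modules):

```python
import torch.nn as nn

class Attention(nn.Module):
    def __init__(self, rotary_emb):
        super().__init__()
        self.rotary_emb = rotary_emb  # shared instance, not rebuilt per layer

class Layer(nn.Module):
    def __init__(self, rotary_emb):
        super().__init__()
        self.self_attn = Attention(rotary_emb)

class Model(nn.Module):
    def __init__(self, num_layers, rotary_emb):
        super().__init__()
        self.layers = nn.ModuleList(Layer(rotary_emb) for _ in range(num_layers))

shared = object()  # stands in for CohereRotary.static(...) / PositionRotaryEmbedding.static(...)
model = Model(num_layers=4, rotary_emb=shared)
assert all(layer.self_attn.rotary_emb is shared for layer in model.layers)
```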
@@ -263,6 +263,7 @@ class DbrxAttention(torch.nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.clip_qkv = config.attn_config.clip_qkv
@@ -270,12 +271,7 @@ class DbrxAttention(torch.nn.Module):
         self.hidden_size = config.d_model
         self.head_size = self.hidden_size // self.num_heads
 
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=config.attn_config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb
 
         self.softmax_scale = self.head_size**-0.5
 
@@ -370,13 +366,17 @@ class DbrxNormAttentionNorm(nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.norm_1 = FastLayerNorm.load_no_bias(
             prefix=f"{prefix}.norm_1", weights=weights, eps=1e-5
         )
         self.self_attn = DbrxAttention(
-            prefix=f"{prefix}.attn", config=config, weights=weights
+            prefix=f"{prefix}.attn",
+            config=config,
+            weights=weights,
+            rotary_emb=rotary_emb,
         )
         self.norm_2 = FastLayerNorm.load_no_bias(
             prefix=f"{prefix}.norm_2",
@@ -601,12 +601,15 @@ class DenseMoE(nn.Module):
 
 
 class DbrxLayer(nn.Module):
-    def __init__(self, prefix: str, layer_id, config, weights):
+    def __init__(self, prefix: str, layer_id, config, weights, rotary_emb):
         super().__init__()
         prefix = f"{prefix}.blocks.{layer_id}"
 
         self.attn = DbrxNormAttentionNorm(
-            prefix=f"{prefix}.norm_attn_norm", config=config, weights=weights
+            prefix=f"{prefix}.norm_attn_norm",
+            config=config,
+            weights=weights,
+            rotary_emb=rotary_emb,
         )
 
         moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE
@@ -649,6 +652,12 @@ class DbrxModel(torch.nn.Module):
         self.embed_tokens = TensorParallelEmbedding(
             prefix=f"{prefix}.wte", weights=weights
         )
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=config.d_model // config.n_heads,
+            base=config.attn_config.rope_theta,
+            device=weights.device,
+        )
 
         self.layers = nn.ModuleList(
             [
@@ -657,6 +666,7 @@ class DbrxModel(torch.nn.Module):
                     layer_id,
                     config,
                     weights,
+                    rotary_emb,
                 )
                 for layer_id in range(config.n_layers)
             ]
@@ -28,11 +28,12 @@ from text_generation_server.layers import (
     TensorParallelEmbedding,
     TensorParallelRowLinear,
     get_linear,
+    Fp8Linear,
 )
 from text_generation_server.layers.attention import (
     Seqlen,
     attention,
-    paged_attention,
+    paged_attention_mla,
     set_block_mapping,
     HPUPagedAttentionMetadata,
 )
@@ -44,6 +45,18 @@ from text_generation_server.utils.weights import Weights
 import habana_frameworks.torch as htorch
 
 
+def get_and_maybe_dequant_weights(layer: torch.nn.Module) -> torch.Tensor:
+    if isinstance(layer, Fp8Linear):
+        eye = torch.eye(
+            layer.qweight.shape[-1], dtype=torch.bfloat16, device=layer.qweight.device
+        )
+        dequant_weights = layer(eye)
+        del eye
+        # standardize to (output, input)
+        return dequant_weights.T
+    return layer.weight
+
+
 class DeepseekV2Config(PretrainedConfig):
     def __init__(
         self,
@@ -156,6 +169,7 @@ class DeepseekV2Attention(torch.nn.Module):
         prefix: str,
         config,
         weights: Weights,
+        rotary_emb,
     ):
         super().__init__()
         self.num_heads = config.num_attention_heads
@@ -167,13 +181,7 @@ class DeepseekV2Attention(torch.nn.Module):
         self.head_size = config.qk_nope_head_dim + config.qk_rope_head_dim
         self.value_head_size = config.v_head_dim
         self.head_pad_size = max(self.head_size, self.value_head_size)
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.qk_rope_head_dim,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb
 
         mscale = get_mscale(
             self.rotary_emb.scaling_factor, self.rotary_emb.mscale_all_dim
@@ -251,6 +259,45 @@ class DeepseekV2Attention(torch.nn.Module):
             0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
         ).repeat_interleave(self.num_groups)
 
+        kv_b_proj_weight = get_and_maybe_dequant_weights(self.kv_b_proj.linear).T
+        kv_b_proj_weight = kv_b_proj_weight.view(
+            self.kv_lora_rank,
+            self.num_heads,
+            self.qk_nope_head_dim + self.value_head_size,
+        )
+
+        W_UK, W_UV = kv_b_proj_weight.split(
+            [self.qk_nope_head_dim, self.value_head_size], dim=-1
+        )
+        # Convert from (L, N, V) to (N, L, V)
+        self.W_UV = W_UV.transpose(0, 1)
+        # Convert from (L, N, P) to (N, P, L)
+        self.W_UK_T = W_UK.permute(1, 2, 0)
+
+    def _q_proj_and_k_up_proj(self, x):
+        q_proj = self.q_proj if self.q_lora_rank is None else self.q_b_proj
+        q_nope, q_pe = (
+            q_proj(x)
+            .view(-1, self.num_heads, self.head_size)
+            .split([self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
+        )
+
+        # Convert from (B, N, P) to (N, B, P)
+        q_nope = q_nope.transpose(0, 1)
+        # Multiply (N, B, P) x (N, P, L) -> (N, B, L)
+        ql_nope = torch.bmm(q_nope, self.W_UK_T)
+        # Convert from (N, B, L) to (B, N, L)
+        return ql_nope.transpose(0, 1), q_pe
+
+    def _v_up_proj_and_o_proj(self, x):
+        # Convert from (B, N, L) to (N, B, L)
+        x = x.view(-1, self.num_heads, self.kv_lora_rank).transpose(0, 1)
+        # Multiply (N, B, L) x (N, L, V) -> (N, B, V)
+        x = torch.bmm(x, self.W_UV)
+        # Convert from (N, B, V) to (B, N * V)
+        x = x.transpose(0, 1).reshape(-1, self.num_heads * self.value_head_size)
+        return self.o_proj(x)
+
     def forward(
         self,
         hidden_states: torch.Tensor,
@ -263,14 +310,9 @@ class DeepseekV2Attention(torch.nn.Module):
|
|||||||
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
|
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
|
||||||
):
|
):
|
||||||
if self.q_lora_rank is None:
|
if self.q_lora_rank is None:
|
||||||
query = self.q_proj(hidden_states)
|
hidden_states_or_q_c = hidden_states
|
||||||
else:
|
else:
|
||||||
query = self.q_b_proj(self.q_a_layernorm(self.q_a_proj(hidden_states))[0])
|
hidden_states_or_q_c = self.q_a_layernorm(self.q_a_proj(hidden_states))[0]
|
||||||
query = query.view(-1, self.num_heads, self.head_size)
|
|
||||||
|
|
||||||
_, query_pe = torch.split(
|
|
||||||
query, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
|
|
||||||
)
|
|
||||||
|
|
||||||
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
|
compressed_kv = self.kv_a_proj_with_mqa(hidden_states)
|
||||||
compressed_kv, key_pe = torch.split(
|
compressed_kv, key_pe = torch.split(
|
||||||
@ -278,13 +320,18 @@ class DeepseekV2Attention(torch.nn.Module):
|
|||||||
)
|
)
|
||||||
|
|
||||||
key_pe = key_pe.view(-1, 1, self.qk_rope_head_dim)
|
key_pe = key_pe.view(-1, 1, self.qk_rope_head_dim)
|
||||||
kv = self.kv_b_proj(self.kv_a_layernorm(compressed_kv.contiguous())[0]).view(
|
kv_c_normed = self.kv_a_layernorm(compressed_kv.contiguous())[0]
|
||||||
-1, self.num_key_value_heads, self.qk_nope_head_dim + self.value_head_size
|
|
||||||
)
|
|
||||||
|
|
||||||
key_nope, value = torch.split(
|
# Prefill
|
||||||
kv, [self.qk_nope_head_dim, self.value_head_size], dim=-1
|
if cu_seqlen_prefill is not None:
|
||||||
)
|
q_proj = self.q_proj if self.q_lora_rank is None else self.q_b_proj
|
||||||
|
query = q_proj(hidden_states_or_q_c)
|
||||||
|
query = query.view(-1, self.num_heads, self.head_size)
|
||||||
|
query_nope, query_pe = torch.split(
|
||||||
|
query, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
query_nope, query_pe = self._q_proj_and_k_up_proj(hidden_states_or_q_c)
|
||||||
|
|
||||||
batch_size, heads, head_dim = query_pe.shape
|
batch_size, heads, head_dim = query_pe.shape
|
||||||
query_pe = (
|
query_pe = (
|
||||||
@ -299,33 +346,47 @@ class DeepseekV2Attention(torch.nn.Module):
|
|||||||
.reshape(batch_size, heads, head_dim)
|
.reshape(batch_size, heads, head_dim)
|
||||||
)
|
)
|
||||||
self.rotary_emb(query_pe, key_pe, cos, sin)
|
self.rotary_emb(query_pe, key_pe, cos, sin)
|
||||||
|
latent_vec_k = torch.concat(
|
||||||
|
(kv_c_normed, key_pe.view(-1, self.qk_rope_head_dim)), dim=-1
|
||||||
|
)
|
||||||
|
latent_vec_k = latent_vec_k.view(-1, self.qk_rope_head_dim + self.kv_lora_rank)
|
||||||
|
|
||||||
query[..., self.qk_nope_head_dim :] = query_pe
|
latent_vec_k = latent_vec_k.unflatten(0, (slots.size(0), -1))
|
||||||
key = torch.empty_like(query)
|
|
||||||
key[..., : self.qk_nope_head_dim] = key_nope
|
|
||||||
key[..., self.qk_nope_head_dim :] = key_pe
|
|
||||||
|
|
||||||
# We need to pad the heads because Flash Attention does not support
|
|
||||||
# qk and v with different head sizes.
|
|
||||||
query = torch.nn.functional.pad(
|
|
||||||
query, (0, self.head_pad_size - self.head_size), value=0
|
|
||||||
)
|
|
||||||
key = torch.nn.functional.pad(
|
|
||||||
key, (0, self.head_pad_size - self.head_size), value=0
|
|
||||||
)
|
|
||||||
value = torch.nn.functional.pad(
|
|
||||||
value, (0, self.head_pad_size - self.value_head_size), value=0
|
|
||||||
)
|
|
||||||
|
|
||||||
kv_cache.store(
|
kv_cache.store(
|
||||||
key=key,
|
key=latent_vec_k,
|
||||||
value=value,
|
value=None,
|
||||||
slots=slots,
|
slots=slots,
|
||||||
kv_scales=self.kv_scales,
|
kv_scales=self.kv_scales,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Prefill
|
|
||||||
if cu_seqlen_prefill is not None:
|
if cu_seqlen_prefill is not None:
|
||||||
|
kv = self.kv_b_proj(kv_c_normed).view(
|
||||||
|
-1,
|
||||||
|
self.num_key_value_heads,
|
||||||
|
self.qk_nope_head_dim + self.value_head_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
key_nope, value = torch.split(
|
||||||
|
kv, [self.qk_nope_head_dim, self.value_head_size], dim=-1
|
||||||
|
)
|
||||||
|
query[..., self.qk_nope_head_dim :] = query_pe
|
||||||
|
key = torch.empty_like(query)
|
||||||
|
key[..., : self.qk_nope_head_dim] = key_nope
|
||||||
|
key[..., self.qk_nope_head_dim :] = key_pe
|
||||||
|
|
||||||
|
# We need to pad the heads because Flash Attention does not support
|
||||||
|
# qk and v with different head sizes.
|
||||||
|
query = torch.nn.functional.pad(
|
||||||
|
query, (0, self.head_pad_size - self.head_size), value=0
|
||||||
|
)
|
||||||
|
key = torch.nn.functional.pad(
|
||||||
|
key, (0, self.head_pad_size - self.head_size), value=0
|
||||||
|
)
|
||||||
|
value = torch.nn.functional.pad(
|
||||||
|
value, (0, self.head_pad_size - self.value_head_size), value=0
|
||||||
|
)
|
||||||
|
|
||||||
# flash attention
|
# flash attention
|
||||||
attn_output = attention(
|
attn_output = attention(
|
||||||
query=query,
|
query=query,
|
||||||
@ -336,9 +397,15 @@ class DeepseekV2Attention(torch.nn.Module):
|
|||||||
seqlen=seqlen,
|
seqlen=seqlen,
|
||||||
softmax_scale=self.softmax_scale,
|
softmax_scale=self.softmax_scale,
|
||||||
)
|
)
|
||||||
# Decode
|
attn_output = attn_output[..., : self.value_head_size]
|
||||||
|
|
||||||
|
return self.o_proj(
|
||||||
|
attn_output.reshape(-1, self.num_heads * self.value_head_size)
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
attn_output = paged_attention(
|
# Decode
|
||||||
|
query = torch.cat([query_nope, query_pe], dim=-1)
|
||||||
|
attn_output = paged_attention_mla(
|
||||||
query,
|
query,
|
||||||
kv_cache,
|
kv_cache,
|
||||||
self.kv_head_mapping,
|
self.kv_head_mapping,
|
||||||
@ -346,14 +413,10 @@ class DeepseekV2Attention(torch.nn.Module):
|
|||||||
seqlen,
|
seqlen,
|
||||||
kv_scales=self.kv_scales,
|
kv_scales=self.kv_scales,
|
||||||
hpu_attention_meta=hpu_attention_meta,
|
hpu_attention_meta=hpu_attention_meta,
|
||||||
|
kv_lora_rank=self.kv_lora_rank,
|
||||||
)
|
)
|
||||||
|
attn_output = self._v_up_proj_and_o_proj(attn_output)
|
||||||
# Remove padding.
|
return attn_output
|
||||||
attn_output = attn_output[..., : self.value_head_size]
|
|
||||||
|
|
||||||
return self.o_proj(
|
|
||||||
attn_output.reshape(-1, self.num_heads * self.value_head_size)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class DeepseekV2MLP(nn.Module):
|
class DeepseekV2MLP(nn.Module):
|
||||||
@ -459,7 +522,7 @@ class DeepseekV2MoE(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class DeepseekV2Layer(nn.Module):
|
class DeepseekV2Layer(nn.Module):
|
||||||
def __init__(self, prefix, layer_id, config, weights):
|
def __init__(self, prefix, layer_id, config, weights, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
prefix = f"{prefix}.layers.{layer_id}"
|
prefix = f"{prefix}.layers.{layer_id}"
|
||||||
|
|
||||||
@ -467,6 +530,7 @@ class DeepseekV2Layer(nn.Module):
|
|||||||
prefix=f"{prefix}.self_attn",
|
prefix=f"{prefix}.self_attn",
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
|
|
||||||
if (
|
if (
|
||||||
@ -541,6 +605,12 @@ class DeepseekV2Model(torch.nn.Module):
|
|||||||
prefix=f"{prefix}.embed_tokens", weights=weights
|
prefix=f"{prefix}.embed_tokens", weights=weights
|
||||||
)
|
)
|
||||||
|
|
||||||
|
rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.qk_rope_head_dim,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
self.layers = nn.ModuleList(
|
self.layers = nn.ModuleList(
|
||||||
[
|
[
|
||||||
DeepseekV2Layer(
|
DeepseekV2Layer(
|
||||||
@ -548,6 +618,7 @@ class DeepseekV2Model(torch.nn.Module):
|
|||||||
layer_id,
|
layer_id,
|
||||||
config,
|
config,
|
||||||
weights,
|
weights,
|
||||||
|
rotary_emb,
|
||||||
)
|
)
|
||||||
for layer_id in range(config.num_hidden_layers)
|
for layer_id in range(config.num_hidden_layers)
|
||||||
]
|
]
|
||||||
|
@ -169,6 +169,7 @@ class DeepseekV3Attention(torch.nn.Module):
|
|||||||
prefix: str,
|
prefix: str,
|
||||||
config,
|
config,
|
||||||
weights: Weights,
|
weights: Weights,
|
||||||
|
rotary_emb,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.num_heads = config.num_attention_heads
|
self.num_heads = config.num_attention_heads
|
||||||
@ -180,13 +181,7 @@ class DeepseekV3Attention(torch.nn.Module):
|
|||||||
self.head_size = config.qk_nope_head_dim + config.qk_rope_head_dim
|
self.head_size = config.qk_nope_head_dim + config.qk_rope_head_dim
|
||||||
self.value_head_size = config.v_head_dim
|
self.value_head_size = config.v_head_dim
|
||||||
self.head_pad_size = max(self.head_size, self.value_head_size)
|
self.head_pad_size = max(self.head_size, self.value_head_size)
|
||||||
|
self.rotary_emb = rotary_emb
|
||||||
self.rotary_emb = PositionRotaryEmbedding.static(
|
|
||||||
config=config,
|
|
||||||
dim=self.qk_rope_head_dim,
|
|
||||||
base=config.rope_theta,
|
|
||||||
device=weights.device,
|
|
||||||
)
|
|
||||||
|
|
||||||
mscale = get_mscale(
|
mscale = get_mscale(
|
||||||
self.rotary_emb.scaling_factor, self.rotary_emb.mscale_all_dim
|
self.rotary_emb.scaling_factor, self.rotary_emb.mscale_all_dim
|
||||||
@ -535,7 +530,7 @@ class DeepseekV3MoE(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class DeepseekV3Layer(nn.Module):
|
class DeepseekV3Layer(nn.Module):
|
||||||
def __init__(self, prefix, layer_id, config, weights):
|
def __init__(self, prefix, layer_id, config, weights, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
prefix = f"{prefix}.layers.{layer_id}"
|
prefix = f"{prefix}.layers.{layer_id}"
|
||||||
|
|
||||||
@ -543,6 +538,7 @@ class DeepseekV3Layer(nn.Module):
|
|||||||
prefix=f"{prefix}.self_attn",
|
prefix=f"{prefix}.self_attn",
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
|
|
||||||
if (
|
if (
|
||||||
@ -616,6 +612,12 @@ class DeepseekV3Model(torch.nn.Module):
|
|||||||
self.embed_tokens = TensorParallelEmbedding(
|
self.embed_tokens = TensorParallelEmbedding(
|
||||||
prefix=f"{prefix}.embed_tokens", weights=weights
|
prefix=f"{prefix}.embed_tokens", weights=weights
|
||||||
)
|
)
|
||||||
|
rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.qk_rope_head_dim,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
|
|
||||||
self.layers = nn.ModuleList(
|
self.layers = nn.ModuleList(
|
||||||
[
|
[
|
||||||
@ -624,6 +626,7 @@ class DeepseekV3Model(torch.nn.Module):
|
|||||||
layer_id,
|
layer_id,
|
||||||
config,
|
config,
|
||||||
weights,
|
weights,
|
||||||
|
rotary_emb,
|
||||||
)
|
)
|
||||||
for layer_id in range(config.num_hidden_layers)
|
for layer_id in range(config.num_hidden_layers)
|
||||||
]
|
]
|
||||||
|
@ -166,7 +166,14 @@ def _load_gqa(config, prefix: str, weights):
|
|||||||
|
|
||||||
class FlashGemma2Attention(torch.nn.Module):
|
class FlashGemma2Attention(torch.nn.Module):
|
||||||
def __init__(
|
def __init__(
|
||||||
self, prefix: str, config, weights, layer_id, causal: bool, is_sliding: bool
|
self,
|
||||||
|
prefix: str,
|
||||||
|
config,
|
||||||
|
weights,
|
||||||
|
layer_id,
|
||||||
|
causal: bool,
|
||||||
|
is_sliding: bool,
|
||||||
|
rotary_emb,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.num_heads = config.num_attention_heads
|
self.num_heads = config.num_attention_heads
|
||||||
@ -176,13 +183,7 @@ class FlashGemma2Attention(torch.nn.Module):
|
|||||||
self.window_size = config.sliding_window
|
self.window_size = config.sliding_window
|
||||||
else:
|
else:
|
||||||
self.window_size = -1
|
self.window_size = -1
|
||||||
|
self.rotary_emb = rotary_emb
|
||||||
self.rotary_emb = PositionRotaryEmbedding.static(
|
|
||||||
config=config,
|
|
||||||
dim=self.head_size,
|
|
||||||
base=config.rope_theta,
|
|
||||||
device=weights.device,
|
|
||||||
)
|
|
||||||
|
|
||||||
# self.softmax_scale = self.head_size**-0.5
|
# self.softmax_scale = self.head_size**-0.5
|
||||||
self.softmax_scale = config.query_pre_attn_scalar**-0.5
|
self.softmax_scale = config.query_pre_attn_scalar**-0.5
|
||||||
@ -287,6 +288,7 @@ class FlashGemma2Attention(torch.nn.Module):
|
|||||||
softcap=self.softcap,
|
softcap=self.softcap,
|
||||||
kv_scales=self.kv_scales,
|
kv_scales=self.kv_scales,
|
||||||
hpu_attention_meta=hpu_attention_meta,
|
hpu_attention_meta=hpu_attention_meta,
|
||||||
|
window_size_left=self.window_size,
|
||||||
)
|
)
|
||||||
|
|
||||||
return self.o_proj(
|
return self.o_proj(
|
||||||
@ -354,7 +356,14 @@ class Gemma2MLP(nn.Module):
|
|||||||
|
|
||||||
class FlashGemma2Layer(nn.Module):
|
class FlashGemma2Layer(nn.Module):
|
||||||
def __init__(
|
def __init__(
|
||||||
self, prefix: str, config, weights, layer_id, causal: bool, is_sliding: bool
|
self,
|
||||||
|
prefix: str,
|
||||||
|
config,
|
||||||
|
weights,
|
||||||
|
layer_id,
|
||||||
|
causal: bool,
|
||||||
|
is_sliding: bool,
|
||||||
|
rotary_emb,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.self_attn = FlashGemma2Attention(
|
self.self_attn = FlashGemma2Attention(
|
||||||
@ -364,6 +373,7 @@ class FlashGemma2Layer(nn.Module):
|
|||||||
layer_id=layer_id,
|
layer_id=layer_id,
|
||||||
causal=causal,
|
causal=causal,
|
||||||
is_sliding=is_sliding,
|
is_sliding=is_sliding,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
self.mlp = Gemma2MLP(
|
self.mlp = Gemma2MLP(
|
||||||
prefix=f"{prefix}.mlp", config=config, weights=weights, layer_id=layer_id
|
prefix=f"{prefix}.mlp", config=config, weights=weights, layer_id=layer_id
|
||||||
@ -435,6 +445,13 @@ class FlashGemma2Model(torch.nn.Module):
|
|||||||
process_group = weights.process_group
|
process_group = weights.process_group
|
||||||
self.tp_rank = process_group.rank()
|
self.tp_rank = process_group.rank()
|
||||||
self.tp_world_size = process_group.size()
|
self.tp_world_size = process_group.size()
|
||||||
|
rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.head_dim,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
|
|
||||||
self.layers = nn.ModuleList(
|
self.layers = nn.ModuleList(
|
||||||
[
|
[
|
||||||
FlashGemma2Layer(
|
FlashGemma2Layer(
|
||||||
@ -444,6 +461,7 @@ class FlashGemma2Model(torch.nn.Module):
|
|||||||
layer_id=layer_id,
|
layer_id=layer_id,
|
||||||
causal=causal,
|
causal=causal,
|
||||||
is_sliding=layer_id % 2 == 0,
|
is_sliding=layer_id % 2 == 0,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
for layer_id in range(config.num_hidden_layers)
|
for layer_id in range(config.num_hidden_layers)
|
||||||
]
|
]
|
||||||
|
@ -0,0 +1,755 @@
|
|||||||
|
# coding=utf-8
|
||||||
|
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import torch.distributed
|
||||||
|
from torch import nn
|
||||||
|
from typing import Optional, List, Tuple
|
||||||
|
import copy
|
||||||
|
|
||||||
|
from text_generation_server.layers import (
|
||||||
|
TensorParallelColumnLinear,
|
||||||
|
TensorParallelEmbedding,
|
||||||
|
TensorParallelRowLinear,
|
||||||
|
get_linear,
|
||||||
|
#
|
||||||
|
SpeculativeHead,
|
||||||
|
TensorParallelMultiAdapterLinear,
|
||||||
|
TensorParallelAdapterRowLinear,
|
||||||
|
)
|
||||||
|
|
||||||
|
import torch
|
||||||
|
|
||||||
|
|
||||||
|
from text_generation_server.models.custom_modeling.vlm import (
|
||||||
|
load_text_model,
|
||||||
|
load_vision_model,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
from text_generation_server.layers.attention.kv_cache import get_kv_scales
|
||||||
|
from text_generation_server.layers.rotary import PositionRotaryEmbedding
|
||||||
|
from text_generation_server.layers.layernorm import (
|
||||||
|
FastRMSNorm,
|
||||||
|
)
|
||||||
|
from text_generation_server.utils.weights import UnquantizedWeight
|
||||||
|
from transformers.activations import ACT2FN
|
||||||
|
from text_generation_server.layers.attention import (
|
||||||
|
paged_attention,
|
||||||
|
attention,
|
||||||
|
Seqlen,
|
||||||
|
set_block_mapping,
|
||||||
|
HPUPagedAttentionMetadata,
|
||||||
|
)
|
||||||
|
import habana_frameworks.torch as htorch
|
||||||
|
|
||||||
|
ATTENTION_TYPE_GLOBAL = "global"
|
||||||
|
ATTENTION_TYPE_LOCAL = "local_sliding"
|
||||||
|
|
||||||
|
|
||||||
|
class Gemma3FastRMSNorm(FastRMSNorm):
|
||||||
|
@classmethod
|
||||||
|
def load(cls, prefix: str, weights, eps=1e-6):
|
||||||
|
dtype = weights.dtype
|
||||||
|
weights.dtype = torch.float32
|
||||||
|
weight = weights.get_tensor(f"{prefix}.weight") + 1
|
||||||
|
weights.dtype = dtype
|
||||||
|
new = cls(weight, eps)
|
||||||
|
new.dtype = dtype
|
||||||
|
return new
|
||||||
|
|
||||||
|
# perform the multiplication in full precision and downcast after
|
||||||
|
def forward(self, hidden_states, residual=None):
|
||||||
|
if residual is not None:
|
||||||
|
hidden_states += residual
|
||||||
|
residual = hidden_states
|
||||||
|
hidden_states = hidden_states.to(torch.float32)
|
||||||
|
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
||||||
|
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
||||||
|
hidden_states = hidden_states * self.weight
|
||||||
|
return hidden_states.to(self.dtype), residual
|
||||||
|
|
||||||
|
|
||||||
|
def load_attention(config, prefix: str, weights):
|
||||||
|
if config.num_attention_heads != config.num_key_value_heads:
|
||||||
|
return _load_gqa(config, prefix, weights)
|
||||||
|
else:
|
||||||
|
return TensorParallelColumnLinear.load_multi(
|
||||||
|
config,
|
||||||
|
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
|
||||||
|
dim=0,
|
||||||
|
weights=weights,
|
||||||
|
bias=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _load_gqa(config, prefix: str, weights):
|
||||||
|
assert config.num_attention_heads % weights.process_group.size() == 0
|
||||||
|
|
||||||
|
weight = weights.get_multi_weights_col(
|
||||||
|
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
|
||||||
|
dim=0,
|
||||||
|
)
|
||||||
|
|
||||||
|
if isinstance(weight, UnquantizedWeight):
|
||||||
|
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
|
||||||
|
|
||||||
|
head_size = config.head_dim
|
||||||
|
num_heads = config.num_attention_heads // weights.process_group.size()
|
||||||
|
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
|
||||||
|
assert list(weight.weight.shape) == [
|
||||||
|
(num_heads + 2 * num_key_value_heads) * head_size,
|
||||||
|
config.hidden_size,
|
||||||
|
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
|
||||||
|
|
||||||
|
return TensorParallelColumnLinear(get_linear(weight, bias=None))
|
||||||
|
|
||||||
|
|
||||||
|
class FlashGemma3Attention(torch.nn.Module):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
prefix: str,
|
||||||
|
config,
|
||||||
|
weights,
|
||||||
|
layer_id,
|
||||||
|
causal: bool,
|
||||||
|
is_sliding: bool,
|
||||||
|
local_rotary_emb,
|
||||||
|
global_rotary_emb,
|
||||||
|
):
|
||||||
|
super().__init__()
|
||||||
|
self.num_heads = config.num_attention_heads
|
||||||
|
self.head_size = config.head_dim
|
||||||
|
self.causal = causal
|
||||||
|
if is_sliding:
|
||||||
|
self.window_size = config.sliding_window
|
||||||
|
self.rotary_emb = local_rotary_emb
|
||||||
|
else:
|
||||||
|
self.window_size = -1
|
||||||
|
self.rotary_emb = global_rotary_emb
|
||||||
|
|
||||||
|
self.softmax_scale = (
|
||||||
|
config.query_pre_attn_scalar**-0.5
|
||||||
|
if config.query_pre_attn_scalar is not None
|
||||||
|
else None
|
||||||
|
)
|
||||||
|
if self.num_heads % weights.process_group.size() != 0:
|
||||||
|
raise ValueError(
|
||||||
|
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
|
||||||
|
f"and `num_shards`: {weights.process_group.size()}"
|
||||||
|
)
|
||||||
|
self.num_heads = self.num_heads // weights.process_group.size()
|
||||||
|
self.num_key_value_heads = (
|
||||||
|
config.num_key_value_heads // weights.process_group.size()
|
||||||
|
)
|
||||||
|
self.softcap = None # config.attn_logit_softcapping
|
||||||
|
|
||||||
|
query_key_value = load_attention(config, prefix, weights)
|
||||||
|
self.query_key_value = TensorParallelMultiAdapterLinear.load(
|
||||||
|
query_key_value,
|
||||||
|
layer_id,
|
||||||
|
["q_proj", "k_proj", "v_proj"],
|
||||||
|
sizes=[
|
||||||
|
self.head_size * config.num_attention_heads,
|
||||||
|
self.head_size * config.num_key_value_heads,
|
||||||
|
self.head_size * config.num_key_value_heads,
|
||||||
|
],
|
||||||
|
process_group=weights.process_group,
|
||||||
|
)
|
||||||
|
self.kv_scales = get_kv_scales(weights, f"{prefix}")
|
||||||
|
|
||||||
|
o_proj = TensorParallelRowLinear.load(
|
||||||
|
config,
|
||||||
|
prefix=f"{prefix}.o_proj",
|
||||||
|
weights=weights,
|
||||||
|
bias=False,
|
||||||
|
)
|
||||||
|
self.o_proj = TensorParallelAdapterRowLinear.load(
|
||||||
|
o_proj,
|
||||||
|
layer_id,
|
||||||
|
"o_proj",
|
||||||
|
process_group=weights.process_group,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.num_groups = self.num_heads // self.num_key_value_heads
|
||||||
|
self.kv_head_mapping = torch.arange(
|
||||||
|
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
|
||||||
|
).repeat_interleave(self.num_groups)
|
||||||
|
self.q_norm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.q_norm", weights=weights, eps=config.rms_norm_eps
|
||||||
|
)
|
||||||
|
self.k_norm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.k_norm", weights=weights, eps=config.rms_norm_eps
|
||||||
|
)
|
||||||
|
self.enable_gqa = self.num_heads != self.num_key_value_heads
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
hidden_states,
|
||||||
|
cos,
|
||||||
|
sin,
|
||||||
|
cu_seqlen_prefill,
|
||||||
|
kv_cache,
|
||||||
|
slots,
|
||||||
|
seqlen,
|
||||||
|
adapter_data,
|
||||||
|
hpu_attention_meta,
|
||||||
|
):
|
||||||
|
|
||||||
|
qkv = self.query_key_value(hidden_states, adapter_data)
|
||||||
|
query, kv = qkv.split(
|
||||||
|
[
|
||||||
|
self.head_size * self.num_heads,
|
||||||
|
2 * self.head_size * self.num_key_value_heads,
|
||||||
|
],
|
||||||
|
dim=1,
|
||||||
|
)
|
||||||
|
|
||||||
|
kv = kv.view(-1, 2, self.num_key_value_heads * self.head_size)
|
||||||
|
key = kv[:, 0]
|
||||||
|
value = kv[:, 1]
|
||||||
|
|
||||||
|
query = query.reshape(-1, self.head_size)
|
||||||
|
key = key.reshape(-1, self.head_size)
|
||||||
|
|
||||||
|
query, _ = self.q_norm(query.contiguous())
|
||||||
|
key, _ = self.k_norm(key.contiguous())
|
||||||
|
|
||||||
|
query = query.view(-1, self.num_heads, self.head_size)
|
||||||
|
key = key.view(-1, self.num_key_value_heads, self.head_size)
|
||||||
|
value = value.view(-1, self.num_key_value_heads, self.head_size)
|
||||||
|
|
||||||
|
self.rotary_emb(query, key, cos, sin)
|
||||||
|
|
||||||
|
kv_cache.store(
|
||||||
|
key=key,
|
||||||
|
value=value,
|
||||||
|
slots=slots,
|
||||||
|
kv_scales=self.kv_scales,
|
||||||
|
)
|
||||||
|
# Prefill
|
||||||
|
if cu_seqlen_prefill is not None:
|
||||||
|
# sdpa
|
||||||
|
attn_output = attention(
|
||||||
|
query=query,
|
||||||
|
key=key,
|
||||||
|
value=value,
|
||||||
|
kv_cache=kv_cache,
|
||||||
|
kv_scales=self.kv_scales,
|
||||||
|
seqlen=seqlen,
|
||||||
|
softmax_scale=self.softmax_scale,
|
||||||
|
window_size_left=self.window_size,
|
||||||
|
softcap=self.softcap,
|
||||||
|
)
|
||||||
|
# Decode
|
||||||
|
else:
|
||||||
|
attn_output = paged_attention(
|
||||||
|
query,
|
||||||
|
kv_cache,
|
||||||
|
self.kv_head_mapping,
|
||||||
|
self.softmax_scale,
|
||||||
|
seqlen,
|
||||||
|
softcap=self.softcap,
|
||||||
|
kv_scales=self.kv_scales,
|
||||||
|
hpu_attention_meta=hpu_attention_meta,
|
||||||
|
window_size_left=self.window_size,
|
||||||
|
)
|
||||||
|
|
||||||
|
return self.o_proj(
|
||||||
|
attn_output.view(-1, self.num_heads * self.head_size), adapter_data
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class Gemma3MLP(nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights, layer_id):
|
||||||
|
super().__init__()
|
||||||
|
act = config.hidden_activation
|
||||||
|
self.act = (
|
||||||
|
ACT2FN[act]
|
||||||
|
if "gelu" not in act
|
||||||
|
else lambda x: torch.nn.functional.gelu(
|
||||||
|
x,
|
||||||
|
approximate=(
|
||||||
|
"tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none"
|
||||||
|
),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
# Fuse gate and up proj
|
||||||
|
gate_up_proj = TensorParallelColumnLinear.load_multi(
|
||||||
|
config,
|
||||||
|
prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
|
||||||
|
weights=weights,
|
||||||
|
dim=0,
|
||||||
|
bias=False,
|
||||||
|
)
|
||||||
|
self.gate_up_proj = TensorParallelMultiAdapterLinear.load(
|
||||||
|
gate_up_proj,
|
||||||
|
layer_id,
|
||||||
|
["gate_proj", "up_proj"],
|
||||||
|
sizes=[
|
||||||
|
config.intermediate_size,
|
||||||
|
config.intermediate_size,
|
||||||
|
],
|
||||||
|
process_group=weights.process_group,
|
||||||
|
)
|
||||||
|
|
||||||
|
down_proj = TensorParallelRowLinear.load(
|
||||||
|
config,
|
||||||
|
prefix=f"{prefix}.down_proj",
|
||||||
|
weights=weights,
|
||||||
|
bias=False,
|
||||||
|
)
|
||||||
|
self.down_proj = TensorParallelAdapterRowLinear.load(
|
||||||
|
down_proj,
|
||||||
|
layer_id,
|
||||||
|
"down_proj",
|
||||||
|
process_group=weights.process_group,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.intermediate_size = (
|
||||||
|
config.intermediate_size // weights.process_group.size()
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, hidden_states, adapter_data):
|
||||||
|
gate_up_states = self.gate_up_proj(hidden_states, adapter_data)
|
||||||
|
gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size)
|
||||||
|
return self.down_proj(
|
||||||
|
self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class FlashGemma3Layer(nn.Module):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
prefix: str,
|
||||||
|
config,
|
||||||
|
weights,
|
||||||
|
layer_id,
|
||||||
|
causal: bool,
|
||||||
|
is_sliding: bool,
|
||||||
|
local_rotary_emb,
|
||||||
|
global_rotary_emb,
|
||||||
|
):
|
||||||
|
super().__init__()
|
||||||
|
self.self_attn = FlashGemma3Attention(
|
||||||
|
prefix=f"{prefix}.self_attn",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
layer_id=layer_id,
|
||||||
|
causal=causal,
|
||||||
|
is_sliding=is_sliding,
|
||||||
|
local_rotary_emb=local_rotary_emb,
|
||||||
|
global_rotary_emb=global_rotary_emb,
|
||||||
|
)
|
||||||
|
self.mlp = Gemma3MLP(
|
||||||
|
prefix=f"{prefix}.mlp", config=config, weights=weights, layer_id=layer_id
|
||||||
|
)
|
||||||
|
|
||||||
|
self.input_layernorm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
|
||||||
|
)
|
||||||
|
self.post_attention_layernorm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.post_attention_layernorm",
|
||||||
|
weights=weights,
|
||||||
|
eps=config.rms_norm_eps,
|
||||||
|
)
|
||||||
|
self.pre_feedforward_layernorm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.pre_feedforward_layernorm",
|
||||||
|
weights=weights,
|
||||||
|
eps=config.rms_norm_eps,
|
||||||
|
)
|
||||||
|
self.post_feedforward_layernorm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.post_feedforward_layernorm",
|
||||||
|
weights=weights,
|
||||||
|
eps=config.rms_norm_eps,
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
hidden_states,
|
||||||
|
residual,
|
||||||
|
cos,
|
||||||
|
sin,
|
||||||
|
cu_seqlen_prefill,
|
||||||
|
kv_cache,
|
||||||
|
slots,
|
||||||
|
seqlen,
|
||||||
|
adapter_data,
|
||||||
|
hpu_attention_meta,
|
||||||
|
):
|
||||||
|
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
|
||||||
|
|
||||||
|
# Self Attention
|
||||||
|
attn_output = self.self_attn(
|
||||||
|
normed_hidden_states,
|
||||||
|
cos,
|
||||||
|
sin,
|
||||||
|
cu_seqlen_prefill,
|
||||||
|
kv_cache,
|
||||||
|
slots,
|
||||||
|
seqlen,
|
||||||
|
adapter_data,
|
||||||
|
hpu_attention_meta,
|
||||||
|
)
|
||||||
|
|
||||||
|
# faster post attention rms norm
|
||||||
|
normed_attn_res_output, _ = self.post_attention_layernorm(attn_output)
|
||||||
|
normed_attn_res_output = normed_attn_res_output + res
|
||||||
|
res = normed_attn_res_output
|
||||||
|
|
||||||
|
pre_normed, _ = self.pre_feedforward_layernorm(normed_attn_res_output)
|
||||||
|
mlp_output = self.mlp(pre_normed, adapter_data)
|
||||||
|
post_hidden_states, _ = self.post_feedforward_layernorm(mlp_output)
|
||||||
|
|
||||||
|
return post_hidden_states, normed_attn_res_output
|
||||||
|
|
||||||
|
|
||||||
|
class FlashGemma3Model(torch.nn.Module):
|
||||||
|
def __init__(self, prefix: str, config, weights, causal: bool):
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
process_group = weights.process_group
|
||||||
|
self.tp_rank = process_group.rank()
|
||||||
|
self.tp_world_size = process_group.size()
|
||||||
|
local_config = copy.deepcopy(config)
|
||||||
|
local_config.rope_scaling = dict(rope_type="default")
|
||||||
|
local_rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=local_config,
|
||||||
|
dim=config.head_dim,
|
||||||
|
base=config.rope_local_base_freq,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
|
global_rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.head_dim,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.layers = nn.ModuleList(
|
||||||
|
[
|
||||||
|
FlashGemma3Layer(
|
||||||
|
prefix=f"{prefix}.layers.{layer_id}",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
layer_id=layer_id,
|
||||||
|
causal=causal,
|
||||||
|
is_sliding=bool((layer_id + 1) % config.sliding_window_pattern),
|
||||||
|
local_rotary_emb=local_rotary_emb,
|
||||||
|
global_rotary_emb=global_rotary_emb,
|
||||||
|
)
|
||||||
|
for layer_id in range(config.num_hidden_layers)
|
||||||
|
]
|
||||||
|
)
|
||||||
|
self.norm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps
|
||||||
|
)
|
||||||
|
|
||||||
|
self.head_size = self.layers[0].self_attn.head_size
|
||||||
|
self.num_heads = self.layers[0].self_attn.num_heads
|
||||||
|
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
inputs_embeds: torch.Tensor,
|
||||||
|
position_ids: torch.Tensor,
|
||||||
|
cu_seqlen_prefill: Optional[torch.Tensor],
|
||||||
|
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
|
||||||
|
slots: torch.Tensor,
|
||||||
|
seqlen: Seqlen,
|
||||||
|
adapter_data: Optional[torch.Tensor],
|
||||||
|
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
|
||||||
|
) -> torch.Tensor:
|
||||||
|
if hpu_attention_meta is not None:
|
||||||
|
hpu_attention_meta = set_block_mapping(
|
||||||
|
hpu_attention_meta, inputs_embeds.shape[0]
|
||||||
|
)
|
||||||
|
hidden_states = inputs_embeds
|
||||||
|
|
||||||
|
residual = None
|
||||||
|
lazy_mode = htorch.utils.internal.is_lazy()
|
||||||
|
if lazy_mode:
|
||||||
|
htorch.core.mark_step()
|
||||||
|
|
||||||
|
# Get rotary cos and sin for this forward
|
||||||
|
# Avoid to index in each layer
|
||||||
|
|
||||||
|
residual = None
|
||||||
|
for i, layer in enumerate(self.layers):
|
||||||
|
# Get rotary cos and sin for this forward
|
||||||
|
# Avoid to index in each layer
|
||||||
|
cos, sin = layer.self_attn.rotary_emb.get_cos_sin(position_ids)
|
||||||
|
hidden_states, residual = layer(
|
||||||
|
hidden_states,
|
||||||
|
residual,
|
||||||
|
cos,
|
||||||
|
sin,
|
||||||
|
cu_seqlen_prefill,
|
||||||
|
kv_cache[i],
|
||||||
|
slots,
|
||||||
|
seqlen,
|
||||||
|
adapter_data,
|
||||||
|
hpu_attention_meta,
|
||||||
|
)
|
||||||
|
if lazy_mode:
|
||||||
|
htorch.core.mark_step()
|
||||||
|
|
||||||
|
hidden_states, _ = self.norm(hidden_states, residual)
|
||||||
|
|
||||||
|
return hidden_states
|
||||||
|
|
||||||
|
|
||||||
|
class FlashGemma3ForCausalLM(torch.nn.Module):
|
||||||
|
def __init__(self, prefix: str, config, weights, *, causal: bool = True):
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
embed_norm = config.hidden_size**0.5
|
||||||
|
if not prefix:
|
||||||
|
prefix = "model"
|
||||||
|
else:
|
||||||
|
prefix = f"{prefix}.model"
|
||||||
|
|
||||||
|
self.embed_tokens = TensorParallelEmbedding(
|
||||||
|
prefix=f"{prefix}.embed_tokens", weights=weights
|
||||||
|
)
|
||||||
|
self.embed_tokens.weight *= embed_norm
|
||||||
|
|
||||||
|
self.model = FlashGemma3Model(
|
||||||
|
prefix=prefix, config=config, weights=weights, causal=causal
|
||||||
|
)
|
||||||
|
self.lm_head = SpeculativeHead.load(
|
||||||
|
prefix=(
|
||||||
|
f"{prefix}.embed_tokens"
|
||||||
|
if config.tie_word_embeddings
|
||||||
|
else f"{prefix}.lm_head"
|
||||||
|
),
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
)
|
||||||
|
# self.softcap = config.attn_logit_softcapping
|
||||||
|
# assert isinstance(self.softcap, float)
|
||||||
|
self.softcap = None
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
input_ids: torch.Tensor,
|
||||||
|
position_ids: torch.Tensor,
|
||||||
|
cu_seqlen_prefill: Optional[torch.Tensor],
|
||||||
|
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
|
||||||
|
slots: torch.Tensor,
|
||||||
|
seqlen: Seqlen,
|
||||||
|
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
|
||||||
|
lm_head_indices: Optional[torch.Tensor] = None,
|
||||||
|
adapter_data: Optional[torch.Tensor] = None,
|
||||||
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
||||||
|
input_embeds = self.embed_tokens(input_ids)
|
||||||
|
|
||||||
|
hidden_states = self.model(
|
||||||
|
input_embeds,
|
||||||
|
position_ids,
|
||||||
|
cu_seqlen_prefill,
|
||||||
|
kv_cache,
|
||||||
|
slots,
|
||||||
|
seqlen,
|
||||||
|
adapter_data,
|
||||||
|
hpu_attention_meta,
|
||||||
|
)
|
||||||
|
if lm_head_indices is not None:
|
||||||
|
hidden_states = hidden_states[lm_head_indices]
|
||||||
|
logits, speculative_logits = self.lm_head(hidden_states)
|
||||||
|
|
||||||
|
return logits, speculative_logits
|
||||||
|
|
||||||
|
|
||||||
|
class Gemma3MultimodalInputProjection(torch.nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
self.mm_input_projection_weight = weights.get_tensor(
|
||||||
|
"multi_modal_projector.mm_input_projection_weight"
|
||||||
|
)
|
||||||
|
|
||||||
|
self.mm_soft_emb_norm = Gemma3FastRMSNorm.load(
|
||||||
|
prefix=f"{prefix}.mm_soft_emb_norm",
|
||||||
|
weights=weights,
|
||||||
|
eps=config.vision_config.layer_norm_eps,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.patches_per_image = int(
|
||||||
|
config.vision_config.image_size // config.vision_config.patch_size
|
||||||
|
)
|
||||||
|
self.tokens_per_side = int(config.mm_tokens_per_image**0.5)
|
||||||
|
self.kernel_size = self.patches_per_image // self.tokens_per_side
|
||||||
|
self.avg_pool = nn.AvgPool2d(
|
||||||
|
kernel_size=self.kernel_size, stride=self.kernel_size
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, vision_outputs: torch.Tensor):
|
||||||
|
batch_size, _, seq_length = vision_outputs.shape
|
||||||
|
|
||||||
|
reshaped_vision_outputs = vision_outputs.transpose(1, 2)
|
||||||
|
reshaped_vision_outputs = reshaped_vision_outputs.reshape(
|
||||||
|
batch_size, seq_length, self.patches_per_image, self.patches_per_image
|
||||||
|
)
|
||||||
|
reshaped_vision_outputs = reshaped_vision_outputs.contiguous()
|
||||||
|
|
||||||
|
pooled_vision_outputs = self.avg_pool(reshaped_vision_outputs)
|
||||||
|
pooled_vision_outputs = pooled_vision_outputs.flatten(2)
|
||||||
|
pooled_vision_outputs = pooled_vision_outputs.transpose(1, 2)
|
||||||
|
|
||||||
|
normed_vision_outputs, _ = self.mm_soft_emb_norm(pooled_vision_outputs)
|
||||||
|
|
||||||
|
projected_vision_outputs = torch.matmul(
|
||||||
|
normed_vision_outputs, self.mm_input_projection_weight
|
||||||
|
)
|
||||||
|
return projected_vision_outputs.type_as(vision_outputs)
|
||||||
|
|
||||||
|
|
||||||
|
class Gemma3ForConditionalGeneration(nn.Module):
|
||||||
|
def __init__(self, prefix, config, weights):
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
self.config = config
|
||||||
|
|
||||||
|
if config.vision_config is not None:
|
||||||
|
|
||||||
|
config.vision_config.quantize = config.quantize
|
||||||
|
|
||||||
|
self.post_vision_model_layernorm = nn.LayerNorm.load(
|
||||||
|
prefix="vision_tower.vision_model.post_layernorm",
|
||||||
|
weights=weights,
|
||||||
|
eps=config.vision_config.layer_norm_eps,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.multimodal_projector = Gemma3MultimodalInputProjection(
|
||||||
|
prefix="multi_modal_projector",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
)
|
||||||
|
|
||||||
|
text_config = config.text_config
|
||||||
|
text_config.speculator = config.speculator
|
||||||
|
text_config.quantize = config.quantize
|
||||||
|
|
||||||
|
self.vision_model = load_vision_model(
|
||||||
|
prefix="vision_tower" if not prefix else f"{prefix}.vision_tower",
|
||||||
|
config=config.vision_config,
|
||||||
|
weights=weights,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.text_model = load_text_model(
|
||||||
|
prefix="language_model" if not prefix else f"{prefix}.language_model",
|
||||||
|
config=config.text_config,
|
||||||
|
weights=weights,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
config.text_config.quantize = config.quantize
|
||||||
|
config.text_config.speculator = config.speculator
|
||||||
|
self.text_model = load_text_model(
|
||||||
|
prefix=prefix,
|
||||||
|
config=config.text_config,
|
||||||
|
weights=weights,
|
||||||
|
)
|
||||||
|
|
||||||
|
self.pad_token_id = (
|
||||||
|
config.pad_token_id if config.pad_token_id is not None else -1
|
||||||
|
)
|
||||||
|
self.dtype = weights.dtype
|
||||||
|
|
||||||
|
def get_vision_embeds(
|
||||||
|
self,
|
||||||
|
pixel_values: torch.FloatTensor,
|
||||||
|
pixel_attention_mask: Optional[torch.FloatTensor] = None,
|
||||||
|
image_sizes: Optional[torch.Tensor] = None,
|
||||||
|
image_grid_thw: Optional[torch.LongTensor] = None,
|
||||||
|
):
|
||||||
|
pixel_values = pixel_values.to(dtype=self.dtype)
|
||||||
|
image_outputs = self.vision_model(pixel_values)
|
||||||
|
vision_outputs = self.post_vision_model_layernorm(
|
||||||
|
image_outputs.last_hidden_state
|
||||||
|
)
|
||||||
|
image_features = self.multimodal_projector(vision_outputs)
|
||||||
|
image_features = image_features.view(-1, image_features.shape[-1])
|
||||||
|
return image_features
|
||||||
|
|
||||||
|
def get_inputs_embeds(
|
||||||
|
self,
|
||||||
|
input_ids: torch.Tensor,
|
||||||
|
vision_embeds: torch.Tensor = None,
|
||||||
|
):
|
||||||
|
inputs_embeds = self.text_model.embed_tokens(input_ids)
|
||||||
|
|
||||||
|
if vision_embeds is not None:
|
||||||
|
# Replace the image token embeddings with the vision features
|
||||||
|
image_token_mask = (input_ids == self.config.image_token_index).to(
|
||||||
|
input_ids.device
|
||||||
|
)
|
||||||
|
inputs_embeds[image_token_mask] = vision_embeds.view(
|
||||||
|
-1, vision_embeds.shape[-1]
|
||||||
|
)
|
||||||
|
return inputs_embeds
|
||||||
|
|
||||||
|
def forward(
|
||||||
|
self,
|
||||||
|
inputs_embeds: torch.Tensor,
|
||||||
|
position_ids: torch.Tensor,
|
||||||
|
cu_seqlen_prefill: Optional[torch.Tensor],
|
||||||
|
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
|
||||||
|
slots: torch.Tensor,
|
||||||
|
seqlen: Seqlen,
|
||||||
|
hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
|
||||||
|
lm_head_indices: Optional[torch.Tensor] = None,
|
||||||
|
attention_mask: Optional[torch.BoolTensor] = None,
|
||||||
|
adapter_data: Optional[torch.Tensor] = None,
|
||||||
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
||||||
|
if cu_seqlen_prefill is not None:
|
||||||
|
position_ids += 1
|
||||||
|
|
||||||
|
if attention_mask is not None:
|
||||||
|
min_dtype = torch.finfo(inputs_embeds.dtype).min
|
||||||
|
# prefill may be larger than sliding window
|
||||||
|
effective_seq_len = max(
|
||||||
|
position_ids.shape[0], self.config.text_config.sliding_window
|
||||||
|
)
|
||||||
|
sliding_window_mask = torch.tril(
|
||||||
|
torch.ones_like(attention_mask, dtype=torch.bool),
|
||||||
|
diagonal=-self.config.text_config.sliding_window,
|
||||||
|
)
|
||||||
|
attention_mask_local = torch.where(
|
||||||
|
sliding_window_mask, min_dtype, attention_mask
|
||||||
|
)
|
||||||
|
offset = max(0, position_ids.shape[0] - effective_seq_len)
|
||||||
|
attention_mask_local = attention_mask_local[
|
||||||
|
:, :, :, offset : offset + effective_seq_len
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
attention_mask_local = None
|
||||||
|
|
||||||
|
hidden_states = self.text_model.model(
|
||||||
|
inputs_embeds=inputs_embeds,
|
||||||
|
position_ids=position_ids,
|
||||||
|
cu_seqlen_prefill=cu_seqlen_prefill,
|
||||||
|
kv_cache=kv_cache,
|
||||||
|
slots=slots,
|
||||||
|
seqlen=seqlen,
|
||||||
|
hpu_attention_meta=hpu_attention_meta,
|
||||||
|
adapter_data=adapter_data,
|
||||||
|
)
|
||||||
|
|
||||||
|
if lm_head_indices is not None:
|
||||||
|
hidden_states = hidden_states[lm_head_indices]
|
||||||
|
logits, speculative_logits = self.text_model.lm_head(hidden_states)
|
||||||
|
|
||||||
|
return logits, speculative_logits
|
@ -163,19 +163,12 @@ def _load_gqa(config, prefix: str, weights):
|
|||||||
|
|
||||||
|
|
||||||
class FlashGemmaAttention(torch.nn.Module):
|
class FlashGemmaAttention(torch.nn.Module):
|
||||||
def __init__(self, prefix: str, config, weights, causal: bool):
|
def __init__(self, prefix: str, config, weights, causal: bool, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.num_heads = config.num_attention_heads
|
self.num_heads = config.num_attention_heads
|
||||||
self.head_size = config.head_dim
|
self.head_size = config.head_dim
|
||||||
self.causal = causal
|
self.causal = causal
|
||||||
|
self.rotary_emb = rotary_emb
|
||||||
self.rotary_emb = PositionRotaryEmbedding.static(
|
|
||||||
config=config,
|
|
||||||
dim=self.head_size,
|
|
||||||
base=config.rope_theta,
|
|
||||||
device=weights.device,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.softmax_scale = self.head_size**-0.5
|
self.softmax_scale = self.head_size**-0.5
|
||||||
|
|
||||||
if self.num_heads % weights.process_group.size() != 0:
|
if self.num_heads % weights.process_group.size() != 0:
|
||||||
@ -300,10 +293,14 @@ class GemmaMLP(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class FlashGemmaLayer(nn.Module):
|
class FlashGemmaLayer(nn.Module):
|
||||||
def __init__(self, prefix: str, config, weights, causal: bool):
|
def __init__(self, prefix: str, config, weights, causal: bool, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.self_attn = FlashGemmaAttention(
|
self.self_attn = FlashGemmaAttention(
|
||||||
prefix=f"{prefix}.self_attn", config=config, weights=weights, causal=causal
|
prefix=f"{prefix}.self_attn",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
causal=causal,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
self.mlp = GemmaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
self.mlp = GemmaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
||||||
|
|
||||||
@ -359,6 +356,13 @@ class FlashGemmaModel(torch.nn.Module):
|
|||||||
process_group = weights.process_group
|
process_group = weights.process_group
|
||||||
self.tp_rank = process_group.rank()
|
self.tp_rank = process_group.rank()
|
||||||
self.tp_world_size = process_group.size()
|
self.tp_world_size = process_group.size()
|
||||||
|
rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.head_dim,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
|
|
||||||
self.layers = nn.ModuleList(
|
self.layers = nn.ModuleList(
|
||||||
[
|
[
|
||||||
FlashGemmaLayer(
|
FlashGemmaLayer(
|
||||||
@ -366,6 +370,7 @@ class FlashGemmaModel(torch.nn.Module):
|
|||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
causal=causal,
|
causal=causal,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
for layer_id in range(config.num_hidden_layers)
|
for layer_id in range(config.num_hidden_layers)
|
||||||
]
|
]
|
||||||
|
@ -110,6 +110,7 @@ class FlashGPTJAttention(torch.nn.Module):
|
|||||||
prefix: str,
|
prefix: str,
|
||||||
config,
|
config,
|
||||||
weights,
|
weights,
|
||||||
|
rotary_emb,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.num_heads = config.num_attention_heads
|
self.num_heads = config.num_attention_heads
|
||||||
@ -143,13 +144,7 @@ class FlashGPTJAttention(torch.nn.Module):
|
|||||||
self.kv_head_mapping = torch.arange(
|
self.kv_head_mapping = torch.arange(
|
||||||
0, self.num_heads, dtype=torch.int32, device=weights.device
|
0, self.num_heads, dtype=torch.int32, device=weights.device
|
||||||
)
|
)
|
||||||
|
self.rotary_emb = rotary_emb
|
||||||
self.rotary_emb = GPTJRotary.static(
|
|
||||||
config=config,
|
|
||||||
dim=self.rotary_dim,
|
|
||||||
base=10000,
|
|
||||||
device=weights.device,
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(
|
def forward(
|
||||||
self,
|
self,
|
||||||
@ -244,10 +239,13 @@ class GPTJMLP(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class FlashGPTJLayer(nn.Module):
|
class FlashGPTJLayer(nn.Module):
|
||||||
def __init__(self, prefix: str, config, weights):
|
def __init__(self, prefix: str, config, weights, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.self_attn = FlashGPTJAttention(
|
self.self_attn = FlashGPTJAttention(
|
||||||
prefix=f"{prefix}.attn", config=config, weights=weights
|
prefix=f"{prefix}.attn",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
self.mlp = GPTJMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
self.mlp = GPTJMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
|
||||||
|
|
||||||
@ -291,6 +289,12 @@ class FlashGPTJModel(torch.nn.Module):
|
|||||||
self.config = config
|
self.config = config
|
||||||
|
|
||||||
self.wte = TensorParallelEmbedding(prefix=f"{prefix}.wte", weights=weights)
|
self.wte = TensorParallelEmbedding(prefix=f"{prefix}.wte", weights=weights)
|
||||||
|
rotary_emb = GPTJRotary.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.rotary_dim,
|
||||||
|
base=10000,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
self.layers = nn.ModuleList(
|
self.layers = nn.ModuleList(
|
||||||
[
|
[
|
||||||
FlashGPTJLayer(
|
FlashGPTJLayer(
|
||||||
@ -299,6 +303,7 @@ class FlashGPTJModel(torch.nn.Module):
|
|||||||
),
|
),
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
for layer_id in range(config.num_hidden_layers)
|
for layer_id in range(config.num_hidden_layers)
|
||||||
]
|
]
|
||||||
|
@ -303,7 +303,7 @@ class Llama4TextAttention(FlashLlamaAttention):
|
|||||||
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
||||||
|
|
||||||
def __init__(self, prefix, config, weights, layer_idx):
|
def __init__(self, prefix, config, weights, layer_idx):
|
||||||
super().__init__(layer_idx, prefix, config, weights)
|
super().__init__(layer_idx, prefix, config, weights, None)
|
||||||
self.config = config
|
self.config = config
|
||||||
self.layer_idx = layer_idx
|
self.layer_idx = layer_idx
|
||||||
self.head_dim = getattr(
|
self.head_dim = getattr(
|
||||||
|
@ -133,25 +133,14 @@ class FlashLlamaAttention(torch.nn.Module):
|
|||||||
prefix: str,
|
prefix: str,
|
||||||
config,
|
config,
|
||||||
weights,
|
weights,
|
||||||
|
rotary_emb,
|
||||||
):
|
):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
self.num_heads = config.num_attention_heads
|
self.num_heads = config.num_attention_heads
|
||||||
self.hidden_size = config.hidden_size
|
self.hidden_size = config.hidden_size
|
||||||
self.head_size = self.hidden_size // self.num_heads
|
self.head_size = self.hidden_size // self.num_heads
|
||||||
|
|
||||||
# Setting defaults for baichuan custom config which doesn't apply them.
|
self.rotary_emb = rotary_emb
|
||||||
config.rope_theta = getattr(config, "rope_theta", 10000)
|
|
||||||
config.num_key_value_heads = getattr(
|
|
||||||
config, "num_key_value_heads", config.num_attention_heads
|
|
||||||
)
|
|
||||||
|
|
||||||
if config.model_type != "llama4_text":
|
|
||||||
self.rotary_emb = PositionRotaryEmbedding.static(
|
|
||||||
config=config,
|
|
||||||
dim=self.head_size,
|
|
||||||
base=config.rope_theta,
|
|
||||||
device=weights.device,
|
|
||||||
)
|
|
||||||
|
|
||||||
# `config.attention_multiplier` is used in Granite
|
# `config.attention_multiplier` is used in Granite
|
||||||
self.softmax_scale = getattr(
|
self.softmax_scale = getattr(
|
||||||
@ -376,7 +365,7 @@ class LlamaMLP(nn.Module):
|
|||||||
|
|
||||||
|
|
||||||
class FlashLlamaLayer(nn.Module):
|
class FlashLlamaLayer(nn.Module):
|
||||||
def __init__(self, index, prefix, config, weights):
|
def __init__(self, index, prefix, config, weights, rotary_emb):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
with no_fp8(weights):
|
with no_fp8(weights):
|
||||||
@ -385,6 +374,7 @@ class FlashLlamaLayer(nn.Module):
|
|||||||
prefix=f"{prefix}.self_attn",
|
prefix=f"{prefix}.self_attn",
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
|
|
||||||
if config.model_type == "phimoe":
|
if config.model_type == "phimoe":
|
||||||
@ -480,6 +470,17 @@ class FlashLlamaModel(torch.nn.Module):
|
|||||||
# Skip fp8 quant for first and last layers
|
# Skip fp8 quant for first and last layers
|
||||||
self.layers = nn.ModuleList()
|
self.layers = nn.ModuleList()
|
||||||
self.cross_attention_layers = getattr(config, "cross_attention_layers", [])
|
self.cross_attention_layers = getattr(config, "cross_attention_layers", [])
|
||||||
|
# Setting defaults for baichuan custom config which doesn't apply them.
|
||||||
|
config.rope_theta = getattr(config, "rope_theta", 10000)
|
||||||
|
config.num_key_value_heads = getattr(
|
||||||
|
config, "num_key_value_heads", config.num_attention_heads
|
||||||
|
)
|
||||||
|
rotary_emb = PositionRotaryEmbedding.static(
|
||||||
|
config=config,
|
||||||
|
dim=config.hidden_size // config.num_attention_heads,
|
||||||
|
base=config.rope_theta,
|
||||||
|
device=weights.device,
|
||||||
|
)
|
||||||
with no_fp8(weights):
|
with no_fp8(weights):
|
||||||
self.layers.append(
|
self.layers.append(
|
||||||
FlashLlamaLayer(
|
FlashLlamaLayer(
|
||||||
@ -487,6 +488,7 @@ class FlashLlamaModel(torch.nn.Module):
|
|||||||
prefix=f"{prefix}.layers.0",
|
prefix=f"{prefix}.layers.0",
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -512,6 +514,7 @@ class FlashLlamaModel(torch.nn.Module):
|
|||||||
prefix=(f"{prefix}.layers.{layer_id}"),
|
prefix=(f"{prefix}.layers.{layer_id}"),
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -523,6 +526,7 @@ class FlashLlamaModel(torch.nn.Module):
|
|||||||
prefix=(f"{prefix}.layers.{last_layer_id}"),
|
prefix=(f"{prefix}.layers.{last_layer_id}"),
|
||||||
config=config,
|
config=config,
|
||||||
weights=weights,
|
weights=weights,
|
||||||
|
rotary_emb=rotary_emb,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@@ -104,7 +104,7 @@ class MistralConfig(PretrainedConfig):


 class MistralAttention(torch.nn.Module):
-    def __init__(self, prefix: str, config, weights, layer_id):
+    def __init__(self, prefix: str, config, weights, layer_id, rotary_emb):
         super().__init__()
         self.max_past = (
             config.sliding_window if config.sliding_window is not None else -1
@@ -117,12 +117,7 @@ class MistralAttention(torch.nn.Module):
         else:
             self.head_size = self.hidden_size // self.num_heads

-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         self.softmax_scale = self.head_size**-0.5

@@ -229,6 +224,7 @@ class MistralAttention(torch.nn.Module):
             seqlen,
             kv_scales=self.kv_scales,
             hpu_attention_meta=hpu_attention_meta,
+            window_size_left=self.max_past,
         )

         return self.o_proj(
@@ -300,13 +296,14 @@ class MistralMLP(nn.Module):


 class MistralLayer(nn.Module):
-    def __init__(self, prefix: str, config, weights, layer_id):
+    def __init__(self, prefix: str, config, weights, layer_id, rotary_emb):
         super().__init__()
         self.self_attn = MistralAttention(
             prefix=f"{prefix}.self_attn",
             config=config,
             weights=weights,
             layer_id=layer_id,
+            rotary_emb=rotary_emb,
         )
         self.mlp = MistralMLP(
             prefix=f"{prefix}.mlp", config=config, weights=weights, layer_id=layer_id
@@ -366,6 +363,19 @@ class MistralModel(torch.nn.Module):
         process_group = weights.process_group
         self.tp_rank = process_group.rank()
         self.tp_world_size = process_group.size()
+
+        if getattr(config, "head_dim", None) is not None:
+            head_dim = config.head_dim
+        else:
+            head_dim = config.hidden_size // config.num_attention_heads
+
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=head_dim,
+            base=config.rope_theta,
+            device=weights.device,
+        )
+
         self.layers = nn.ModuleList(
             [
                 MistralLayer(
@@ -373,6 +383,7 @@ class MistralModel(torch.nn.Module):
                     config=config,
                     weights=weights,
                     layer_id=layer_id,
+                    rotary_emb=rotary_emb,
                 )
                 for layer_id in range(config.num_hidden_layers)
             ]
@@ -188,6 +188,7 @@ class MixtralAttention(torch.nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.max_past = (
@@ -196,13 +197,7 @@ class MixtralAttention(torch.nn.Module):
         self.num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
         self.head_size = self.hidden_size // self.num_heads
-
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         self.softmax_scale = self.head_size**-0.5

@@ -345,12 +340,15 @@ class MixtralMoE(nn.Module):


 class MixtralLayer(nn.Module):
-    def __init__(self, prefix: str, layer_id, config, weights):
+    def __init__(self, prefix: str, layer_id, config, weights, rotary_emb):
         super().__init__()
         prefix = f"{prefix}.layers.{layer_id}"

         self.self_attn = MixtralAttention(
-            prefix=f"{prefix}.self_attn", config=config, weights=weights
+            prefix=f"{prefix}.self_attn",
+            config=config,
+            weights=weights,
+            rotary_emb=rotary_emb,
         )

         moe_layer_cls = (
@@ -416,6 +414,12 @@ class MixtralModel(torch.nn.Module):
             weights=weights,
         )

+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=config.hidden_size // config.num_attention_heads,
+            base=config.rope_theta,
+            device=weights.device,
+        )
         self.layers = nn.ModuleList(
             [
                 MixtralLayer(
@@ -423,6 +427,7 @@ class MixtralModel(torch.nn.Module):
                     layer_id,
                     config,
                     weights,
+                    rotary_emb,
                 )
                 for layer_id in range(config.num_hidden_layers)
             ]
@@ -99,7 +99,7 @@ def load_qkv(config, prefix: str, weights, num_heads, head_size, hidden_size):


 class FlashNeoxAttention(torch.nn.Module):
-    def __init__(self, config, prefix, weights):
+    def __init__(self, config, prefix, weights, rotary_emb):
         super().__init__()
         num_heads = config.num_attention_heads
         hidden_size = config.hidden_size
@@ -116,14 +116,7 @@ class FlashNeoxAttention(torch.nn.Module):
                 f"and `num_shards`: {weights.process_group.size()}"
             )
         self.num_heads = self.num_heads // weights.process_group.size()
-
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.rotary_dim,
-            base=config.rotary_emb_base,
-            device=weights.device,
-        )
-
+        self.rotary_emb = rotary_emb
         self.softmax_scale = self.head_size ** (-0.5)

         self.query_key_value = load_qkv(
@@ -231,7 +224,7 @@ class FlashMLP(nn.Module):


 class FlashNeoXLayer(nn.Module):
-    def __init__(self, layer_id, config, weights):
+    def __init__(self, layer_id, config, weights, rotary_emb):
         super().__init__()

         layer_norm_eps = config.layer_norm_eps
@@ -248,7 +241,10 @@ class FlashNeoXLayer(nn.Module):
             eps=layer_norm_eps,
         )
         self.attention = FlashNeoxAttention(
-            config, prefix=f"{prefix}.attention", weights=weights
+            config,
+            prefix=f"{prefix}.attention",
+            weights=weights,
+            rotary_emb=rotary_emb,
         )

         self.mlp = FlashMLP(config, prefix=f"{prefix}.mlp", weights=weights)
@@ -328,9 +324,18 @@ class FlashGPTNeoXModel(FlashGPTNeoXPreTrainedModel):
             prefix=f"{prefix}.embed_in", weights=weights
         )

+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=int(
+                config.rotary_pct * (config.hidden_size // config.num_attention_heads)
+            ),
+            base=config.rotary_emb_base,
+            device=weights.device,
+        )
+
         self.layers = nn.ModuleList(
             [
-                FlashNeoXLayer(layer_id, config, weights)
+                FlashNeoXLayer(layer_id, config, weights, rotary_emb)
                 for layer_id in range(config.num_hidden_layers)
             ]
         )
@@ -113,6 +113,7 @@ class FlashPhiAttention(torch.nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.num_heads = config.num_attention_heads
@@ -121,13 +122,7 @@ class FlashPhiAttention(torch.nn.Module):

         self.softmax_scale = self.head_size**-0.5
         self.rotary_dim = int(config.partial_rotary_factor * self.head_size)
-
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.rotary_dim,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         if self.num_heads % weights.process_group.size() != 0:
             raise ValueError(
@@ -259,11 +254,14 @@ class PhiMLP(nn.Module):


 class FlashPhiLayer(nn.Module):
-    def __init__(self, prefix: str, layer_id, config, weights):
+    def __init__(self, prefix: str, layer_id, config, weights, rotary_emb):
         super().__init__()
         prefix = f"{prefix}.layers.{layer_id}"
         self.self_attn = FlashPhiAttention(
-            prefix=f"{prefix}.self_attn", config=config, weights=weights
+            prefix=f"{prefix}.self_attn",
+            config=config,
+            weights=weights,
+            rotary_emb=rotary_emb,
         )
         self.mlp = PhiMLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
         self.input_layernorm = FastLayerNorm.load(
@@ -315,6 +313,16 @@ class FlashPhiModel(torch.nn.Module):
         self.embed_tokens = TensorParallelEmbedding(
             prefix=f"{prefix}.embed_tokens", weights=weights
         )
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=int(
+                config.partial_rotary_factor
+                * (config.hidden_size // config.num_attention_heads)
+            ),
+            base=config.rope_theta,
+            device=weights.device,
+        )
+
         self.layers = nn.ModuleList(
             [
                 FlashPhiLayer(
@@ -322,6 +330,7 @@ class FlashPhiModel(torch.nn.Module):
                     layer_id,
                     config,
                     weights,
+                    rotary_emb,
                 )
                 for layer_id in range(config.num_hidden_layers)
             ]
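For GPT-NeoX and Phi the shared embedding covers only a fraction of each attention head (partial rotary), so the model-level constructor reproduces the per-head rotary dimension that the attention modules previously computed for themselves. A quick illustration of that arithmetic, using made-up config values:

# Hypothetical values, only to show how the rotary dim is derived.
hidden_size, num_attention_heads = 4096, 32
head_size = hidden_size // num_attention_heads            # 128
rotary_pct = 0.25                                         # NeoX-style fraction
partial_rotary_factor = 0.4                               # Phi-style fraction
neox_rotary_dim = int(rotary_pct * head_size)             # 32 of 128 dims rotated
phi_rotary_dim = int(partial_rotary_factor * head_size)   # 51 of 128 dims rotated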
@@ -58,21 +58,18 @@ class Qwen2Attention(torch.nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.max_past = (
-            config.sliding_window if config.sliding_window is not None else -1
+            config.sliding_window
+            if config.use_sliding_window and config.sliding_window is not None
+            else -1
         )
         self.num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
         self.head_size = self.hidden_size // self.num_heads
-
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         self.softmax_scale = self.head_size**-0.5

@@ -155,6 +152,7 @@ class Qwen2Attention(torch.nn.Module):
             seqlen,
             kv_scales=self.kv_scales,
             hpu_attention_meta=hpu_attention_meta,
+            window_size_left=self.max_past,
         )

         return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
@@ -199,11 +197,14 @@ class Qwen2MLP(nn.Module):


 class Qwen2Layer(nn.Module):
-    def __init__(self, prefix, layer_id, config, weights):
+    def __init__(self, prefix, layer_id, config, weights, rotary_emb):
         super().__init__()
         prefix = f"{prefix}.layers.{layer_id}"
         self.self_attn = Qwen2Attention(
-            prefix=f"{prefix}.self_attn", config=config, weights=weights
+            prefix=f"{prefix}.self_attn",
+            config=config,
+            weights=weights,
+            rotary_emb=rotary_emb,
         )
         self.mlp = Qwen2MLP(prefix=f"{prefix}.mlp", config=config, weights=weights)
         self.input_layernorm = FastRMSNorm.load(
@@ -258,6 +259,14 @@ class Qwen2Model(torch.nn.Module):
         process_group = weights.process_group
         self.tp_rank = process_group.rank()
         self.tp_world_size = process_group.size()
+
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=config.hidden_size // config.num_attention_heads,
+            base=config.rope_theta,
+            device=weights.device,
+        )
+
         self.layers = nn.ModuleList(
             [
                 Qwen2Layer(
@@ -265,6 +274,7 @@ class Qwen2Model(torch.nn.Module):
                     layer_id,
                     config,
                     weights,
+                    rotary_emb,
                 )
                 for layer_id in range(config.num_hidden_layers)
             ]
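Besides wiring in the shared rotary embedding, the Qwen2 hunks gate the sliding window on config.use_sliding_window and pass the resulting window size to the attention call as window_size_left. A small sketch of just that gating logic, kept independent of the real attention kernel:

def resolve_window_size_left(config) -> int:
    # Mirrors the condition above: use a window only when sliding-window attention
    # is enabled and a window size is configured; -1 conventionally means "no window".
    if getattr(config, "use_sliding_window", False) and config.sliding_window is not None:
        return config.sliding_window
    return -1

The same window_size_left=self.max_past argument is threaded through the Mistral, Qwen3, Qwen3-MoE, and Starcoder2 attention calls in the surrounding hunks.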
@@ -41,7 +41,7 @@ from text_generation_server.layers.rotary import PositionRotaryEmbedding
 class Qwen3Attention(nn.Module):
     """Multi-headed attention from 'Attention Is All You Need' paper"""

-    def __init__(self, config, prefix, weights, layer_idx):
+    def __init__(self, config, prefix, weights, layer_idx, rotary_emb):
         super().__init__()
         self.config = config
         self.layer_idx = layer_idx
@@ -54,12 +54,7 @@ class Qwen3Attention(nn.Module):
         self.num_heads = config.num_attention_heads
         self.attention_dropout = config.attention_dropout
         self.softmax_scale = self.head_dim**-0.5
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_dim,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         if self.num_heads % weights.process_group.size() != 0:
             raise ValueError(
@@ -172,6 +167,7 @@ class Qwen3Attention(nn.Module):
             seqlen,
             kv_scales=self.kv_scales,
             hpu_attention_meta=hpu_attention_meta,
+            window_size_left=self.max_past,
         )

         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
@@ -179,7 +175,7 @@ class Qwen3Attention(nn.Module):


 class Qwen3DecoderLayer(nn.Module):
-    def __init__(self, config, prefix, weights, layer_idx: int):
+    def __init__(self, config, prefix, weights, layer_idx: int, rotary_emb):
         super().__init__()
         self.hidden_size = config.hidden_size
         self.self_attn = Qwen3Attention(
@@ -187,6 +183,7 @@ class Qwen3DecoderLayer(nn.Module):
             prefix=f"{prefix}.self_attn",
             weights=weights,
             layer_idx=layer_idx,
+            rotary_emb=rotary_emb,
         )
         self.mlp = Qwen2MLP(config=config, prefix=f"{prefix}.mlp", weights=weights)
         self.input_layernorm = FastRMSNorm.load(
@@ -241,6 +238,15 @@ class Qwen3Model(nn.Module):
         self.config = config
         self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
+        head_dim = getattr(
+            config, "head_dim", config.hidden_size // config.num_attention_heads
+        )
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=head_dim,
+            base=config.rope_theta,
+            device=weights.device,
+        )

         self.layers = nn.ModuleList(
             [
@@ -249,6 +255,7 @@ class Qwen3Model(nn.Module):
                     prefix=f"{prefix}.layers.{layer_idx}",
                     weights=weights,
                     layer_idx=layer_idx,
+                    rotary_emb=rotary_emb,
                 )
                 for layer_idx in range(config.num_hidden_layers)
             ]
@@ -21,6 +21,7 @@ import torch.nn.functional as F
 from text_generation_server.layers.attention import (
     attention,
     paged_attention,
+    set_block_mapping,
     Seqlen,
     HPUPagedAttentionMetadata,
 )
@@ -80,7 +81,7 @@ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
 class Qwen3MoeAttention(nn.Module):
     """Multi-headed attention from 'Attention Is All You Need' paper"""

-    def __init__(self, config, prefix, weights, layer_idx):
+    def __init__(self, config, prefix, weights, layer_idx, rotary_emb):
         super().__init__()
         self.config = config
         self.layer_idx = layer_idx
@@ -108,13 +109,7 @@ class Qwen3MoeAttention(nn.Module):
         self.o_proj = FastLinear.load(
             config, f"{prefix}.o_proj", weights, bias=config.attention_bias
         )
-
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_dim,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         self.q_norm = FastRMSNorm.load(
             prefix=f"{prefix}.q_norm",
@@ -196,6 +191,7 @@ class Qwen3MoeAttention(nn.Module):
             seqlen,
             kv_scales=self.kv_scales,
             hpu_attention_meta=hpu_attention_meta,
+            window_size_left=self.max_past,
         )

         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
@@ -345,7 +341,7 @@ class Qwen3MoeSparseMoeBlock(nn.Module):


 class Qwen3MoeDecoderLayer(nn.Module):
-    def __init__(self, config, prefix, weights, layer_idx: int):
+    def __init__(self, config, prefix, weights, layer_idx: int, rotary_emb):
         super().__init__()
         self.hidden_size = config.hidden_size

@@ -355,6 +351,7 @@ class Qwen3MoeDecoderLayer(nn.Module):
                 prefix=f"{prefix}.self_attn",
                 weights=weights,
                 layer_idx=layer_idx,
+                rotary_emb=rotary_emb,
             )
         else:
             self.self_attn = Qwen3MoeAttention(
@@ -362,6 +359,7 @@ class Qwen3MoeDecoderLayer(nn.Module):
                 prefix=f"{prefix}.self_attn",
                 weights=weights,
                 layer_idx=layer_idx,
+                rotary_emb=rotary_emb,
             )

         moe_layer_cls = (
@@ -433,6 +431,15 @@ class Qwen3MoeModel(nn.Module):
         self.config = config
         self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
+        head_dim = getattr(
+            config, "head_dim", config.hidden_size // config.num_attention_heads
+        )
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=head_dim,
+            base=config.rope_theta,
+            device=weights.device,
+        )

         self.layers = nn.ModuleList(
             [
@@ -441,6 +448,7 @@ class Qwen3MoeModel(nn.Module):
                 prefix=f"{prefix}.layers.{layer_idx}",
                 weights=weights,
                 layer_idx=layer_idx,
+                rotary_emb=rotary_emb,
             )
             for layer_idx in range(config.num_hidden_layers)
         ]
@@ -459,6 +467,10 @@ class Qwen3MoeModel(nn.Module):
         seqlen: Seqlen,
         hpu_attention_meta: Optional[HPUPagedAttentionMetadata],
     ) -> torch.Tensor:
+        if hpu_attention_meta is not None:
+            hpu_attention_meta = set_block_mapping(
+                hpu_attention_meta, inputs_embeds.shape[0]
+            )

         hidden_states = inputs_embeds

@@ -134,6 +134,7 @@ class FlashRWAttention(torch.nn.Module):
         config,
         prefix: str,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.num_heads = config.n_head
@@ -141,13 +142,8 @@ class FlashRWAttention(torch.nn.Module):
         self.hidden_size = config.hidden_size
         self.head_size = self.hidden_size // self.num_heads
         self.rope_theta = config.rope_theta
+        self.rotary_emb = rotary_emb

-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=self.rope_theta,
-            device=weights.device,
-        )
         self.softmax_scale = self.head_size ** (-0.5)

         if self.num_heads % weights.process_group.size() != 0:
@@ -243,6 +239,7 @@ class FlashRWLargeAttention(torch.nn.Module):
         config,
         prefix: str,
         weights,
+        rotary_emb,
     ):
         super().__init__()

@@ -255,13 +252,8 @@ class FlashRWLargeAttention(torch.nn.Module):
         self.head_size = hidden_size // num_heads
         self.num_groups = num_groups
         self.rope_theta = config.rope_theta
+        self.rotary_emb = rotary_emb

-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=self.rope_theta,
-            device=weights.device,
-        )
         self.softmax_scale = self.head_size ** (-0.5)

         # self.num_groups = num_heads // (num_heads_kv * 2)
@@ -382,6 +374,7 @@ class FlashRWLayer(nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()

@@ -404,6 +397,7 @@ class FlashRWLayer(nn.Module):
             config,
             prefix=f"{prefix}.self_attention",
             weights=weights,
+            rotary_emb=rotary_emb,
         )
         self.post_attention_layernorm = (
             FastLayerNorm.load(
@@ -526,7 +520,7 @@ class FlashRWLayerNorm(nn.Module):


 class FlashRWLargeLayer(nn.Module):
-    def __init__(self, layer_id, prefix: str, config, weights):
+    def __init__(self, layer_id, prefix: str, config, weights, rotary_emb):
         super().__init__()
         prefix = f"{prefix}.h.{layer_id}"

@@ -536,6 +530,7 @@ class FlashRWLargeLayer(nn.Module):
             config,
             prefix=f"{prefix}.self_attention",
             weights=weights,
+            rotary_emb=rotary_emb,
         )
         assert config.parallel_attn, "This version doesn't support non parallel_attn"

@@ -593,11 +588,17 @@ class FlashRWModel(FlashRWPreTrainedModel):
         self.word_embeddings = TensorParallelEmbedding(
             prefix=f"{prefix}.word_embeddings", weights=weights
         )
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=config.hidden_size // config.n_head,
+            base=config.rope_theta,
+            device=weights.device,
+        )

         if config.new_decoder_architecture:
             self.h = nn.ModuleList(
                 [
-                    FlashRWLargeLayer(layer_id, prefix, config, weights)
+                    FlashRWLargeLayer(layer_id, prefix, config, weights, rotary_emb)
                     for layer_id in range(config.num_hidden_layers)
                 ]
             )
@@ -605,7 +606,7 @@ class FlashRWModel(FlashRWPreTrainedModel):
         else:
             self.h = nn.ModuleList(
                 [
-                    FlashRWLayer(layer_id, prefix, config, weights)
+                    FlashRWLayer(layer_id, prefix, config, weights, rotary_emb)
                     for layer_id in range(config.num_hidden_layers)
                 ]
             )
@@ -180,6 +180,7 @@ class Starcoder2Attention(torch.nn.Module):
         prefix: str,
         config,
         weights,
+        rotary_emb,
     ):
         super().__init__()
         self.max_past = (
@@ -188,13 +189,7 @@ class Starcoder2Attention(torch.nn.Module):
         self.num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
         self.head_size = self.hidden_size // self.num_heads
-
-        self.rotary_emb = PositionRotaryEmbedding.static(
-            config=config,
-            dim=self.head_size,
-            base=config.rope_theta,
-            device=weights.device,
-        )
+        self.rotary_emb = rotary_emb

         self.softmax_scale = self.head_size**-0.5

@@ -285,6 +280,7 @@ class Starcoder2Attention(torch.nn.Module):
             seqlen,
             kv_scales=self.kv_scales,
             hpu_attention_meta=hpu_attention_meta,
+            window_size_left=self.max_past,
         )

         return self.o_proj(
@@ -411,11 +407,15 @@ STARCODER2_MLP_CLASSES = {


 class Starcoder2Layer(nn.Module):
-    def __init__(self, layer_id, config, weights):
+    def __init__(self, layer_id, config, weights, rotary_emb):
         super().__init__()
         prefix = f"model.layers.{layer_id}"
         self.self_attn = Starcoder2Attention(
-            prefix=f"{prefix}.self_attn", config=config, weights=weights, index=layer_id
+            prefix=f"{prefix}.self_attn",
+            config=config,
+            weights=weights,
+            index=layer_id,
+            rotary_emb=rotary_emb,
         )

         self.mlp = STARCODER2_MLP_CLASSES[config.mlp_type](
@@ -481,12 +481,19 @@ class Starcoder2Model(torch.nn.Module):
         self.embed_tokens = TensorParallelEmbedding(
             prefix=f"{prefix}.embed_tokens", weights=weights
         )
+        rotary_emb = PositionRotaryEmbedding.static(
+            config=config,
+            dim=config.hidden_size // config.num_attention_heads,
+            base=config.rope_theta,
+            device=weights.device,
+        )
         self.layers = nn.ModuleList(
             [
                 Starcoder2Layer(
                     layer_id,
                     config,
                     weights,
+                    rotary_emb,
                 )
                 for layer_id in range(config.num_hidden_layers)
             ]
@@ -1,326 +0,0 @@
-# coding=utf-8
-# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
-#
-# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
-# and OPT implementations in this library. It has been modified from its
-# original forms to accommodate minor architectural differences compared
-# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Idefics model configuration"""
-import copy
-
-from transformers import PretrainedConfig
-
-IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-    "HuggingFaceM4/idefics-9b": "https://huggingface.co/HuggingFaceM4/idefics-9b/blob/main/config.json",
-    "HuggingFaceM4/idefics-80b": "https://huggingface.co/HuggingFaceM4/idefics-80b/blob/main/config.json",
-}
-
-
-class IdeficsVisionConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
-    Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
-    with the defaults will yield a similar configuration to that of the Idefics-9B.
-    e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-    Args:
-        hidden_size (`int`, *optional*, defaults to 768):
-            Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
-        image_size (`int`, *optional*, defaults to 224):
-            The size (resolution) of each image.
-        intermediate_size (`int`, *optional*, defaults to 5120):
-            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
-        patch_size (`int`, *optional*, defaults to 14):
-            The size (resolution) of each patch.
-        num_hidden_layers (`int`, *optional*, defaults to 32):
-            Number of hidden layers in the Transformer encoder.
-        num_attention_heads (`int`, *optional*, defaults to 16):
-            Number of attention heads for each attention layer in the Transformer encoder.
-        image_num_channels (`int`, *optional*, defaults to `3`):
-            Number of image channels.
-        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
-            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
-            `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
-        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
-            The epsilon used by the layer normalization layers.
-        attention_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the attention probabilities.
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        initializer_factor (`float`, *optional*, defaults to 1.0):
-            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
-            testing).
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-    """
-
-    model_type = "idefics"
-    attribute_map = {
-        "hidden_size": "embed_dim",
-    }
-
-    def __init__(
-        self,
-        embed_dim=768,
-        image_size=224,
-        intermediate_size=5120,
-        patch_size=14,
-        num_hidden_layers=32,
-        num_attention_heads=16,
-        num_channels=3,
-        hidden_act="gelu",
-        layer_norm_eps=1e-5,
-        attention_dropout=0.0,
-        initializer_range=0.02,
-        initializer_factor=1.0,
-        **kwargs,
-    ):
-        self.embed_dim = embed_dim
-        self.image_size = image_size
-        self.intermediate_size = intermediate_size
-        self.patch_size = patch_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.num_channels = num_channels
-        self.layer_norm_eps = layer_norm_eps
-        self.attention_dropout = attention_dropout
-        self.initializer_range = initializer_range
-        self.initializer_factor = initializer_factor
-        self.hidden_act = hidden_act
-
-        super().__init__(**kwargs)
-
-
-class IdeficsPerceiverConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
-    Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
-    with the defaults will yield a similar configuration to that of the Idefics-9B.
-    e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-    Args:
-        use_resampler (`bool`, *optional*, defaults to `False`):
-            Whether or not to use the resampler
-        resampler_n_latents (`int`, *optional*, defaults to ):
-            Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
-        resampler_depth (`int`, *optional*, defaults to 6):
-            Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
-        resampler_n_heads (`int`, *optional*, defaults to 16):
-            Number of heads in each Transformer block (for multi-headed self-attention).
-        resampler_head_dim (`int`, *optional*, defaults to 96):
-            Dimensionality of each head projection in the Transformer block.
-        qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
-            Whether or not to use qk layer norms in perceiver
-    """
-
-    model_type = "idefics"
-
-    def __init__(
-        self,
-        use_resampler=False,
-        resampler_n_latents=64,
-        resampler_depth=6,
-        resampler_n_heads=16,
-        resampler_head_dim=96,
-        qk_layer_norms_perceiver=False,
-        **kwargs,
-    ):
-        self.use_resampler = use_resampler
-        self.resampler_n_latents = resampler_n_latents
-        self.resampler_depth = resampler_depth
-        self.resampler_n_heads = resampler_n_heads
-        self.resampler_head_dim = resampler_head_dim
-        self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
-
-        super().__init__(**kwargs)
-
-
-class IdeficsConfig(PretrainedConfig):
-    r"""
-    This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
-    Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
-    with the defaults will yield a similar configuration to that of the Idefics-9B.
-    e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-    Args:
-        additional_vocab_size (`int`, *optional`, defaults to 0):
-            Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
-            are always trainable whereas regular vocab tokens can be frozen or not.
-        vocab_size (`int`, *optional*, defaults to 32000):
-            Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
-            `inputs_ids` passed when calling [`~IdeficsModel`]
-        hidden_size (`int`, *optional*, defaults to 4096):
-            Dimension of the hidden representations.
-        intermediate_size (`int`, *optional*, defaults to 11008):
-            Dimension of the MLP representations.
-        num_hidden_layers (`int`, *optional*, defaults to 32):
-            Number of hidden layers in the Transformer encoder.
-        num_attention_heads (`int`, *optional*, defaults to 32):
-            Number of attention heads for each attention layer in the Transformer encoder.
-        dropout (`float`, *optional*, defaults to 0.0):
-            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
-        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-            The non-linear activation function (function or string) in the decoder.
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-        alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
-            Initialization type for the alphas.
-        alphas_initializer_range (`float`, *optional*, defaults to 0.0):
-            The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
-            Attention.
-        alpha_type (`str`, *optional*, defaults to `"float"`):
-            Whether the gating alphas should be vectors or single floats.
-        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
-            The epsilon used by the rms normalization layers.
-        use_cache (`bool`, *optional*, defaults to `True`):
-            Whether or not the model should return the last key/values attentions (not used by all models). Only
-            relevant if `config.is_decoder=True`.
-        pad_token_id (`int`, *optional*, defaults to 0)
-            Padding token id.
-        bos_token_id (`int`, *optional*, defaults to 1)
-            Beginning of stream token id.
-        eos_token_id (`int`, *optional*, defaults to 2)
-            End of stream token id.
-        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
-            Whether to tie weight embeddings
-        cross_layer_interval (`int`, *optional*, default to 1)
-            Interval for cross attention (from text to image) layers.
-        qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
-        freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
-        freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
-            Exceptions to freezing text layers when `freeze_text_layers` is `True`
-        freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
-        freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
-        freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
-            Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
-        use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
-        vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
-        perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
-    Example:
-    ```python
-    >>> from transformers import IdeficsModel, IdeficsConfig
-    >>> # Initializing a Idefics idefics-9b style configuration
-    >>> configuration = IdeficsConfig()
-    >>> # Initializing a model from the idefics-9b style configuration
-    >>> model = IdeficsModel(configuration)
-    >>> # Accessing the model configuration
-    >>> configuration = model.config
-    ```"""
-
-    model_type = "idefics"
-    is_composition = True
-
-    def __init__(
-        self,
-        vocab_size=32000,
-        additional_vocab_size=0,
-        hidden_size=4096,
-        intermediate_size=11008,
-        num_hidden_layers=32,
-        num_attention_heads=32,
-        dropout=0.0,
-        hidden_act="silu",
-        initializer_range=0.02,
-        alpha_initializer="zeros",
-        alphas_initializer_range=0.0,
-        alpha_type="float",
-        rms_norm_eps=1e-6,
-        use_cache=True,
-        pad_token_id=0,
-        bos_token_id=1,
-        eos_token_id=2,
-        tie_word_embeddings=False,
-        cross_layer_interval=1,
-        qk_layer_norms=False,
-        freeze_text_layers=True,
-        freeze_text_module_exceptions=[],
-        freeze_lm_head=False,
-        freeze_vision_layers=True,
-        freeze_vision_module_exceptions=[],
-        use_resampler=False,
-        vision_config=None,
-        perceiver_config=None,
-        **kwargs,
-    ):
-        self.vocab_size = vocab_size
-        self.additional_vocab_size = additional_vocab_size
-        self.hidden_size = hidden_size
-        self.intermediate_size = intermediate_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.dropout = dropout
-        self.hidden_act = hidden_act
-        self.initializer_range = initializer_range
-        self.alpha_initializer = alpha_initializer
-        self.alphas_initializer_range = alphas_initializer_range
-        self.alpha_type = alpha_type
-        self.rms_norm_eps = rms_norm_eps
-        self.use_cache = use_cache
-
-        self.cross_layer_interval = cross_layer_interval
-        self.qk_layer_norms = qk_layer_norms
-        self.freeze_vision_layers = freeze_vision_layers
-
-        self.freeze_text_layers = freeze_text_layers
-        self.freeze_text_module_exceptions = freeze_text_module_exceptions
-        self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
-        self.freeze_lm_head = freeze_lm_head
-
-        self.use_resampler = use_resampler
-
-        if perceiver_config is None:
-            self.perceiver_config = IdeficsPerceiverConfig()
-        elif isinstance(perceiver_config, dict):
-            self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
-        elif isinstance(perceiver_config, IdeficsPerceiverConfig):
-            self.perceiver_config = perceiver_config
-
-        if vision_config is None:
-            self.vision_config = IdeficsVisionConfig()
-        elif isinstance(vision_config, dict):
-            self.vision_config = IdeficsVisionConfig(**vision_config)
-        elif isinstance(vision_config, IdeficsVisionConfig):
-            self.vision_config = vision_config
-
-        super().__init__(
-            pad_token_id=pad_token_id,
-            bos_token_id=bos_token_id,
-            eos_token_id=eos_token_id,
-            tie_word_embeddings=tie_word_embeddings,
-            **kwargs,
-        )
-
-        # IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
-        # PretrainedConfig.from_dict first instantiates the class with the config dict and only then
-        # updates the config object with `kwargs` from from_pretrained, so during the instantiation
-        # of this object many attributes have default values and haven't yet been overridden.
-        # Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
-
-    def to_dict(self):
-        """
-        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
-        Returns:
-            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
-        """
-        output = copy.deepcopy(self.__dict__)
-
-        output["vision_config"] = self.vision_config.to_dict()
-        output["perceiver_config"] = self.perceiver_config.to_dict()
-        output["model_type"] = self.__class__.model_type
-
-        return output
@ -1,297 +0,0 @@
|
|||||||
# coding=utf-8
|
|
||||||
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
"""Image processor class for Idefics."""
|
|
||||||
|
|
||||||
from typing import Callable, Dict, List, Optional, Union, Iterable
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
from PIL import Image
|
|
||||||
|
|
||||||
import transformers
|
|
||||||
from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
|
|
||||||
from transformers.image_transforms import (
|
|
||||||
resize,
|
|
||||||
to_channel_dimension_format,
|
|
||||||
rescale,
|
|
||||||
normalize,
|
|
||||||
)
|
|
||||||
from transformers.image_utils import (
|
|
||||||
ChannelDimension,
|
|
||||||
ImageInput,
|
|
||||||
PILImageResampling,
|
|
||||||
make_list_of_images,
|
|
||||||
to_numpy_array,
|
|
||||||
valid_images,
|
|
||||||
)
|
|
||||||
from io import BytesIO
|
|
||||||
import base64
|
|
||||||
import requests
|
|
||||||
from transformers import TensorType, is_torch_available
|
|
||||||
|
|
||||||
|
|
||||||
IDEFICS_STANDARD_MEAN = [0.48145466, 0.4578275, 0.40821073]
|
|
||||||
IDEFICS_STANDARD_STD = [0.26862954, 0.26130258, 0.27577711]
|
|
||||||
|
|
||||||
|
|
||||||
def convert_to_rgb(image):
|
|
||||||
# `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
|
|
||||||
# for transparent images. The call to `alpha_composite` handles this case
|
|
||||||
if image.mode == "RGB":
|
|
||||||
return image
|
|
||||||
|
|
||||||
image_rgba = image.convert("RGBA")
|
|
||||||
background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
|
|
||||||
alpha_composite = Image.alpha_composite(background, image_rgba)
|
|
||||||
alpha_composite = alpha_composite.convert("RGB")
|
|
||||||
return alpha_composite
|
|
||||||
|
|
||||||
|
|
||||||
class IdeficsImageProcessor(BaseImageProcessor):
|
|
||||||
r"""
|
|
||||||
Constructs a Idefics image processor.
|
|
||||||
Args:
|
|
||||||
image_size (`int`, *optional*, defaults to `224`):
|
|
||||||
Resize to image size
|
|
||||||
image_num_channels (`int`, *optional*, defaults to `3`):
|
|
||||||
Number of image channels.
|
|
||||||
image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
|
|
||||||
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
|
|
||||||
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
|
|
||||||
overridden by the `image_mean` parameter in the `preprocess` method.
|
|
||||||
image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
|
|
||||||
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
|
|
||||||
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
|
||||||
Can be overridden by the `image_std` parameter in the `preprocess` method.
|
|
||||||
"""
|
|
||||||
|
|
||||||
model_input_names = ["pixel_values"]
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
image_size: int = 224,
|
|
||||||
image_mean: Optional[Union[float, List[float]]] = None,
|
|
||||||
image_std: Optional[Union[float, List[float]]] = None,
|
|
||||||
image_num_channels: Optional[int] = 3,
|
|
||||||
**kwargs,
|
|
||||||
) -> None:
|
|
||||||
super().__init__(**kwargs)
|
|
||||||
|
|
||||||
self.image_size = image_size
|
|
||||||
self.image_num_channels = image_num_channels
|
|
||||||
self.image_mean = image_mean
|
|
||||||
self.image_std = image_std
|
|
||||||
|
|
||||||
def preprocess(
|
|
||||||
self,
|
|
||||||
images: ImageInput,
|
|
||||||
image_num_channels: Optional[int] = 3,
|
|
||||||
image_size: Optional[Dict[str, int]] = None,
|
|
||||||
image_mean: Optional[Union[float, List[float]]] = None,
|
|
||||||
image_std: Optional[Union[float, List[float]]] = None,
|
|
||||||
transform: Callable = None,
|
|
||||||
**kwargs,
|
|
||||||
) -> TensorType.PYTORCH:
|
|
||||||
"""
|
|
||||||
Preprocess a batch of images.
|
|
||||||
Args:
|
|
||||||
images (`ImageInput`):
|
|
||||||
A list of images to preprocess.
|
|
||||||
image_size (`int`, *optional*, defaults to `self.image_size`):
|
|
||||||
Resize to image size
|
|
||||||
image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
|
|
||||||
Number of image channels.
|
|
||||||
image_mean (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
|
|
||||||
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
|
|
||||||
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can
|
|
||||||
be overridden by the `image_mean` parameter in the `preprocess` method.
|
|
||||||
image_std (`float` or `List[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
|
|
||||||
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
|
|
||||||
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
|
|
||||||
method. Can be overridden by the `image_std` parameter in the `preprocess` method.
|
|
||||||
transform (`Callable`, *optional*, defaults to `None`):
|
|
||||||
A custom transform function that accepts a single image can be passed for training. For example,
|
|
||||||
`torchvision.Compose` can be used to compose multiple transforms. If `None` - an inference mode is
|
|
||||||
assumed - and then a preset of inference-specific transforms will be applied to the images
|
|
||||||
Returns:
|
|
||||||
a PyTorch tensor of the processed images
|
|
||||||
"""
|
|
||||||
image_size = image_size if image_size is not None else self.image_size
|
|
||||||
image_num_channels = (
|
|
||||||
image_num_channels
|
|
||||||
if image_num_channels is not None
|
|
||||||
else self.image_num_channels
|
|
||||||
)
|
|
||||||
image_mean = image_mean if image_mean is not None else self.image_mean
|
|
||||||
image_std = image_std if image_std is not None else self.image_std
|
|
||||||
size = (image_size, image_size)
|
|
||||||
|
|
||||||
if len(images) == 0:
|
|
||||||
return []
|
|
||||||
|
|
||||||
images = make_list_of_images(images)
|
|
||||||
|
|
||||||
if not valid_images(images):
|
|
||||||
raise ValueError(
|
|
||||||
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
|
||||||
"torch.Tensor, tf.Tensor or jax.ndarray."
|
|
||||||
)
|
|
||||||
|
|
||||||
# For training a user needs to pass their own set of transforms as a Callable.
|
|
||||||
# For reference this is what was used in the original IDEFICS training:
|
|
||||||
# transform = transforms.Compose([
|
|
||||||
# convert_to_rgb,
|
|
||||||
# transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
|
|
||||||
# transforms.ToTensor(),
|
|
||||||
# transforms.Normalize(mean=image_mean, std=image_std),
|
|
||||||
# ])
|
|
||||||
if transform is not None:
|
|
||||||
if not is_torch_available():
|
|
||||||
raise ImportError("To pass in `transform` torch must be installed")
|
|
||||||
import torch
|
|
||||||
|
|
||||||
images = [transform(x) for x in images]
|
|
||||||
return torch.stack(images)
|
|
||||||
|
|
||||||
# for inference we do the exact transforms that were used to train IDEFICS
|
|
||||||
images = [convert_to_rgb(x) for x in images]
|
|
||||||
# further transforms expect numpy arrays
|
|
||||||
images = [to_numpy_array(x) for x in images]
|
|
||||||
images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
|
|
||||||
images = [self.rescale(image=image, scale=1 / 255) for image in images]
|
|
||||||
images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
|
|
||||||
images = [
|
|
||||||
to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images
|
|
||||||
]
|
|
||||||
# TODO: this converts to torch tensors - switch to convert_to_tensors once it becomes available
|
|
||||||
images = BatchFeature(
|
|
||||||
data={"pixel_values": images}, tensor_type=TensorType.PYTORCH
|
|
||||||
)["pixel_values"]
|
|
||||||
|
|
||||||
return images
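# For reference, the inference path above can be reproduced with a plain torchvision
# pipeline. A minimal standalone sketch, assuming the IDEFICS defaults (224x224 and the
# CLIP mean/std that IDEFICS_STANDARD_MEAN/STD are expected to hold); it is not
# byte-identical to the numpy path above because the resize kernels differ slightly.
import torch
from torchvision import transforms

CLIP_MEAN = (0.48145466, 0.4578275, 0.40821073)  # assumed IDEFICS_STANDARD_MEAN
CLIP_STD = (0.26862954, 0.26130258, 0.27577711)  # assumed IDEFICS_STANDARD_STD

def preprocess_for_inference(images, image_size=224):
    # Same sequence as above: RGB convert, bicubic resize, rescale to [0, 1],
    # normalize, channels-first, then stack into a single batch tensor.
    pipeline = transforms.Compose(
        [
            transforms.Lambda(lambda im: im.convert("RGB")),
            transforms.Resize(
                (image_size, image_size),
                interpolation=transforms.InterpolationMode.BICUBIC,
            ),
            transforms.ToTensor(),
            transforms.Normalize(mean=CLIP_MEAN, std=CLIP_STD),
        ]
    )
    return torch.stack([pipeline(im) for im in images])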
|
|
||||||
|
|
||||||
def fetch_images(self, image_url_or_urls: Union[str, List[str]]):
|
|
||||||
"""
|
|
||||||
Convert a single or a list of urls into the corresponding `PIL.Image` objects.
|
|
||||||
If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
|
|
||||||
returned.
|
|
||||||
"""
|
|
||||||
headers = {
|
|
||||||
"User-Agent": (
|
|
||||||
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0"
|
|
||||||
" Safari/537.36"
|
|
||||||
)
|
|
||||||
}
|
|
||||||
if isinstance(image_url_or_urls, list):
|
|
||||||
return [self.fetch_images(x) for x in image_url_or_urls]
|
|
||||||
elif isinstance(image_url_or_urls, str):
|
|
||||||
image = image_url_or_urls
|
|
||||||
|
|
||||||
if image.startswith("http://") or image.startswith("https://"):
|
|
||||||
response = requests.get(
|
|
||||||
image_url_or_urls, stream=True, headers=headers, timeout=(1, 5)
|
|
||||||
)
|
|
||||||
response.raise_for_status()
|
|
||||||
content = response.content
|
|
||||||
elif image.startswith("data:"):
|
|
||||||
# https://stackoverflow.com/questions/17090571/is-there-a-way-to-set-background-image-as-a-base64-encoded-image
|
|
||||||
# data:image/png;base64,xxx
|
|
||||||
image = image.split(",")[-1]
|
|
||||||
content = base64.b64decode(image)
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Unrecognized image {image}")
|
|
||||||
|
|
||||||
try:
|
|
||||||
image = Image.open(BytesIO(content))
|
|
||||||
# image.verify()
|
|
||||||
except Exception:
|
|
||||||
raise ValueError(f"Could not load image from url {image_url_or_urls}")
|
|
||||||
return image
|
|
||||||
else:
|
|
||||||
raise ValueError(
|
|
||||||
f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}"
|
|
||||||
)
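# A hedged usage sketch of fetch_images: the checkpoint name comes from the processor
# docstring further down and the call needs network access. A single url returns a single
# PIL.Image, a list of urls returns a list of images.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
image = processor.image_processor.fetch_images(url)
images = processor.image_processor.fetch_images([url, url])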
|
|
||||||
|
|
||||||
def rescale(
|
|
||||||
self,
|
|
||||||
image: np.ndarray,
|
|
||||||
scale: float,
|
|
||||||
data_format: Optional[Union[str, ChannelDimension]] = None,
|
|
||||||
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
|
||||||
**kwargs,
|
|
||||||
) -> np.ndarray:
|
|
||||||
"""
|
|
||||||
Rescale an image by a scale factor. image = image * scale.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
image (`np.ndarray`):
|
|
||||||
Image to rescale.
|
|
||||||
scale (`float`):
|
|
||||||
The scaling factor to rescale pixel values by.
|
|
||||||
data_format (`str` or `ChannelDimension`, *optional*):
|
|
||||||
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
|
||||||
image is used. Can be one of:
|
|
||||||
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
|
||||||
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
|
||||||
input_data_format (`ChannelDimension` or `str`, *optional*):
|
|
||||||
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
|
||||||
from the input image. Can be one of:
|
|
||||||
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
|
||||||
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
`np.ndarray`: The rescaled image.
|
|
||||||
"""
|
|
||||||
# return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)
|
|
||||||
# requires 4.32
|
|
||||||
return rescale(image, scale=scale, data_format=data_format, **kwargs)
|
|
||||||
|
|
||||||
def normalize(
|
|
||||||
self,
|
|
||||||
image: np.ndarray,
|
|
||||||
mean: Union[float, Iterable[float]],
|
|
||||||
std: Union[float, Iterable[float]],
|
|
||||||
data_format: Optional[Union[str, ChannelDimension]] = None,
|
|
||||||
input_data_format: Optional[Union[str, ChannelDimension]] = None,
|
|
||||||
**kwargs,
|
|
||||||
) -> np.ndarray:
|
|
||||||
"""
|
|
||||||
Normalize an image. image = (image - image_mean) / image_std.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
image (`np.ndarray`):
|
|
||||||
Image to normalize.
|
|
||||||
mean (`float` or `Iterable[float]`):
|
|
||||||
Image mean to use for normalization.
|
|
||||||
std (`float` or `Iterable[float]`):
|
|
||||||
Image standard deviation to use for normalization.
|
|
||||||
data_format (`str` or `ChannelDimension`, *optional*):
|
|
||||||
The channel dimension format for the output image. If unset, the channel dimension format of the input
|
|
||||||
image is used. Can be one of:
|
|
||||||
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
|
||||||
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
|
||||||
input_data_format (`ChannelDimension` or `str`, *optional*):
|
|
||||||
The channel dimension format for the input image. If unset, the channel dimension format is inferred
|
|
||||||
from the input image. Can be one of:
|
|
||||||
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
|
|
||||||
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
`np.ndarray`: The normalized image.
|
|
||||||
"""
|
|
||||||
# TODO 4.32
|
|
||||||
return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
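# Both helpers reduce to element-wise arithmetic; a quick standalone numeric check
# (numpy only, the values are illustrative).
import numpy as np

pixel = np.array([[[0.0, 127.5, 255.0]]])  # one pixel row, three channel values
rescaled = pixel * (1 / 255)               # rescale: image * scale -> [0.0, 0.5, 1.0]
normalized = (rescaled - 0.5) / 0.5        # normalize: (image - mean) / std -> [-1.0, 0.0, 1.0]
assert np.allclose(normalized, [[[-1.0, 0.0, 1.0]]])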
|
|
||||||
|
|
||||||
|
|
||||||
transformers.IdeficsImageProcessor = IdeficsImageProcessor
|
|
File diff suppressed because it is too large
@@ -1,276 +0,0 @@
|
|||||||
# This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License.
|
|
||||||
#
|
|
||||||
# MIT License
|
|
||||||
#
|
|
||||||
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
|
|
||||||
#
|
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
# of this software and associated documentation files (the "Software"), to deal
|
|
||||||
# in the Software without restriction, including without limitation the rights
|
|
||||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
# copies of the Software, and to permit persons to whom the Software is
|
|
||||||
# furnished to do so, subject to the following conditions:
|
|
||||||
#
|
|
||||||
# The above copyright notice and this permission notice shall be included in all
|
|
||||||
# copies or substantial portions of the Software.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
# SOFTWARE.
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially
|
|
||||||
time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note
|
|
||||||
that the Perceiver in general resamples based solely off the *long-range* context; there's a nice opportunity here to
|
|
||||||
prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that
|
|
||||||
to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore.
|
|
||||||
|
|
||||||
References:
|
|
||||||
- DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model
|
|
||||||
- Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch
|
|
||||||
|
|
||||||
"""
|
|
||||||
from typing import Optional, Tuple
|
|
||||||
|
|
||||||
import torch
|
|
||||||
import torch.nn as nn
|
|
||||||
|
|
||||||
from text_generation_server.layers import (
|
|
||||||
TensorParallelColumnLinear,
|
|
||||||
TensorParallelRowLinear,
|
|
||||||
)
|
|
||||||
|
|
||||||
EPS = 1e-5
|
|
||||||
|
|
||||||
|
|
||||||
class IdeficsPerceiverResampler(nn.Module):
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
prefix,
|
|
||||||
config,
|
|
||||||
embed_dim: int,
|
|
||||||
depth: int,
|
|
||||||
n_heads: int,
|
|
||||||
head_dim: int,
|
|
||||||
n_latents: int,
|
|
||||||
weights,
|
|
||||||
) -> None:
|
|
||||||
"""
|
|
||||||
Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or
|
|
||||||
MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then
|
|
||||||
returns a Tensor of shape [bsz, n_latents, embed_dim]. `embed_dim` is the dimensionality of the embeddings being
fed to the Perceiver Resampler (also the dimensionality of the latent embeddings *returned* by the Perceiver
Resampler); it could be e.g. the ViT embed_dim, the ResNet pool dim, and so on.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
config (`IdeficsConfig`): config object
|
|
||||||
embed_dim (`int`): The size of each embedding vector
|
|
||||||
depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
|
|
||||||
n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
|
|
||||||
head_dim (`int`): Dimensionality of each head projection in the Transformer block.
|
|
||||||
n_latents (`int`):
|
|
||||||
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
|
|
||||||
|
|
||||||
"""
|
|
||||||
super().__init__()
|
|
||||||
self.embed_dim, self.n_heads, self.head_dim, self.n_latents = (
|
|
||||||
embed_dim,
|
|
||||||
n_heads,
|
|
||||||
head_dim,
|
|
||||||
n_latents,
|
|
||||||
)
|
|
||||||
self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver
|
|
||||||
|
|
||||||
# Create Latents for Perceiver
|
|
||||||
self.latents = nn.Parameter(weights.get_tensor(f"{prefix}.latents"))
|
|
||||||
|
|
||||||
self.intermediate_dim = (
|
|
||||||
self.embed_dim * 4
|
|
||||||
if not hasattr(config.vision_config, "embed_dim")
|
|
||||||
else config.vision_config.embed_dim * 4
|
|
||||||
)
|
|
||||||
# Create Transformer Blocks
|
|
||||||
self.blocks = nn.ModuleList(
|
|
||||||
[
|
|
||||||
nn.ModuleList(
|
|
||||||
[
|
|
||||||
IdeficsPerceiverAttention(
|
|
||||||
prefix=f"{prefix}.blocks.{layer_id}.0",
|
|
||||||
config=config,
|
|
||||||
embed_dim=self.embed_dim,
|
|
||||||
n_heads=self.n_heads,
|
|
||||||
head_dim=self.head_dim,
|
|
||||||
qk_layer_norms=self.qk_layer_norms,
|
|
||||||
weights=weights,
|
|
||||||
),
|
|
||||||
IdeficsMLP(
|
|
||||||
prefix=f"{prefix}.blocks.{layer_id}.1",
|
|
||||||
intermediate_size=self.intermediate_dim,
|
|
||||||
config=config,
|
|
||||||
weights=weights,
|
|
||||||
),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
for layer_id in range(depth)
|
|
||||||
]
|
|
||||||
)
|
|
||||||
self.layer_norm = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.layer_norm", weights=weights, eps=EPS
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(self, context: torch.Tensor) -> torch.Tensor:
|
|
||||||
"""Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
|
|
||||||
# einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0])
|
|
||||||
latents = self.latents.repeat(context.shape[0], 1, 1)
|
|
||||||
|
|
||||||
# Feed through Perceiver Attention blocks...
|
|
||||||
for attn, ff in self.blocks:
|
|
||||||
latents = attn(context, latents) + latents
|
|
||||||
latents = ff(latents) + latents
|
|
||||||
|
|
||||||
return self.layer_norm(latents)
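# The shape contract is the key point: whatever the context length, the output always has
# n_latents tokens. A toy standalone sketch of the same pattern with plain
# nn.MultiheadAttention (illustrative sizes, not the sharded TGI layers used above).
import torch
import torch.nn as nn

class ToyResampler(nn.Module):
    def __init__(self, embed_dim=64, n_latents=8, n_heads=4):
        super().__init__()
        self.latents = nn.Parameter(torch.randn(n_latents, embed_dim))
        self.attn = nn.MultiheadAttention(embed_dim, n_heads, batch_first=True)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, context):  # context: [bsz, seq, embed_dim]
        latents = self.latents.expand(context.shape[0], -1, -1)
        # as above: queries are the latents, keys/values are context + latents
        kv = torch.cat([context, latents], dim=1)
        out, _ = self.attn(latents, kv, kv)
        return self.norm(out + latents)  # [bsz, n_latents, embed_dim]

print(ToyResampler()(torch.randn(2, 257, 64)).shape)  # torch.Size([2, 8, 64])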
|
|
||||||
|
|
||||||
|
|
||||||
class IdeficsPerceiverAttention(nn.Module):
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
prefix,
|
|
||||||
config,
|
|
||||||
embed_dim: int,
|
|
||||||
n_heads: int,
|
|
||||||
head_dim: int,
|
|
||||||
qk_layer_norms: bool,
|
|
||||||
weights,
|
|
||||||
) -> None:
|
|
||||||
"""Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
|
|
||||||
super().__init__()
|
|
||||||
self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim
|
|
||||||
self.qk_layer_norms = qk_layer_norms
|
|
||||||
# Normalization & Scaling
|
|
||||||
self.context_layer_norm = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.context_layer_norm", weights=weights, eps=EPS
|
|
||||||
)
|
|
||||||
self.latents_layer_norm = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.latents_layer_norm", weights=weights, eps=EPS
|
|
||||||
)
|
|
||||||
if self.qk_layer_norms:
|
|
||||||
self.q_layer_norm = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.q_layer_norm", weights=weights, eps=EPS
|
|
||||||
)
|
|
||||||
self.k_layer_norm = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.k_layer_norm", weights=weights, eps=EPS
|
|
||||||
)
|
|
||||||
|
|
||||||
self.qk_scale = self.head_dim**-0.5
|
|
||||||
|
|
||||||
if n_heads % weights.process_group.size() != 0:
|
|
||||||
raise ValueError(
|
|
||||||
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {n_heads} "
|
|
||||||
f"and `num_shards`: {weights.process_group.size()}"
|
|
||||||
)
|
|
||||||
self.n_heads //= weights.process_group.size()
|
|
||||||
|
|
||||||
# Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers).
|
|
||||||
self.q_proj = TensorParallelColumnLinear.load(
|
|
||||||
config=config, prefix=f"{prefix}.q_proj", weights=weights, bias=False
|
|
||||||
)
|
|
||||||
self.k_proj = TensorParallelColumnLinear.load(
|
|
||||||
config=config, prefix=f"{prefix}.k_proj", weights=weights, bias=False
|
|
||||||
)
|
|
||||||
self.v_proj = TensorParallelColumnLinear.load(
|
|
||||||
config=config, prefix=f"{prefix}.v_proj", weights=weights, bias=False
|
|
||||||
)
|
|
||||||
|
|
||||||
self.output_proj = TensorParallelRowLinear.load(
|
|
||||||
config=config, prefix=f"{prefix}.output_proj", weights=weights, bias=False
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
|
|
||||||
"""
|
|
||||||
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!
|
|
||||||
|
|
||||||
Args:
|
|
||||||
context (`torch.Tensor`):
|
|
||||||
Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
|
|
||||||
latents (`torch.Tensor`):
|
|
||||||
Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
`torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross
|
|
||||||
from context.
|
|
||||||
"""
|
|
||||||
context = self.context_layer_norm(context)
|
|
||||||
latents = self.latents_layer_norm(latents)
|
|
||||||
batch_size, seq_length, embed_dim = context.shape[:3]
|
|
||||||
|
|
||||||
# Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
|
|
||||||
# Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
|
|
||||||
q = self.q_proj(latents)
|
|
||||||
k = self.k_proj(torch.cat([context, latents], dim=-2))
|
|
||||||
v = self.v_proj(torch.cat([context, latents], dim=-2))
|
|
||||||
|
|
||||||
# Multiheaded Self-Attention w/ stable softmax (subtract per-row max -- `amax` -- before softmax call)
|
|
||||||
# =>> `attn` should be a 2D matrix of shape [n_latents x (context + n_latents)]
|
|
||||||
# einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads)
|
|
||||||
q, k, v = [
|
|
||||||
x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(
|
|
||||||
1, 2
|
|
||||||
)
|
|
||||||
for x in (q, k, v)
|
|
||||||
]
|
|
||||||
|
|
||||||
if self.qk_layer_norms:
|
|
||||||
q = self.q_layer_norm(q)
|
|
||||||
k = self.k_layer_norm(k)
|
|
||||||
|
|
||||||
scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k)
|
|
||||||
stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach())
|
|
||||||
attn = stabilized_scores.softmax(dim=-1)
|
|
||||||
|
|
||||||
# Attend & project back to output...
|
|
||||||
resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v)
|
|
||||||
# einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads)
|
|
||||||
return self.output_proj(resampled.transpose(1, 2).flatten(-2))
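# The two einsums above are an ordinary scaled-dot-product attention with a max
# subtraction for numerical stability; a minimal shape-level sketch (torch only,
# sizes are illustrative).
import torch

bsz, heads, n_latents, ctx_len, head_dim = 2, 4, 8, 257, 16
q = torch.randn(bsz, heads, n_latents, head_dim)
k = torch.randn(bsz, heads, ctx_len + n_latents, head_dim)
v = torch.randn(bsz, heads, ctx_len + n_latents, head_dim)

scores = torch.einsum("... i d, ... j d -> ... i j", q * head_dim**-0.5, k)
attn = (scores - scores.amax(dim=-1, keepdim=True)).softmax(dim=-1)
out = torch.einsum("... i j, ... j d -> ... i d", attn, v)
print(out.shape)  # torch.Size([2, 4, 8, 16]) == [bsz, heads, n_latents, head_dim]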
|
|
||||||
|
|
||||||
|
|
||||||
class IdeficsMLP(nn.Module):
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
prefix,
|
|
||||||
intermediate_size,
|
|
||||||
config,
|
|
||||||
weights,
|
|
||||||
):
|
|
||||||
"""Simple MLP block with intermediate_size and embedding size"""
|
|
||||||
super().__init__()
|
|
||||||
self.embed_dim = config.vision_config.embed_dim
|
|
||||||
self.ln = nn.LayerNorm.load(prefix=f"{prefix}.ln", weights=weights, eps=EPS)
|
|
||||||
self.fc = TensorParallelColumnLinear.load(
|
|
||||||
config=config,
|
|
||||||
prefix=f"{prefix}.fc",
|
|
||||||
weights=weights,
|
|
||||||
bias=False,
|
|
||||||
)
|
|
||||||
self.act = nn.ReLU()
|
|
||||||
self.c_proj = TensorParallelRowLinear.load(
|
|
||||||
config=config,
|
|
||||||
prefix=f"{prefix}.c_proj",
|
|
||||||
weights=weights,
|
|
||||||
bias=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(
|
|
||||||
self, hidden_states: Optional[Tuple[torch.FloatTensor]]
|
|
||||||
) -> torch.FloatTensor:
|
|
||||||
hidden_states = self.ln(hidden_states)
|
|
||||||
hidden_states = self.fc(hidden_states)
|
|
||||||
hidden_states = self.act(hidden_states)
|
|
||||||
hidden_states = self.c_proj(hidden_states)
|
|
||||||
|
|
||||||
return hidden_states
|
|
@@ -1,443 +0,0 @@
|
|||||||
# coding=utf-8
|
|
||||||
# Copyright 2022 The HuggingFace Inc. team.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
"""
|
|
||||||
Processor class for IDEFICS.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from typing import Callable, List, Optional, Union
|
|
||||||
from urllib.parse import urlparse
|
|
||||||
|
|
||||||
from transformers.feature_extraction_utils import BatchFeature
|
|
||||||
from transformers.processing_utils import ProcessorMixin
|
|
||||||
from transformers.tokenization_utils_base import (
|
|
||||||
BatchEncoding,
|
|
||||||
PaddingStrategy,
|
|
||||||
TextInput,
|
|
||||||
TruncationStrategy,
|
|
||||||
)
|
|
||||||
from transformers.utils import TensorType, is_torch_available
|
|
||||||
|
|
||||||
|
|
||||||
if is_torch_available():
|
|
||||||
import torch
|
|
||||||
|
|
||||||
|
|
||||||
IMAGE_TOKEN = "<image>"
|
|
||||||
|
|
||||||
|
|
||||||
# copied from m4.training.packing
|
|
||||||
def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1):
|
|
||||||
# This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]]
|
|
||||||
|
|
||||||
# If any of images index are more than num_classes, set them to -1.
|
|
||||||
# Words after the max number of images allowed have been seen don't attend on anything
|
|
||||||
if num_classes != -1:
|
|
||||||
incremental_mask[incremental_mask >= num_classes] = -1
|
|
||||||
|
|
||||||
negatives = incremental_mask == -1
|
|
||||||
incremental_mask[negatives] = 0
|
|
||||||
attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
|
|
||||||
attn_mask[negatives, :] = 0
|
|
||||||
return attn_mask
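# A concrete call makes the comment above easier to read: incremental image indices
# become one-hot rows, and -1 (no image seen yet, or past the image limit) becomes an
# all-zero row. Sketch using the function defined above; note that it mutates its input,
# hence the clone().
import torch

incremental = torch.tensor([[-1, 0, 0, 1, 1]])
binary = incremental_to_binary_attention_mask(incremental.clone(), num_classes=2)
# binary[0] == tensor([[0, 0],
#                      [1, 0],
#                      [1, 0],
#                      [0, 1],
#                      [0, 1]])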
|
|
||||||
|
|
||||||
|
|
||||||
# copied from m4.training.packing
|
|
||||||
def image_attention_mask_for_packed_input_ids(input_ids, tokenizer):
|
|
||||||
image_attention_mask = torch.full_like(input_ids, fill_value=-1)
|
|
||||||
next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
|
|
||||||
image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
|
|
||||||
eod_token_id = tokenizer.eos_token_id
|
|
||||||
for batch_idx in range(input_ids.size(0)):
|
|
||||||
count = -1
|
|
||||||
seen_eod = False
|
|
||||||
for idx, token_id in enumerate(input_ids[batch_idx]):
|
|
||||||
if token_id == image_token_id:
|
|
||||||
count += 1
|
|
||||||
image_attention_mask[batch_idx][idx] = count
|
|
||||||
seen_eod = False
|
|
||||||
else:
|
|
||||||
image_attention_mask[batch_idx][idx] = count
|
|
||||||
|
|
||||||
if seen_eod:
|
|
||||||
image_attention_mask[batch_idx][idx] = -1
|
|
||||||
|
|
||||||
if token_id == eod_token_id:
|
|
||||||
seen_eod = True
|
|
||||||
|
|
||||||
for batch_idx in range(input_ids.size(0)):
|
|
||||||
count = -1
|
|
||||||
seen_eod = False
|
|
||||||
for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
|
|
||||||
token_id = input_ids[batch_idx][idx]
|
|
||||||
if token_id == image_token_id:
|
|
||||||
count += 1
|
|
||||||
next_image_attention_mask[batch_idx][idx] = count
|
|
||||||
seen_eod = False
|
|
||||||
else:
|
|
||||||
next_image_attention_mask[batch_idx][idx] = count
|
|
||||||
|
|
||||||
if token_id == eod_token_id:
|
|
||||||
seen_eod = True
|
|
||||||
|
|
||||||
if seen_eod:
|
|
||||||
next_image_attention_mask[batch_idx][idx] = -1
|
|
||||||
|
|
||||||
non_negative_indices = next_image_attention_mask[batch_idx] != -1
|
|
||||||
next_image_attention_mask[batch_idx][non_negative_indices] -= count
|
|
||||||
next_image_attention_mask[batch_idx][non_negative_indices] *= -1
|
|
||||||
|
|
||||||
return image_attention_mask, next_image_attention_mask
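# Combined with the previous helper, this is how the processor derives the per-token image
# attention mask from token ids. A toy walk-through; the fake tokenizer and the ids 5/2
# standing in for "<image>"/eos are illustrative only.
import torch

class _FakeTokenizer:
    eos_token_id = 2

    def convert_tokens_to_ids(self, token):
        return 5  # pretend "<image>" maps to id 5

input_ids = torch.tensor([[5, 11, 12, 5, 13, 2]])
incremental, _ = image_attention_mask_for_packed_input_ids(input_ids, _FakeTokenizer())
# incremental == tensor([[0, 0, 0, 1, 1, 1]]): each token points at the latest image seen
binary = incremental_to_binary_attention_mask(incremental, num_classes=2)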
|
|
||||||
|
|
||||||
|
|
||||||
def is_url(string):
|
|
||||||
"""Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
|
|
||||||
invalidated the url"""
|
|
||||||
if " " in string:
|
|
||||||
return False
|
|
||||||
result = urlparse(string)
|
|
||||||
return all([result.scheme, result.netloc])
|
|
||||||
|
|
||||||
|
|
||||||
def is_image(string):
|
|
||||||
"""Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
|
|
||||||
invalidated the url"""
|
|
||||||
return is_url(string) or string.startswith("data:")
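# For example (quick checks of the two helpers, no extra dependencies):
assert is_url("https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg")
assert not is_url("Describe this image.")
assert is_image("data:image/png;base64,iVBORw0KGgo=")
assert not is_image("Describe this image.")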
|
|
||||||
|
|
||||||
|
|
||||||
class IdeficsProcessor(ProcessorMixin):
|
|
||||||
r"""
|
|
||||||
Constructs an IDEFICS processor which wraps a Llama tokenizer and an IDEFICS image processor into a single processor.
|
|
||||||
|
|
||||||
[`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
|
|
||||||
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
image_processor (`IdeficsImageProcessor`):
|
|
||||||
An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
|
|
||||||
tokenizer (`LlamaTokenizerFast`):
|
|
||||||
An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
|
|
||||||
image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image)
|
|
||||||
"""
|
|
||||||
|
|
||||||
attributes = ["image_processor", "tokenizer"]
|
|
||||||
image_processor_class = "IdeficsImageProcessor"
|
|
||||||
tokenizer_class = "LlamaTokenizerFast"
|
|
||||||
|
|
||||||
def __init__(
|
|
||||||
self,
|
|
||||||
image_processor,
|
|
||||||
tokenizer=None,
|
|
||||||
image_size=224,
|
|
||||||
add_end_of_utterance_token=None,
|
|
||||||
**kwargs,
|
|
||||||
):
|
|
||||||
if image_processor is None:
|
|
||||||
raise ValueError("You need to specify an `image_processor`.")
|
|
||||||
if tokenizer is None:
|
|
||||||
raise ValueError("You need to specify a `tokenizer`.")
|
|
||||||
|
|
||||||
super().__init__(image_processor, tokenizer)
|
|
||||||
self.current_processor = self.image_processor
|
|
||||||
self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
|
|
||||||
|
|
||||||
self.default_image_dims = (
|
|
||||||
self.image_processor.image_num_channels,
|
|
||||||
self.image_processor.image_size,
|
|
||||||
self.image_processor.image_size,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.tokenizer_was_trained_with_end_of_utterance_token = (
|
|
||||||
True
|
|
||||||
if "<end_of_utterance>"
|
|
||||||
in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
|
|
||||||
else False
|
|
||||||
)
|
|
||||||
|
|
||||||
def __call__(
|
|
||||||
self,
|
|
||||||
prompts: Union[List[TextInput], List[List[TextInput]]],
|
|
||||||
padding: Union[bool, str, PaddingStrategy] = False,
|
|
||||||
truncation: Union[bool, str, TruncationStrategy] = None,
|
|
||||||
max_length: Optional[int] = None,
|
|
||||||
transform: Callable = None,
|
|
||||||
add_eos_token=False,
|
|
||||||
add_end_of_utterance_token=None,
|
|
||||||
debug=False,
|
|
||||||
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
|
|
||||||
) -> BatchEncoding:
|
|
||||||
"""This method takes batched or non-batched prompts made of text and images and converts them into prompts that
|
|
||||||
the model was trained on and prepares the image pixel values for the model to process.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
prompts (`Union[List[TextInput], List[List[TextInput]]]`):
|
|
||||||
either a single prompt or a batched list of prompts - see the detailed description immediately after
|
|
||||||
the end of the arguments doc section.
|
|
||||||
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
|
|
||||||
Select a strategy to pad the returned sequences (according to the model's padding side and padding
|
|
||||||
index) among:
|
|
||||||
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
|
|
||||||
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
|
|
||||||
acceptable input length for the model if that argument is not provided.
|
|
||||||
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
|
|
||||||
lengths).
|
|
||||||
max_length (`int`, *optional*):
|
|
||||||
Maximum length of the returned list and optionally padding length (see above).
|
|
||||||
truncation (`bool`, *optional*):
|
|
||||||
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
|
|
||||||
transform (`Callable`, *optional*):
|
|
||||||
A custom transform function that accepts a single image can be passed for training. For example,
|
|
||||||
`torchvision.Compose` can be used to compose multiple transforms. If `None`, a preset inference-specific
set of transforms will be applied to the images.
|
|
||||||
add_eos_token (`bool`, *optional*, defaults to `False`):
|
|
||||||
Adds `eos_token` at the end of the final prompt if `True`.
add_end_of_utterance_token (`bool`, *optional*):
|
|
||||||
Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an
|
|
||||||
image). If `None` the tokenizer will be checked instead and if this token is found in
|
|
||||||
`additional_special_tokens` then the value will be `True`.
|
|
||||||
debug (`bool`, *optional*, defaults to `False`):
|
|
||||||
`True` value will help debug prompt generation by dumping useful information
|
|
||||||
return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
|
|
||||||
The type of tensors to return. Can be one of:
|
|
||||||
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
|
|
||||||
directly passed to `model.generate`
|
|
||||||
|
|
||||||
Detailed explanation:
|
|
||||||
|
|
||||||
Each entry in `prompts` is either a text to be passed as is or an image that will be processed.
|
|
||||||
|
|
||||||
An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
|
|
||||||
|
|
||||||
When the processor encounters an image it will inject a `<fake_token_around_image><image><fake_token_around_image>`
entry into the prompt.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```python
|
|
||||||
checkpoint = "HuggingFaceM4/idefics-9b"
|
|
||||||
processor = AutoProcessor.from_pretrained(checkpoint)
|
|
||||||
url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
|
|
||||||
img = processor.image_processor.fetch_images([url])[0]
|
|
||||||
|
|
||||||
prompts = [
|
|
||||||
"User:",
|
|
||||||
img,
|
|
||||||
"Describe this image.\nAssistant: An image of two kittens in grass.\n",
|
|
||||||
"User:",
|
|
||||||
"https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
|
|
||||||
"Describe this image.\nAssistant:",
|
|
||||||
]
|
|
||||||
|
|
||||||
inputs = processor(prompts, return_tensors="pt")
|
|
||||||
generated_ids = model.generate(**inputs, max_length=100)
|
|
||||||
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
|
|
||||||
```
|
|
||||||
|
|
||||||
In this example the `prompts` will be converted into:
|
|
||||||
|
|
||||||
```
|
|
||||||
<s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
|
|
||||||
Assistant: An image of two kittens in grass.
|
|
||||||
User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
|
|
||||||
Assistant:
|
|
||||||
```
|
|
||||||
|
|
||||||
and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
|
|
||||||
`pixel_values` dict entry of the return value.
|
|
||||||
|
|
||||||
This example also shows that images can be passed either as objects or as text urls: here the
first image is passed as an object and the second one as a url.
|
|
||||||
|
|
||||||
For training, do:
|
|
||||||
|
|
||||||
```python
|
|
||||||
image_transform = transforms.Compose(
|
|
||||||
[
|
|
||||||
transforms.RandomResizedCrop(
|
|
||||||
(w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
|
|
||||||
),
|
|
||||||
transforms.ToTensor(),
|
|
||||||
transforms.Normalize(mean=self.image_mean, std=self.image_std),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
inputs = processor(prompts, transform=image_transform, return_tensors="pt")
|
|
||||||
```
|
|
||||||
|
|
||||||
In order to help debug prompt generation enable `debug=True` which will show you what's happening.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
# if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
|
|
||||||
if add_end_of_utterance_token is None:
|
|
||||||
add_end_of_utterance_token = (
|
|
||||||
self.tokenizer_was_trained_with_end_of_utterance_token
|
|
||||||
)
|
|
||||||
|
|
||||||
# turn non-batched prompts into batched
|
|
||||||
if not any(isinstance(i, list) for i in prompts):
|
|
||||||
prompts = [prompts]
|
|
||||||
|
|
||||||
fake_token = "<fake_token_around_image>"
|
|
||||||
image_token = "<image>"
|
|
||||||
end_of_utterance_token = "<end_of_utterance>"
|
|
||||||
|
|
||||||
def image_tokens(last_was_image):
|
|
||||||
if last_was_image:
|
|
||||||
return image_token + fake_token
|
|
||||||
else:
|
|
||||||
return fake_token + image_token + fake_token
|
|
||||||
|
|
||||||
all_texts = []
|
|
||||||
all_images = []
|
|
||||||
for sample in prompts:
|
|
||||||
# the model was trained on samples starting with <s>
|
|
||||||
full_text = f"{self.tokenizer.bos_token}"
|
|
||||||
|
|
||||||
# an image can either be an image object in the item or the url, everything else is a verbatim prompt text
|
|
||||||
image_objects = []
|
|
||||||
last_was_image = False
|
|
||||||
last_was_text = False
|
|
||||||
for i, item in enumerate(sample):
|
|
||||||
if i > 0:
|
|
||||||
last_was_text = True if not last_was_image else False
|
|
||||||
|
|
||||||
if isinstance(item, str):
|
|
||||||
item = item.strip(" ")
|
|
||||||
if is_image(item):
|
|
||||||
image = self.image_processor.fetch_images(item)
|
|
||||||
full_text += image_tokens(last_was_image)
|
|
||||||
image_objects.append(image)
|
|
||||||
last_was_image = True
|
|
||||||
else:
|
|
||||||
# we add end_of_utterance_token between each subsequent text prompts (but not at the last one!)
|
|
||||||
if add_end_of_utterance_token and last_was_text:
|
|
||||||
full_text += end_of_utterance_token
|
|
||||||
full_text += item
|
|
||||||
last_was_image = False
|
|
||||||
else:
|
|
||||||
# must be an image obj
|
|
||||||
full_text += image_tokens(last_was_image)
|
|
||||||
image_objects.append(item)
|
|
||||||
last_was_image = True
|
|
||||||
|
|
||||||
if add_eos_token:
|
|
||||||
full_text += self.tokenizer.eos_token
|
|
||||||
|
|
||||||
if debug is True:
|
|
||||||
print(f"{full_text=}")
|
|
||||||
|
|
||||||
image_objects = self.image_processor(image_objects, transform=transform)
|
|
||||||
|
|
||||||
text_encoding = self.tokenizer(
|
|
||||||
text=full_text,
|
|
||||||
add_special_tokens=False,
|
|
||||||
padding=padding,
|
|
||||||
truncation=truncation,
|
|
||||||
max_length=max_length,
|
|
||||||
)
|
|
||||||
|
|
||||||
all_texts.append(text_encoding["input_ids"])
|
|
||||||
all_images.append(image_objects)
|
|
||||||
|
|
||||||
max_seq_len = max(len(x) for x in all_texts)
|
|
||||||
|
|
||||||
# max_num_images has to be at least 1 even when there are no images
|
|
||||||
max_num_images = max(len(x) for x in all_images)
|
|
||||||
max_num_images = max(1, max_num_images)
|
|
||||||
|
|
||||||
at_least_one_image = sum(len(x) for x in all_images) > 0
|
|
||||||
output_input_ids = []
|
|
||||||
output_images = []
|
|
||||||
output_attention_masks = []
|
|
||||||
for text, images in zip(all_texts, all_images):
|
|
||||||
padded_input_ids = [self.tokenizer.pad_token_id] * max_seq_len
|
|
||||||
unpadded_seq_len = len(text)
|
|
||||||
start = max_seq_len - unpadded_seq_len
|
|
||||||
padded_input_ids[start:] = text[:max_seq_len]
|
|
||||||
|
|
||||||
attention_mask = torch.zeros((max_seq_len,), dtype=torch.long)
|
|
||||||
attention_mask[start:] = 1
|
|
||||||
|
|
||||||
image_count = padded_input_ids.count(self.image_token_id)
|
|
||||||
local_max_num_images = min(image_count, max_num_images)
|
|
||||||
|
|
||||||
current_images = images[:local_max_num_images]
|
|
||||||
|
|
||||||
if len(current_images) > 0:
|
|
||||||
padded_image_tensor = torch.zeros(
|
|
||||||
max_num_images, *current_images.size()[1:]
|
|
||||||
)
|
|
||||||
padded_image_tensor[: current_images.size(0)] = current_images
|
|
||||||
else:
|
|
||||||
padded_image_tensor = torch.zeros(
|
|
||||||
max_num_images, *self.default_image_dims
|
|
||||||
)
|
|
||||||
|
|
||||||
output_images.append(padded_image_tensor)
|
|
||||||
output_input_ids.append(torch.tensor(padded_input_ids))
|
|
||||||
|
|
||||||
output_attention_masks.append(attention_mask)
|
|
||||||
|
|
||||||
output_input_ids = torch.stack(output_input_ids)
|
|
||||||
output_images = torch.stack(output_images)
|
|
||||||
output_attention_masks = torch.stack(output_attention_masks)
|
|
||||||
|
|
||||||
if at_least_one_image:
|
|
||||||
image_attention_mask, _ = image_attention_mask_for_packed_input_ids(
|
|
||||||
output_input_ids, self.tokenizer
|
|
||||||
)
|
|
||||||
image_attention_mask = incremental_to_binary_attention_mask(
|
|
||||||
image_attention_mask, num_classes=max_num_images
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# in full language mode we set the image mask to all-0s
|
|
||||||
image_attention_mask = torch.zeros(
|
|
||||||
output_input_ids.shape[0],
|
|
||||||
output_input_ids.shape[1],
|
|
||||||
1,
|
|
||||||
dtype=torch.bool,
|
|
||||||
)
|
|
||||||
|
|
||||||
return BatchFeature(
|
|
||||||
data={
|
|
||||||
"input_ids": output_input_ids,
|
|
||||||
"attention_mask": output_attention_masks,
|
|
||||||
"pixel_values": output_images,
|
|
||||||
"image_attention_mask": image_attention_mask,
|
|
||||||
}
|
|
||||||
)
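# Two details of the return value are easy to miss: sequences are left-padded, and
# pixel_values is always padded to max_num_images along dim 1. A standalone sketch of
# the left-padding step from the loop above (torch only, lengths are illustrative).
import torch

pad_token_id, max_seq_len = 0, 8
text = [17, 18, 19]  # one already-tokenized sample

padded_input_ids = [pad_token_id] * max_seq_len
start = max_seq_len - len(text)
padded_input_ids[start:] = text[:max_seq_len]  # -> [0, 0, 0, 0, 0, 17, 18, 19]

attention_mask = torch.zeros((max_seq_len,), dtype=torch.long)
attention_mask[start:] = 1  # -> tensor([0, 0, 0, 0, 0, 1, 1, 1])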
|
|
||||||
|
|
||||||
def batch_decode(self, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
|
|
||||||
refer to the docstring of this method for more information.
|
|
||||||
"""
|
|
||||||
return self.tokenizer.batch_decode(*args, **kwargs)
|
|
||||||
|
|
||||||
def decode(self, *args, **kwargs):
|
|
||||||
"""
|
|
||||||
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
|
|
||||||
the docstring of this method for more information.
|
|
||||||
"""
|
|
||||||
return self.tokenizer.decode(*args, **kwargs)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def model_input_names(self):
|
|
||||||
tokenizer_input_names = self.tokenizer.model_input_names
|
|
||||||
image_processor_input_names = self.image_processor.model_input_names
|
|
||||||
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
|
@@ -1,529 +0,0 @@
|
|||||||
# coding=utf-8
|
|
||||||
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
"""PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""
|
|
||||||
|
|
||||||
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from typing import Optional, Tuple, Union
|
|
||||||
|
|
||||||
import torch
|
|
||||||
import torch.utils.checkpoint
|
|
||||||
from torch import nn
|
|
||||||
|
|
||||||
from transformers.activations import ACT2FN
|
|
||||||
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
|
|
||||||
from transformers.utils import (
|
|
||||||
ModelOutput,
|
|
||||||
logging,
|
|
||||||
)
|
|
||||||
from text_generation_server.layers import (
|
|
||||||
TensorParallelColumnLinear,
|
|
||||||
TensorParallelRowLinear,
|
|
||||||
TensorParallelEmbedding,
|
|
||||||
)
|
|
||||||
|
|
||||||
logger = logging.get_logger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class IdeficsVisionModelOutput(ModelOutput):
|
|
||||||
"""
|
|
||||||
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
|
|
||||||
The image embeddings obtained by applying the projection layer to the pooler_output.
|
|
||||||
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
|
||||||
Sequence of hidden-states at the output of the last layer of the model.
|
|
||||||
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
|
||||||
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
|
||||||
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
|
||||||
|
|
||||||
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
|
||||||
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
|
|
||||||
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
|
|
||||||
sequence_length)`.
|
|
||||||
|
|
||||||
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
|
|
||||||
heads.
|
|
||||||
"""
|
|
||||||
|
|
||||||
image_embeds: Optional[torch.FloatTensor] = None
|
|
||||||
last_hidden_state: torch.FloatTensor = None
|
|
||||||
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
|
||||||
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
|
||||||
|
|
||||||
|
|
||||||
# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Idefics
|
|
||||||
class IdeficsVisionEmbeddings(nn.Module):
|
|
||||||
def __init__(self, prefix, config, weights):
|
|
||||||
super().__init__()
|
|
||||||
self.config = config
|
|
||||||
self.embed_dim = config.hidden_size
|
|
||||||
self.image_size = config.image_size
|
|
||||||
self.patch_size = config.patch_size
|
|
||||||
|
|
||||||
self.class_embedding = nn.Parameter(
|
|
||||||
weights.get_tensor(f"{prefix}.class_embedding")
|
|
||||||
)
|
|
||||||
|
|
||||||
self.patch_embedding = nn.Conv2d.load_no_bias(
|
|
||||||
prefix=f"{prefix}.patch_embedding",
|
|
||||||
weights=weights,
|
|
||||||
in_channels=config.num_channels,
|
|
||||||
out_channels=self.embed_dim,
|
|
||||||
kernel_size=self.patch_size,
|
|
||||||
stride=self.patch_size,
|
|
||||||
)
|
|
||||||
|
|
||||||
self.num_patches = (self.image_size // self.patch_size) ** 2
|
|
||||||
self.num_positions = self.num_patches + 1
|
|
||||||
self.position_embedding = TensorParallelEmbedding(
|
|
||||||
prefix="model.vision_model.embeddings.position_embedding", weights=weights
|
|
||||||
)
|
|
||||||
self.position_ids = (
|
|
||||||
torch.arange(self.num_positions).expand((1, -1)).to(device=weights.device)
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
|
|
||||||
batch_size = pixel_values.shape[0]
|
|
||||||
target_dtype = self.patch_embedding.weight.dtype
|
|
||||||
patch_embeds = self.patch_embedding(
|
|
||||||
pixel_values.to(dtype=target_dtype)
|
|
||||||
) # shape = [*, width, grid, grid]
|
|
||||||
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
|
|
||||||
|
|
||||||
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
|
|
||||||
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
|
|
||||||
embeddings = embeddings + self.position_embedding(self.position_ids)
|
|
||||||
return embeddings
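# The sequence length produced here is (image_size // patch_size) ** 2 + 1: the patches
# plus the class token. A quick sanity check with illustrative CLIP-ViT-style numbers
# (224 and 14 are assumptions, not values read from a config).
image_size, patch_size = 224, 14
num_patches = (image_size // patch_size) ** 2  # 256
num_positions = num_patches + 1                # 257, including the class token
# embeddings returned above: [batch_size, num_positions, embed_dim]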
|
|
||||||
|
|
||||||
|
|
||||||
# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision
|
|
||||||
class IdeficsVisionAttention(nn.Module):
|
|
||||||
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
|
||||||
|
|
||||||
def __init__(self, prefix, config, weights):
|
|
||||||
super().__init__()
|
|
||||||
self.config = config
|
|
||||||
self.embed_dim = config.hidden_size
|
|
||||||
self.num_heads = config.num_attention_heads
|
|
||||||
self.head_dim = self.embed_dim // self.num_heads
|
|
||||||
if self.head_dim * self.num_heads != self.embed_dim:
|
|
||||||
raise ValueError(
|
|
||||||
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
|
|
||||||
f" {self.num_heads})."
|
|
||||||
)
|
|
||||||
self.scale = self.head_dim**-0.5
|
|
||||||
self.dropout = config.attention_dropout
|
|
||||||
|
|
||||||
if self.num_heads % weights.process_group.size() != 0:
|
|
||||||
raise ValueError(
|
|
||||||
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
|
|
||||||
f"and `num_shards`: {weights.process_group.size()}"
|
|
||||||
)
|
|
||||||
self.num_heads = self.num_heads // weights.process_group.size()
|
|
||||||
self.embed_dim = self.embed_dim // weights.process_group.size()
|
|
||||||
|
|
||||||
self.k_proj = TensorParallelColumnLinear.load(
|
|
||||||
config, prefix=f"{prefix}.k_proj", weights=weights, bias=True
|
|
||||||
)
|
|
||||||
self.v_proj = TensorParallelColumnLinear.load(
|
|
||||||
config, prefix=f"{prefix}.v_proj", weights=weights, bias=True
|
|
||||||
)
|
|
||||||
self.q_proj = TensorParallelColumnLinear.load(
|
|
||||||
config, prefix=f"{prefix}.q_proj", weights=weights, bias=True
|
|
||||||
)
|
|
||||||
self.out_proj = TensorParallelRowLinear.load(
|
|
||||||
config, prefix=f"{prefix}.out_proj", weights=weights, bias=True
|
|
||||||
)
|
|
||||||
|
|
||||||
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
|
|
||||||
return (
|
|
||||||
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
|
|
||||||
.transpose(1, 2)
|
|
||||||
.contiguous()
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(
|
|
||||||
self,
|
|
||||||
hidden_states: torch.Tensor,
|
|
||||||
attention_mask: Optional[torch.Tensor] = None,
|
|
||||||
causal_attention_mask: Optional[torch.Tensor] = None,
|
|
||||||
output_attentions: Optional[bool] = False,
|
|
||||||
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
|
||||||
"""Input shape: Batch x Time x Channel"""
|
|
||||||
|
|
||||||
bsz, tgt_len, _ = hidden_states.size()
|
|
||||||
|
|
||||||
# get query proj
|
|
||||||
query_states = self.q_proj(hidden_states) * self.scale
|
|
||||||
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
|
|
||||||
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
|
|
||||||
|
|
||||||
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
|
|
||||||
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
|
|
||||||
key_states = key_states.view(*proj_shape)
|
|
||||||
value_states = value_states.view(*proj_shape)
|
|
||||||
|
|
||||||
src_len = key_states.size(1)
|
|
||||||
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
|
|
||||||
|
|
||||||
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
|
|
||||||
raise ValueError(
|
|
||||||
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
|
|
||||||
f" {attn_weights.size()}"
|
|
||||||
)
|
|
||||||
|
|
||||||
# apply the causal_attention_mask first
|
|
||||||
if causal_attention_mask is not None:
|
|
||||||
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
|
||||||
raise ValueError(
|
|
||||||
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
|
|
||||||
f" {causal_attention_mask.size()}"
|
|
||||||
)
|
|
||||||
attn_weights = (
|
|
||||||
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
|
||||||
+ causal_attention_mask
|
|
||||||
)
|
|
||||||
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
|
||||||
|
|
||||||
if attention_mask is not None:
|
|
||||||
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
|
|
||||||
raise ValueError(
|
|
||||||
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
|
|
||||||
)
|
|
||||||
attn_weights = (
|
|
||||||
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
|
|
||||||
+ attention_mask
|
|
||||||
)
|
|
||||||
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
|
|
||||||
|
|
||||||
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
|
|
||||||
|
|
||||||
if output_attentions:
|
|
||||||
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights has to be reshaped
# twice and has to be reused in the following
|
|
||||||
attn_weights_reshaped = attn_weights.view(
|
|
||||||
bsz, self.num_heads, tgt_len, src_len
|
|
||||||
)
|
|
||||||
attn_weights = attn_weights_reshaped.view(
|
|
||||||
bsz * self.num_heads, tgt_len, src_len
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
attn_weights_reshaped = None
|
|
||||||
|
|
||||||
attn_probs = nn.functional.dropout(
|
|
||||||
attn_weights, p=self.dropout, training=self.training
|
|
||||||
)
|
|
||||||
|
|
||||||
attn_output = torch.bmm(attn_probs, value_states)
|
|
||||||
|
|
||||||
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
|
|
||||||
raise ValueError(
|
|
||||||
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
|
|
||||||
f" {attn_output.size()}"
|
|
||||||
)
|
|
||||||
|
|
||||||
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
|
|
||||||
attn_output = attn_output.transpose(1, 2)
|
|
||||||
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
|
|
||||||
|
|
||||||
attn_output = self.out_proj(attn_output)
|
|
||||||
|
|
||||||
return attn_output, attn_weights_reshaped
|
|
||||||
|
|
||||||
|
|
||||||
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision
|
|
||||||
class IdeficsVisionMLP(nn.Module):
|
|
||||||
def __init__(self, prefix, config, weights):
|
|
||||||
super().__init__()
|
|
||||||
self.config = config
|
|
||||||
self.activation_fn = ACT2FN[config.hidden_act]
|
|
||||||
self.fc1 = TensorParallelColumnLinear.load(
|
|
||||||
config, prefix=f"{prefix}.fc1", weights=weights, bias=True
|
|
||||||
)
|
|
||||||
self.fc2 = TensorParallelRowLinear.load(
|
|
||||||
config, prefix=f"{prefix}.fc2", weights=weights, bias=True
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
|
||||||
hidden_states = self.fc1(hidden_states)
|
|
||||||
hidden_states = self.activation_fn(hidden_states)
|
|
||||||
hidden_states = self.fc2(hidden_states)
|
|
||||||
return hidden_states
|
|
||||||
|
|
||||||
|
|
||||||
# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision
|
|
||||||
class IdeficsVisionEncoderLayer(nn.Module):
|
|
||||||
def __init__(self, prefix, config, weights):
|
|
||||||
super().__init__()
|
|
||||||
self.embed_dim = config.hidden_size
|
|
||||||
self.self_attn = IdeficsVisionAttention(
|
|
||||||
prefix=f"{prefix}.self_attn", config=config, weights=weights
|
|
||||||
)
|
|
||||||
self.layer_norm1 = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps
|
|
||||||
)
|
|
||||||
self.mlp = IdeficsVisionMLP(
|
|
||||||
prefix=f"{prefix}.mlp", config=config, weights=weights
|
|
||||||
)
|
|
||||||
self.layer_norm2 = nn.LayerNorm.load(
|
|
||||||
prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(
|
|
||||||
self,
|
|
||||||
hidden_states: torch.Tensor,
|
|
||||||
attention_mask: torch.Tensor,
|
|
||||||
causal_attention_mask: torch.Tensor,
|
|
||||||
output_attentions: Optional[bool] = False,
|
|
||||||
) -> Tuple[torch.FloatTensor]:
|
|
||||||
"""
|
|
||||||
Args:
|
|
||||||
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
|
||||||
attention_mask (`torch.FloatTensor`): attention mask of size
|
|
||||||
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
|
||||||
output_attentions (`bool`, *optional*):
|
|
||||||
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
|
||||||
returned tensors for more detail.
|
|
||||||
"""
|
|
||||||
residual = hidden_states
|
|
||||||
|
|
||||||
hidden_states = self.layer_norm1(hidden_states)
|
|
||||||
hidden_states, attn_weights = self.self_attn(
|
|
||||||
hidden_states=hidden_states,
|
|
||||||
attention_mask=attention_mask,
|
|
||||||
causal_attention_mask=causal_attention_mask,
|
|
||||||
output_attentions=output_attentions,
|
|
||||||
)
|
|
||||||
hidden_states = residual + hidden_states
|
|
        residual = hidden_states

        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision
class IdeficsVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`IdeficsVisionEncoderLayer`].

    Args:
        config: IdeficsVisionConfig
    """

    def __init__(self, prefix, config, weights):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList(
            [
                IdeficsVisionEncoderLayer(
                    prefix=f"{prefix}.encoder.layers.{layer_id}",
                    config=config,
                    weights=weights,
                )
                for layer_id in range(config.num_hidden_layers)
            ]
        )
        # self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # if self.gradient_checkpointing and self.training:
            #
            #     def create_custom_forward(module):
            #         def custom_forward(*inputs):
            #             return module(*inputs, output_attentions)
            #
            #         return custom_forward
            #
            #     layer_outputs = torch.utils.checkpoint.checkpoint(
            #         create_custom_forward(encoder_layer),
            #         hidden_states,
            #         attention_mask,
            #         causal_attention_mask,
            #     )
            # else:
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                causal_attention_mask,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, encoder_states, all_attentions]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_states,
            attentions=all_attentions,
        )


# Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
class IdeficsVisionTransformer(nn.Module):
    def __init__(self, prefix, config, weights):
        super().__init__()
        self.config = config

        self.embeddings = IdeficsVisionEmbeddings(
            prefix=f"{prefix}.embeddings", config=config, weights=weights
        )
        self.pre_layrnorm = nn.LayerNorm.load(
            prefix=f"{prefix}.pre_layrnorm", weights=weights, eps=config.layer_norm_eps
        )
        self.encoder = IdeficsVisionEncoder(
            prefix=prefix, config=config, weights=weights
        )
        self.post_layernorm = nn.LayerNorm.load(
            prefix=f"{prefix}.post_layernorm",
            weights=weights,
            eps=config.layer_norm_eps,
        )

    # copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
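As an orientation aid (not part of the diff above): a minimal sketch of how an already-constructed `IdeficsVisionTransformer` would typically be invoked. The `vision_model` argument and the tensor shapes in the comments are assumptions for illustration, not code from this repository.

```python
import torch


def encode_images(vision_model, pixel_values: torch.Tensor) -> torch.Tensor:
    # `vision_model` is assumed to be an IdeficsVisionTransformer built elsewhere
    # from a (prefix, config, weights) triple; `pixel_values` is assumed to be
    # (batch, 3, image_size, image_size) as produced by the image processor.
    with torch.no_grad():
        out = vision_model(pixel_values=pixel_values, return_dict=True)
    # (batch, num_positions, hidden_size) hidden states; position 0 holds the class
    # embedding that the pooler above reads via last_hidden_state[:, 0, :].
    return out.last_hidden_state
```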
|
@ -23,6 +23,12 @@ def load_text_model(prefix, config, weights, name=None):
|
|||||||
)
|
)
|
||||||
|
|
||||||
return FlashGemma2ForCausalLM(prefix, config, weights)
|
return FlashGemma2ForCausalLM(prefix, config, weights)
|
||||||
|
elif config.model_type == "gemma3" or config.model_type == "gemma3_text":
|
||||||
|
from text_generation_server.models.custom_modeling.flash_gemma3_modeling import (
|
||||||
|
FlashGemma3ForCausalLM,
|
||||||
|
)
|
||||||
|
|
||||||
|
return FlashGemma3ForCausalLM(prefix, config, weights)
|
||||||
elif config.model_type == "paligemma":
|
elif config.model_type == "paligemma":
|
||||||
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
|
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
|
||||||
FlashGemmaForCausalLM,
|
FlashGemmaForCausalLM,
|
||||||
@ -42,13 +48,20 @@ def load_vision_model(prefix, config, weights):
|
|||||||
return CLIPVisionTransformer(
|
return CLIPVisionTransformer(
|
||||||
prefix=f"{prefix}.vision_model", config=config, weights=weights
|
prefix=f"{prefix}.vision_model", config=config, weights=weights
|
||||||
)
|
)
|
||||||
if config.model_type == "siglip_vision_model":
|
if (
|
||||||
|
config.model_type == "siglip_vision_model"
|
||||||
|
or config.model_type == "gemma3_vision"
|
||||||
|
):
|
||||||
from text_generation_server.models.custom_modeling.siglip import (
|
from text_generation_server.models.custom_modeling.siglip import (
|
||||||
SiglipVisionTransformer,
|
SiglipVisionTransformer,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# TODO: ensure that using the prefix doesn't break any existing models
|
||||||
|
# that rely on the old prefix (update the old models if necessary)
|
||||||
return SiglipVisionTransformer(
|
return SiglipVisionTransformer(
|
||||||
prefix="vision_tower.vision_model", config=config, weights=weights
|
prefix=f"{prefix}.vision_model",
|
||||||
|
config=config,
|
||||||
|
weights=weights,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
raise RuntimeError(f"Unsupported model type {config.model_type}")
|
raise RuntimeError(f"Unsupported model type {config.model_type}")
|
||||||
|
@@ -80,22 +80,15 @@ from vllm_hpu_extension.profiler import HabanaMemoryProfiler, format_bytes

 tracer = trace.get_tracer(__name__)

-# Will be set in init
-SLIDING_WINDOW: Optional[int] = None
-
-
-def set_sliding_window(sliding_window: int):
-    global SLIDING_WINDOW
-    SLIDING_WINDOW = sliding_window
-
-
-def get_sliding_windows() -> int:
-    global SLIDING_WINDOW
-    return SLIDING_WINDOW
-
-
-def prepare_for_decode(
-    dtype, use_contiguous_pa, device, slots, block_tables, batch_size, bucketing_ctx
+
+def generate_block_metadata(
+    dtype,
+    use_contiguous_pa,
+    slots,
+    block_tables,
+    bucketing_ctx,
+    slots_in_window=None,
+    block_bucket_size=None,
 ):
     # Prepare values if we need to continue decoding
     # need for HPUPagedAttentionMetadata preparation
@@ -125,11 +118,12 @@ def prepare_for_decode(
     assert len(block_list) == len(block_groups)
     assert len(block_list) == len(block_usage)
     if use_contiguous_pa:
-        block_bucket_size = max(max(block_list) + 1, len(block_list))
-        if bucketing_ctx is not None:
-            block_bucket_size = bucketing_ctx.get_padded_decode_num_blocks(
-                block_bucket_size
-            )
+        if block_bucket_size is None:
+            block_bucket_size = max(max(block_list) + 1, len(block_list))
+            if bucketing_ctx is not None:
+                block_bucket_size = bucketing_ctx.get_padded_decode_num_blocks(
+                    block_bucket_size
+                )
         indices: List[Any]
         indices = [None] * block_bucket_size
         for i, bid in enumerate(block_list):
@@ -138,30 +132,38 @@ def prepare_for_decode(
         block_groups = gather_list(block_groups, indices, -1)
         block_usage = gather_list(block_usage, indices, 1)
     else:
-        block_bucket_size = len(block_list)
-        if bucketing_ctx is not None:
-            block_bucket_size = bucketing_ctx.get_padded_decode_num_blocks(
-                block_bucket_size
-            )
+        if block_bucket_size is None:
+            block_bucket_size = len(block_list)
+            if bucketing_ctx is not None:
+                block_bucket_size = bucketing_ctx.get_padded_decode_num_blocks(
+                    block_bucket_size
+                )
         block_list = pad_list(block_list, block_bucket_size, 0)
         block_groups = pad_list(block_groups, block_bucket_size, -1)
         block_usage = pad_list(block_usage, block_bucket_size, 1)
+    slots_in_window_mask = None
+    if slots_in_window is not None:
+        slot_list = [
+            block_id * BLOCK_SIZE + slot_idx
+            for block_id in block_list
+            for slot_idx in range(BLOCK_SIZE)
+        ]
+        slot_list = torch.tensor(slot_list, dtype=torch.int64)
+        slot_list = slot_list.view(-1, BLOCK_SIZE)
+        slots_in_window_mask = torch.isin(slot_list, slots_in_window)
+        for i in range(slots_in_window_mask.shape[0]):
+            if not slots_in_window_mask[i].any():
+                slots_in_window_mask[i, 0] = True

     block_list = torch.tensor(block_list, dtype=torch.int, device="cpu")
     block_groups = torch.tensor(block_groups, dtype=torch.int, device="cpu")
     block_usage = torch.tensor(block_usage, dtype=dtype, device="cpu")
-    block_list_device = _async_h2d_tensor_copy(block_list)
-    block_groups_device = _async_h2d_tensor_copy(block_groups)
-    block_usage_device = _async_h2d_tensor_copy(block_usage)
-
-    return trim_attn_metadata(
-        HPUPagedAttentionMetadata(
-            block_list=block_list_device,
-            block_groups=block_groups_device,
-            block_usage=block_usage_device,
-            block_mapping=None,
-            attn_bias=None,
-        )
+    return (
+        block_list,
+        block_groups,
+        block_usage,
+        slots_in_window_mask,
+        block_bucket_size,
     )

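To make the `slots_in_window_mask` built in the hunk above easier to follow, here is a toy, self-contained illustration of the same masking idea. `BLOCK_SIZE` is shrunk to 4 and the block/slot values are invented; only the shape of the computation mirrors the code above.

```python
import torch

BLOCK_SIZE = 4                                   # shrunk from the real block size
block_list = [0, 2]                              # padded list of KV-cache block ids
slots_in_window = torch.tensor([2, 3, 8, 9])     # slots that fall inside the window

# Expand every block into its slots, one row per block, then mark which of those
# slots belong to the sliding window - the same torch.isin() trick as above.
slot_list = torch.tensor(
    [b * BLOCK_SIZE + s for b in block_list for s in range(BLOCK_SIZE)]
).view(-1, BLOCK_SIZE)                           # [[0, 1, 2, 3], [8, 9, 10, 11]]
mask = torch.isin(slot_list, slots_in_window)    # [[F, F, T, T], [T, T, F, F]]
print(mask)
```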
@@ -975,7 +977,9 @@ class FlashCausalLMBatch(Batch):
             valid_indices=None,
         )

-    def prepare_for_decode(self, dtype, use_contiguous_pa, bucketing_ctx, pad_token_id):
+    def prepare_for_decode(
+        self, dtype, use_contiguous_pa, bucketing_ctx, pad_token_id, sliding_window
+    ):
         block_num = [length // BLOCK_SIZE + 1 for length in self.cache_lengths]
         block_tables = []
         for i, bt in enumerate(self.block_tables):
@@ -988,15 +992,65 @@ class FlashCausalLMBatch(Batch):
         padded_bs = self.input_ids.shape[0]
         slots = self.slots[self.slot_indices]

-        self.hpu_attn_meta = prepare_for_decode(
-            dtype,
-            use_contiguous_pa,
-            "hpu",
-            slots,
-            block_tables,
-            padded_bs,
-            bucketing_ctx,
-        )
+        block_list, block_groups, block_usage, _, block_bucket_size = (
+            generate_block_metadata(
+                dtype,
+                use_contiguous_pa,
+                slots,
+                block_tables,
+                bucketing_ctx,
+            )
+        )
+        meta = HPUPagedAttentionMetadata(
+            block_list=_async_h2d_tensor_copy(block_list),
+            block_groups=_async_h2d_tensor_copy(block_groups),
+            block_usage=_async_h2d_tensor_copy(block_usage),
+            block_mapping=None,
+            attn_bias=None,
+        )
+        if sliding_window is not None:
+            block_tables_in_window = []
+            for i, bt in enumerate(self.block_tables):
+                block_num_in_window = (
+                    sliding_window + 2 * BLOCK_SIZE - 2 - slots[i] % BLOCK_SIZE
+                ) // BLOCK_SIZE
+                block_tables_in_window.append(
+                    bt[max(0, block_num[i] - block_num_in_window) : block_num[i]]
+                )
+            slots_in_window = []
+            for i, indice in enumerate(self.slot_indices):
+                start_idx = indice - self.cache_lengths[i]
+                mask = (
+                    indice
+                    - torch.arange(
+                        start_idx,
+                        indice + 1,
+                        device=self.slots.device,
+                    )
+                ) < sliding_window
+                slots_in_window.append(self.slots[start_idx : indice + 1][mask])
+            slots_in_window = torch.cat(slots_in_window, dim=0)
+            (
+                block_list_in_window,
+                block_groups_in_window,
+                block_usage_in_window,
+                slots_in_window_mask,
+                _,
+            ) = generate_block_metadata(
+                dtype,
+                use_contiguous_pa,
+                slots,
+                block_tables_in_window,
+                bucketing_ctx,
+                slots_in_window,
+                block_bucket_size,
+            )
+            meta.block_list_in_window = _async_h2d_tensor_copy(block_list_in_window)
+            meta.block_groups_in_window = _async_h2d_tensor_copy(block_groups_in_window)
+            meta.block_usage_in_window = _async_h2d_tensor_copy(block_usage_in_window)
+            meta.slots_in_window_mask = _async_h2d_tensor_copy(slots_in_window_mask)
+
+        self.hpu_attn_meta = trim_attn_metadata(meta)
         self.input_ids = F.pad(
             self.input_ids, (0, padded_bs - self.input_ids.shape[0]), value=pad_token_id
         )
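The per-request selection in the sliding-window branch above boils down to keeping only the last `sliding_window` cache slots of each request. A standalone illustration of that arithmetic, with invented numbers:

```python
import torch

sliding_window = 3
indice = 10                                      # slot index of the request's current token
cache_length = 6                                 # tokens already cached for this request
start_idx = indice - cache_length
positions = torch.arange(start_idx, indice + 1)  # tensor([ 4,  5, ..., 10])
mask = (indice - positions) < sliding_window     # True only for the last 3 slots
print(positions[mask])                           # tensor([ 8,  9, 10])
```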
@@ -1022,22 +1076,23 @@ class FlashCausalLMBatch(Batch):
             (0, padded_bs - self.cache_lengths_tensor.shape[0]),
             value=0,
         )
-        next_token_chooser_parameters = []
-        next_token_chooser_parameters.extend([r.parameters for r in self.requests])
-        pad_next_token_chooser_parameters(next_token_chooser_parameters, padded_bs)
-        # update past grammar states
-        fsm_grammar_states = [0] * padded_bs
-
-        for i, req in enumerate(self.requests):
-            fsm_grammar_states[i] = self.next_token_chooser.fsm_grammar_states[i]
-
-        self.next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
-            next_token_chooser_parameters,
-            self.next_token_chooser.dtype,
-            self.next_token_chooser.device,
-            self.next_token_chooser.tokenizer,
-            fsm_grammar_states,
-        )
+        if len(self.next_token_chooser.do_sample) != padded_bs:
+            next_token_chooser_parameters = []
+            next_token_chooser_parameters.extend([r.parameters for r in self.requests])
+            pad_next_token_chooser_parameters(next_token_chooser_parameters, padded_bs)
+            # update past grammar states
+            fsm_grammar_states = [0] * padded_bs
+
+            for i, req in enumerate(self.requests):
+                fsm_grammar_states[i] = self.next_token_chooser.fsm_grammar_states[i]
+
+            self.next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
+                next_token_chooser_parameters,
+                self.next_token_chooser.dtype,
+                self.next_token_chooser.device,
+                self.next_token_chooser.tokenizer,
+                fsm_grammar_states,
+            )

     def prepare_for_prefill(
         self, max_padded_input_len, max_padded_bs, max_total_tokens, pad_token_id
@@ -1112,7 +1167,6 @@ class FlashCausalLMBatch(Batch):
             self.cache_lengths_tensor, (0, extra_pad_bs), value=0
         )

-        sliding_window = get_sliding_windows()
         position_ids = []
         slot_indices = []
         prefill_cache_indices = []
@@ -1178,9 +1232,7 @@ class FlashCausalLMBatch(Batch):

             # Create tensor to slice into the kv tensor in prefill
             # hpu need request_prefill_cache_indices to skip padding in kv cache
-            sliding_window = get_sliding_windows()
-            if sliding_window is None:
-                sliding_window = input_length
+            sliding_window = input_length
             cumulative_length += input_ids_padded_length[i]
             if sliding_window is not None:
                 request_prefill_cache_indices = torch.arange(
@@ -1328,23 +1380,25 @@ class FlashCausalLMBatch(Batch):
                     self.all_input_ids_tensor[i]
                 )
             self.all_input_ids_tensor = all_input_ids_tensor
-
-        next_token_chooser_parameters = []
-        next_token_chooser_parameters.extend([r.parameters for r in self.requests])
-        pad_next_token_chooser_parameters(next_token_chooser_parameters, max_padded_bs)
-        # update past grammar states
-        fsm_grammar_states = [0] * max_padded_bs
-
-        for i, req in enumerate(self.requests):
-            fsm_grammar_states[i] = self.next_token_chooser.fsm_grammar_states[i]
-
-        self.next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
-            next_token_chooser_parameters,
-            self.next_token_chooser.dtype,
-            self.next_token_chooser.device,
-            self.next_token_chooser.tokenizer,
-            fsm_grammar_states,
-        )
+        if len(self.next_token_chooser.do_sample) != max_padded_bs:
+            next_token_chooser_parameters = []
+            next_token_chooser_parameters.extend([r.parameters for r in self.requests])
+            pad_next_token_chooser_parameters(
+                next_token_chooser_parameters, max_padded_bs
+            )
+            # update past grammar states
+            fsm_grammar_states = [0] * max_padded_bs
+
+            for i, req in enumerate(self.requests):
+                fsm_grammar_states[i] = self.next_token_chooser.fsm_grammar_states[i]
+
+            self.next_token_chooser = HeterogeneousNextTokenChooser.from_pb(
+                next_token_chooser_parameters,
+                self.next_token_chooser.dtype,
+                self.next_token_chooser.device,
+                self.next_token_chooser.tokenizer,
+                fsm_grammar_states,
+            )

         if ADAPTER_TO_INDEX:
             if adapter_set:
@@ -1457,9 +1511,9 @@ class FlashCausalLM(Model):
         if text_config is not None:
             config = text_config

-        if getattr(config, "sliding_window", None) is not None:
-            set_sliding_window(config.sliding_window)
-        else:
+        if getattr(config, "sliding_window", None) is None:
+            config.sliding_window = None
+        if getattr(config, "use_sliding_window", True) is False:
             config.sliding_window = None

         self.num_layers = config.num_hidden_layers
@@ -1552,7 +1606,7 @@ class FlashCausalLM(Model):
     ):
         self.kv_cache = []
         empty_cache()
-        if self.config.model_type == "deepseek_v3":
+        if self.config.model_type in ["deepseek_v3", "deepseek_v2"]:
             self.kv_cache = [
                 KVCompressCache(
                     num_blocks=num_blocks,
@@ -1592,7 +1646,7 @@ class FlashCausalLM(Model):
         # Inspired by the original implementation in [vllm](https://github.com/vllm-project/vllm)
         # Calculate the number of blocks that can be allocated with the free memory
         dtype_size = torch.tensor([], dtype=self.kv_cache_dtype).element_size()
-        if self.config.model_type == "deepseek_v3":
+        if self.config.model_type in ["deepseek_v3", "deepseek_v2"]:
             cache_block_size = BLOCK_SIZE * (
                 self.config.kv_lora_rank + self.config.qk_rope_head_dim
             )
@@ -1883,6 +1937,15 @@ class FlashCausalLM(Model):
             kwargs["bypass_hpu_graphs"] = not self.use_graphs(
                 True, prompt_len, batch_size
             )
+        if self.sliding_window is not None:
+            attn_mask = seqlen.make_sliding_window_bias(
+                input_lengths.tolist(),
+                self.sliding_window,
+                self.dtype,
+                prompt_len,
+                batch_size,
+            )
+            seqlen.attn_mask = _async_h2d_tensor_copy(attn_mask)

         # We pass a `cu_seqlen_prefill` in order not to have to deal with paged attention cache allocation/deallocation.
         self.model.forward(
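`seqlen.make_sliding_window_bias` (called above) builds the prefill attention bias for windowed attention; its HPU implementation is not shown in this diff. Purely as a conceptual sketch, and not the TGI/HPU code, a generic additive sliding-window bias can be built like this:

```python
import torch


def sliding_window_bias(seq_len: int, window: int, dtype=torch.float32) -> torch.Tensor:
    # Query position i may attend to key position j only if j <= i and i - j < window.
    q = torch.arange(seq_len).unsqueeze(1)
    k = torch.arange(seq_len).unsqueeze(0)
    allowed = (k <= q) & ((q - k) < window)
    bias = torch.zeros(seq_len, seq_len, dtype=dtype)
    return bias.masked_fill(~allowed, torch.finfo(dtype).min)


print(sliding_window_bias(5, 2))
```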
@@ -1903,17 +1966,17 @@ class FlashCausalLM(Model):
         position_ids = torch.arange(batch_size, dtype=batch.position_ids.dtype)
         blocks = [block_num // batch_size for _ in range(batch_size)]
         blocks[0] += block_num % batch_size
-        past_len = []
         block_tables = []
         slots = []
         start_idx = 0
+        slot_indices = []

         # fetch the last blocked to warmup block num
         for i in range(batch_size):
             block_array = list(range(start_idx, start_idx + blocks[i]))
             slots.append(BLOCK_SIZE * block_array[-1] + BLOCK_SIZE - 1)
+            slot_indices.append((start_idx + blocks[i]) * BLOCK_SIZE - 1)
             block_tables.append(block_array)
-            past_len.append(blocks[i] * BLOCK_SIZE - 1)
             start_idx += blocks[i]
         input_lengths = torch.ones(batch_size, dtype=torch.int32)
         cu_seqlen_prefill = torch.zeros(batch_size + 1, dtype=torch.int32)
@@ -1922,16 +1985,61 @@ class FlashCausalLM(Model):
         seqlen = Seqlen(
             input_lengths=_async_h2d_tensor_copy(input_lengths),
         )
-        hpu_attention_meta = prepare_for_decode(
-            self.dtype,
-            self.use_contiguous_pa,
-            self.device,
-            slots,
-            block_tables,
-            batch_size,
-            bucketing_ctx=None,
-        )
+        block_list, block_groups, block_usage, _, block_bucket_size = (
+            generate_block_metadata(
+                self.dtype,
+                self.use_contiguous_pa,
+                slots,
+                block_tables,
+                self.bucketing_ctx,
+            )
+        )
+        meta = HPUPagedAttentionMetadata(
+            block_list=_async_h2d_tensor_copy(block_list),
+            block_groups=_async_h2d_tensor_copy(block_groups),
+            block_usage=_async_h2d_tensor_copy(block_usage),
+            block_mapping=None,
+            attn_bias=None,
+        )
+        if self.sliding_window is not None:
+            block_tables_in_window = []
+            for i, bt in enumerate(block_tables):
+                block_num_in_window = (
+                    self.sliding_window + BLOCK_SIZE - 1
+                ) // BLOCK_SIZE
+                block_tables_in_window.append(
+                    bt[max(0, blocks[i] - block_num_in_window) : blocks[i]]
+                )
+            slots_in_window = []
+            start_idx = 0
+            for i, indice in enumerate(slot_indices):
+                mask = (
+                    indice - torch.arange(start_idx, indice + 1)
+                ) < self.sliding_window
+                slots_in_window.append(torch.arange(start_idx, indice + 1)[mask])
+                start_idx += blocks[i] * BLOCK_SIZE
+            slots_in_window = torch.cat(slots_in_window, dim=0)
+            (
+                block_list_in_window,
+                block_groups_in_window,
+                block_usage_in_window,
+                slots_in_window_mask,
+                _,
+            ) = generate_block_metadata(
+                self.dtype,
+                self.use_contiguous_pa,
+                slots,
+                block_tables_in_window,
+                self.bucketing_ctx,
+                slots_in_window,
+                block_bucket_size,
+            )
+            meta.block_list_in_window = _async_h2d_tensor_copy(block_list_in_window)
+            meta.block_groups_in_window = _async_h2d_tensor_copy(block_groups_in_window)
+            meta.block_usage_in_window = _async_h2d_tensor_copy(block_usage_in_window)
+            meta.slots_in_window_mask = _async_h2d_tensor_copy(slots_in_window_mask)
+
+        hpu_attention_meta = trim_attn_metadata(meta)
         slots_tensor = torch.tensor(slots, dtype=batch.slots.dtype)
         kwargs = {}
         if htorch.utils.internal.is_lazy():
@@ -2032,16 +2140,25 @@ class FlashCausalLM(Model):
         )

         kwargs = {}
+        batch_size = input_lengths.shape[0]
+        prompt_len = (
+            input_ids.shape[0] // batch_size
+            if batch.prefilling
+            else batch.hpu_attn_meta.block_list.shape[0]
+        )
         if htorch.utils.internal.is_lazy():
-            batch_size = input_lengths.shape[0]
-            prompt_len = (
-                input_ids.shape[0] // batch_size
-                if batch.prefilling
-                else batch.hpu_attn_meta.block_list.shape[0]
-            )
             kwargs["bypass_hpu_graphs"] = not self.use_graphs(
                 batch.prefilling, prompt_len, batch_size
             )
+        if self.sliding_window is not None and batch.prefilling:
+            attn_mask = seqlen.make_sliding_window_bias(
+                input_lengths.tolist(),
+                self.sliding_window,
+                self.dtype,
+                prompt_len,
+                batch_size,
+            )
+            seqlen.attn_mask = _async_h2d_tensor_copy(attn_mask)

         logits, speculative_logits = self.model.forward(
             input_ids=input_ids,
@@ -2321,6 +2438,7 @@ class FlashCausalLM(Model):
             self.use_contiguous_pa,
             self.bucketing_ctx,
             self.tokenizer.pad_token_id,
+            self.sliding_window,
         )
         if hasattr(self, "set_inputs_embeds") and callable(self.set_inputs_embeds):
             self.set_inputs_embeds(batch)
|
@ -11,7 +11,7 @@ from text_generation_server.pb import generate_pb2
|
|||||||
from text_generation_server.models.flash_causal_lm import (
|
from text_generation_server.models.flash_causal_lm import (
|
||||||
FlashCausalLMBatch,
|
FlashCausalLMBatch,
|
||||||
FlashCausalLM,
|
FlashCausalLM,
|
||||||
prepare_for_decode,
|
generate_block_metadata,
|
||||||
)
|
)
|
||||||
from text_generation_server.models.globals import PREFIX_CACHING, BLOCK_SIZE
|
from text_generation_server.models.globals import PREFIX_CACHING, BLOCK_SIZE
|
||||||
from loguru import logger
|
from loguru import logger
|
||||||
@ -21,6 +21,8 @@ from text_generation_server.layers.attention import (
|
|||||||
Seqlen,
|
Seqlen,
|
||||||
trim_seqlen_metadata,
|
trim_seqlen_metadata,
|
||||||
_async_h2d_tensor_copy,
|
_async_h2d_tensor_copy,
|
||||||
|
HPUPagedAttentionMetadata,
|
||||||
|
trim_attn_metadata,
|
||||||
)
|
)
|
||||||
import habana_frameworks.torch as htorch
|
import habana_frameworks.torch as htorch
|
||||||
import time
|
import time
|
||||||
@@ -749,33 +751,79 @@ class FlashVlmCausalLM(FlashCausalLM):
         )
         blocks = [block_num // batch_size for _ in range(batch_size)]
         blocks[0] += block_num % batch_size
-        past_len = []
         block_tables = []
         slots = []
         start_idx = 0
+        slot_indices = []

         # fetch the last blocked to warmup block num
         for i in range(batch_size):
             block_array = list(range(start_idx, start_idx + blocks[i]))
             slots.append(BLOCK_SIZE * block_array[-1] + BLOCK_SIZE - 1)
             block_tables.append(block_array)
-            past_len.append(blocks[i] * BLOCK_SIZE - 1)
+            slot_indices.append((start_idx + blocks[i]) * BLOCK_SIZE - 1)
             start_idx += blocks[i]
         input_lengths = torch.ones(batch_size, dtype=torch.int32)

         seqlen = Seqlen(
             input_lengths=_async_h2d_tensor_copy(input_lengths),
         )
-        hpu_attention_meta = prepare_for_decode(
-            self.dtype,
-            self.use_contiguous_pa,
-            self.device,
-            slots,
-            block_tables,
-            batch_size,
-            bucketing_ctx=None,
-        )
+        block_list, block_groups, block_usage, _, block_bucket_size = (
+            generate_block_metadata(
+                self.dtype,
+                self.use_contiguous_pa,
+                slots,
+                block_tables,
+                self.bucketing_ctx,
+            )
+        )
+        meta = HPUPagedAttentionMetadata(
+            block_list=_async_h2d_tensor_copy(block_list),
+            block_groups=_async_h2d_tensor_copy(block_groups),
+            block_usage=_async_h2d_tensor_copy(block_usage),
+            block_mapping=None,
+            attn_bias=None,
+        )
+        if self.sliding_window is not None:
+            block_tables_in_window = []
+            for i, bt in enumerate(block_tables):
+                block_num_in_window = (
+                    self.sliding_window + BLOCK_SIZE - 1
+                ) // BLOCK_SIZE
+                block_tables_in_window.append(
+                    bt[max(0, blocks[i] - block_num_in_window) : blocks[i]]
+                )
+            slots_in_window = []
+            start_idx = 0
+            for i, indice in enumerate(slot_indices):
+                mask = (
+                    indice - torch.arange(start_idx, indice + 1)
+                ) < self.sliding_window
+                slots_in_window.append(torch.arange(start_idx, indice + 1)[mask])
+                start_idx += blocks[i] * BLOCK_SIZE
+            slots_in_window = torch.cat(slots_in_window, dim=0)
+            (
+                block_list_in_window,
+                block_groups_in_window,
+                block_usage_in_window,
+                slots_in_window_mask,
+                _,
+            ) = generate_block_metadata(
+                self.dtype,
+                self.use_contiguous_pa,
+                slots,
+                block_tables_in_window,
+                self.bucketing_ctx,
+                slots_in_window,
+                block_bucket_size,
+            )
+            meta.block_list_in_window = _async_h2d_tensor_copy(block_list_in_window)
+            meta.block_groups_in_window = _async_h2d_tensor_copy(block_groups_in_window)
+            meta.block_usage_in_window = _async_h2d_tensor_copy(block_usage_in_window)
+            meta.slots_in_window_mask = _async_h2d_tensor_copy(slots_in_window_mask)
+
+        hpu_attention_meta = trim_attn_metadata(meta)
         slots_tensor = torch.tensor(slots, dtype=batch.slots.dtype)
         inputs_embeds = self.get_inputs_embeds(
             input_ids=input_ids.to(self.device),
@@ -1001,17 +1049,8 @@ class FlashVlmCausalLM(FlashCausalLM):

         attention_mask = None
         attention_mask_forward = None
-        if self.model.config.model_type == "gemma3" and cu_seqlen_prefill is not None:
-            attention_mask = self.model.get_attention_mask(
-                input_ids, cu_seqlen_prefill, self.dtype, bool_mask=True
-            )
-            min_dtype = torch.finfo(self.dtype).min
-            attention_mask_forward = torch.where(attention_mask, 0, min_dtype).to(
-                input_ids.device
-            )
-            attention_mask = attention_mask.reshape(-1)
         if self.model.config.model_type == "llama4":
-            attention_mask = (input_ids != 0).long()
+            attention_mask = (input_ids != self.tokenizer.pad_token_id).long()
             attention_mask_forward = attention_mask.view(input_lengths.shape[0], -1)

         if cu_seqlen_prefill is None and self.max_past() is not None:
@@ -1020,17 +1059,6 @@ class FlashVlmCausalLM(FlashCausalLM):
             # This makes sure the max_s for the decode pass is correct.
             max_s = min(self.max_past(), max_s)

-        kwargs = {}
-        if htorch.utils.internal.is_lazy():
-            batch_size = input_lengths.shape[0]
-            seqlen = (
-                input_ids.shape[0] // batch_size
-                if batch.prefilling
-                else batch.hpu_attn_meta.block_list.shape[0]
-            )
-            kwargs["bypass_hpu_graphs"] = not self.use_graphs(
-                batch.prefilling, seqlen, batch_size
-            )
         if batch.prefill_cache_indices is not None:
             slots_pad = torch.zeros_like(input_ids, device=slots.device)
             slots_pad[batch.prefill_cache_indices] = slots
@@ -1043,6 +1071,26 @@ class FlashVlmCausalLM(FlashCausalLM):
         seqlen = Seqlen(
             input_lengths=_async_h2d_tensor_copy(input_lengths),
         )
+        kwargs = {}
+        batch_size = input_lengths.shape[0]
+        prompt_len = (
+            input_ids.shape[0] // batch_size
+            if batch.prefilling
+            else batch.hpu_attn_meta.block_list.shape[0]
+        )
+        if htorch.utils.internal.is_lazy():
+            kwargs["bypass_hpu_graphs"] = not self.use_graphs(
+                batch.prefilling, prompt_len, batch_size
+            )
+        if self.sliding_window is not None:
+            attn_mask = seqlen.make_sliding_window_bias(
+                input_lengths.tolist(),
+                self.sliding_window,
+                self.dtype,
+                prompt_len,
+                batch_size,
+            )
+            seqlen.attn_mask = _async_h2d_tensor_copy(attn_mask)
         logits, speculative_logits = self.model.forward(
             inputs_embeds=inputs_embeds,
             position_ids=_async_h2d_tensor_copy(position_ids),
@@ -12,7 +12,7 @@ from transformers import (
     PreTrainedTokenizerBase,
 )
 from text_generation_server.models.flash_causal_lm import (
-    prepare_for_decode,
+    generate_block_metadata,
 )
 from text_generation_server.models.flash_vlm_causal_lm import (
     FlashVlmCausalLMBatch,
@@ -23,6 +23,8 @@ from text_generation_server.layers.attention import (
     Seqlen,
     trim_seqlen_metadata,
     _async_h2d_tensor_copy,
+    HPUPagedAttentionMetadata,
+    trim_attn_metadata,
 )
 import habana_frameworks.torch as htorch
 from loguru import logger
@@ -224,7 +226,7 @@ def generate_cross_attention_states(
     cross_attention_states, image_indices, input_lengths, pad_seq_len, prefilling
 ):
     if cross_attention_states is None:
-        return None, None, None
+        return None, None
     indices_list = []
     if prefilling:
         for i in image_indices:
@@ -247,33 +249,41 @@ class FlashMllamaCausalLM(FlashVlmCausalLM):
         position_ids = torch.arange(batch_size, dtype=batch.position_ids.dtype)
         blocks = [block_num // batch_size for _ in range(batch_size)]
         blocks[0] += block_num % batch_size
-        past_len = []
         block_tables = []
         slots = []
         start_idx = 0
+        slot_indices = []

         # fetch the last blocked to warmup block num
         for i in range(batch_size):
             block_array = list(range(start_idx, start_idx + blocks[i]))
             slots.append(BLOCK_SIZE * block_array[-1] + BLOCK_SIZE - 1)
             block_tables.append(block_array)
-            past_len.append(blocks[i] * BLOCK_SIZE - 1)
+            slot_indices.append((start_idx + blocks[i]) * BLOCK_SIZE - 1)
             start_idx += blocks[i]
         input_lengths = torch.ones(batch_size, dtype=torch.int32)

         seqlen = Seqlen(
             input_lengths=_async_h2d_tensor_copy(input_lengths),
         )
-        hpu_attention_meta = prepare_for_decode(
-            self.dtype,
-            self.use_contiguous_pa,
-            self.device,
-            slots,
-            block_tables,
-            batch_size,
-            bucketing_ctx=None,
-        )
+        block_list, block_groups, block_usage, _, block_bucket_size = (
+            generate_block_metadata(
+                self.dtype,
+                self.use_contiguous_pa,
+                slots,
+                block_tables,
+                self.bucketing_ctx,
+            )
+        )
+        meta = HPUPagedAttentionMetadata(
+            block_list=_async_h2d_tensor_copy(block_list),
+            block_groups=_async_h2d_tensor_copy(block_groups),
+            block_usage=_async_h2d_tensor_copy(block_usage),
+            block_mapping=None,
+            attn_bias=None,
+        )
+
+        hpu_attention_meta = trim_attn_metadata(meta)
         # We pass a `cu_seqlen_prefill` in order not to have to deal with paged attention cache allocation/deallocation.
         image_indices = torch.tensor(batch.image_indices)
         image_indices = image_indices.repeat(batch_size)
@@ -79,7 +79,7 @@ class Model(ABC):
             requires_padding=self.requires_padding,
             dtype=str(self.dtype),
             device_type=self.device.type,
-            window_size=self.sliding_window,
+            window_size=None,
             speculate=self.speculate,
             block_size=BLOCK_SIZE,
         )
@@ -1,50 +0,0 @@
-import os
-from pathlib import Path
-from loguru import logger
-from text_generation_server import server
-import argparse
-from text_generation_server.utils.adapter import parse_lora_adapters
-
-
-def main(args):
-    logger.info("TGIService: starting tgi service .... ")
-    logger.info(
-        "TGIService: --model_id {}, --revision {}, --sharded {}, --speculate {}, --dtype {}, --trust_remote_code {}, --uds_path {} ".format(
-            args.model_id,
-            args.revision,
-            args.sharded,
-            args.speculate,
-            args.dtype,
-            args.trust_remote_code,
-            args.uds_path,
-        )
-    )
-    lora_adapters = parse_lora_adapters(os.getenv("LORA_ADAPTERS"))
-    server.serve(
-        model_id=args.model_id,
-        lora_adapters=lora_adapters,
-        revision=args.revision,
-        sharded=args.sharded,
-        quantize=args.quantize,
-        speculate=args.speculate,
-        dtype=args.dtype,
-        trust_remote_code=args.trust_remote_code,
-        uds_path=args.uds_path,
-        max_input_tokens=args.max_input_tokens,
-        kv_cache_dtype="auto",
-    )
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--model_id", type=str)
-    parser.add_argument("--revision", type=str)
-    parser.add_argument("--sharded", type=bool)
-    parser.add_argument("--speculate", type=int, default=None)
-    parser.add_argument("--dtype", type=str)
-    parser.add_argument("--trust_remote_code", type=bool)
-    parser.add_argument("--uds_path", type=Path)
-    parser.add_argument("--quantize", type=str)
-    parser.add_argument("--max_input_tokens", type=int)
-    args = parser.parse_args()
-    main(args)
@@ -341,7 +341,10 @@ class NeuronGenerator(Generator):
         self.model = model
         if not isinstance(self.model, NeuronModelForCausalLM):
             raise ValueError("The model must be a NeuronModelForCausalLM.")
-        if not model.neuron_config.continuous_batching:
+        if (
+            model.neuron_config.batch_size > 1
+            and not model.neuron_config.continuous_batching
+        ):
             raise ValueError(
                 "The neuron model must be compiled with continuous_batching=True."
             )
@@ -10,7 +10,7 @@
       "name": "Apache 2.0",
       "url": "https://www.apache.org/licenses/LICENSE-2.0"
     },
-    "version": "3.3.3"
+    "version": "3.3.4-dev0"
   },
   "paths": {
     "/": {
@@ -20,7 +20,7 @@ hf_token=YOUR_HF_ACCESS_TOKEN

 docker run --runtime=habana --cap-add=sys_nice --ipc=host \
   -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \
-  ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
+  ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
   --model-id $model
 ```

@@ -52,7 +52,7 @@ hf_token=YOUR_ACCESS_TOKEN

 docker run --runtime=habana --cap-add=sys_nice --ipc=host \
   -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \
-  ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
+  ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
   --model-id $model
   <text-generation-inference-launcher-arguments>
 ```
@@ -86,42 +86,9 @@ We recommend always using sharding when running on a multi-card machine.
 By default, all models run with BF16 precision on Gaudi hardware.

 #### FP8 Precision
+TGI-Gaudi supports FP8 precision inference, which can significantly reduce memory usage and improve performance for large models. We support models with W8A8 FP compressed-tensors parameters, such as [RedHatAI/Mixtral-8x7B-Instruct-v0.1-FP8](https://huggingface.co/RedHatAI/Mixtral-8x7B-Instruct-v0.1-FP8), and AutoFP8-generated models, such as [RedHatAI/Meta-Llama-3-8B-Instruct-FP8](https://huggingface.co/RedHatAI/Meta-Llama-3-8B-Instruct-FP8).
 TGI-Gaudi supports FP8 precision inference with [Intel Neural Compressor (INC)](https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_FP8.html).
-
-To run FP8 Inference:
-
-1. Measure statistics using [Optimum Habana measurement script](https://github.com/huggingface/optimum-habana/tree/main/examples/text-generation#running-with-fp8)
-2. Run the model in TGI with QUANT_CONFIG setting - e.g. `-e QUANT_CONFIG=./quantization_config/maxabs_quant.json`.
-
-The following commmand example for FP8 inference is based on the assumption that measurement is done via the first step above.
-
-Example for Llama3.1-70B on 8 cards with FP8 precision:
-
-```bash
-model=meta-llama/Meta-Llama-3.1-70B-Instruct
-hf_token=YOUR_ACCESS_TOKEN
-volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
-
-docker run -p 8080:80 \
-  --runtime=habana \
-  --cap-add=sys_nice \
-  --ipc=host \
-  -v $volume:/data \
-  -v $PWD/quantization_config:/usr/src/quantization_config \
-  -v $PWD/hqt_output:/usr/src/hqt_output \
-  -e QUANT_CONFIG=./quantization_config/maxabs_quant.json \
-  -e HF_TOKEN=$hf_token \
-  -e MAX_TOTAL_TOKENS=2048 \
-  -e BATCH_BUCKET_SIZE=256 \
-  -e PREFILL_BATCH_BUCKET_SIZE=4 \
-  -e PAD_SEQUENCE_TO_MULTIPLE_OF=64 \
-  ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
-  --model-id $model \
-  --sharded true --num-shard 8 \
-  --max-input-tokens 1024 --max-total-tokens 2048 \
-  --max-batch-prefill-tokens 4096 --max-batch-size 256 \
-  --max-waiting-tokens 7 --waiting-served-ratio 1.2 --max-concurrent-requests 512
-```

 ### How to Run Vision-Language Models (VLMs)

@@ -139,9 +106,7 @@ docker run -p 8080:80 \
   --cap-add=sys_nice \
   --ipc=host \
   -v $volume:/data \
-  -e PREFILL_BATCH_BUCKET_SIZE=1 \
-  -e BATCH_BUCKET_SIZE=1 \
-  ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
+  ghcr.io/huggingface/text-generation-inference:3.3.4-gaudi \
   --model-id $model \
   --max-input-tokens 4096 --max-batch-prefill-tokens 16384 \
   --max-total-tokens 8192 --max-batch-size 4
@@ -155,7 +120,7 @@ curl -N 127.0.0.1:8080/generate \
   -H 'Content-Type: application/json'
 ```

-> Note: In Llava-v1.6-Mistral-7B, an image usually accounts for 2000 input tokens. For example, an image of size 512x512 is represented by 2800 tokens. Thus, `max-input-tokens` must be larger than the number of tokens associated with the image. Otherwise the image may be truncated. We set `BASE_IMAGE_TOKENS=2048` as the default image token value. This is the minimum value of `max-input-tokens`. You can override the environment variable `BASE_IMAGE_TOKENS` to change this value. The warmup will generate graphs with input length from `BASE_IMAGE_TOKENS` to `max-input-tokens`. For Llava-v1.6-Mistral-7B, the value of `max-batch-prefill-tokens` is 16384, which is calcualted as follows: `prefill_batch_size` = `max-batch-prefill-tokens` / `max-input-tokens`.
+> Note: In Llava-v1.6-Mistral-7B, an image usually accounts for 2000 input tokens. For example, an image of size 512x512 is represented by 2800 tokens. Thus, `max-input-tokens` must be larger than the number of tokens associated with the image. Otherwise the image may be truncated. The value of `max-batch-prefill-tokens` is 16384, which is calculated as follows: `prefill_batch_size` = `max-batch-prefill-tokens` / `max-input-tokens`.

 ### How to Benchmark Performance

@@ -184,39 +149,16 @@ docker run \

 Please refer to the [inference-benchmarker README](https://github.com/huggingface/inference-benchmarker) for more details.

-### How to Profile Performance
-
-To collect performance profiling, you need to set the following environment variables:
-
-| Name               | Value(s)   | Default          | Description                                           |
-|--------------------| :--------- | :--------------- | :---------------------------------------------------- |
-| PROF_WAITSTEP      | integer    | 0                | Control profile wait steps                            |
-| PROF_WARMUPSTEP    | integer    | 0                | Control profile warmup steps                          |
-| PROF_STEP          | integer    | 0                | Enable/disable profile, control profile active steps  |
-| PROF_PATH          | string     | /tmp/hpu_profile | Define profile folder                                 |
-| PROF_RANKS         | string     | 0                | Comma-separated list of ranks to profile              |
-| PROF_RECORD_SHAPES | True/False | False            | Control record_shapes option in the profiler          |
-
-To use these environment variables, add them to your docker run command with the -e flag. For example:
-
-```bash
-docker run --runtime=habana --ipc=host --cap-add=sys_nice \
-  -p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \
-  -e PROF_WAITSTEP=10 \
-  -e PROF_WARMUPSTEP=10 \
-  -e PROF_STEP=1 \
-  -e PROF_PATH=/tmp/hpu_profile \
-  -e PROF_RANKS=0 \
-  -e PROF_RECORD_SHAPES=True \
-  ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
-  --model-id $model
-```
-
 ## Explanation: Understanding TGI on Gaudi

 ### The Warmup Process

-To ensure optimal performance, warmup is performed at the beginning of each server run. This process creates queries with various input shapes based on provided parameters and runs basic TGI operations (prefill, decode, concatenate).
+Intel Gaudi accelerators perform best when operating on models with fixed tensor shapes. [Intel Gaudi Graph Compiler](https://docs.habana.ai/en/latest/Gaudi_Overview/Intel_Gaudi_Software_Suite.html#graph-compiler-and-runtime)
+generates optimized binary code that implements the given model topology on Gaudi. In its default configuration, the produced binary code may be highly dependent on input and output tensor shapes, requiring graph recompilation
+when encountering tensors with different shapes within the same topology. While these binaries efficiently utilize Gaudi, the compilation process itself can introduce noticeable overhead in end-to-end execution.
+In dynamic inference serving scenarios, minimizing the number of graph compilations and reducing the risk of graph compilation occurring during server runtime is important.
+
+To ensure optimal performance, warmup is performed at the beginning of each server run. This process creates queries with various input shapes based on provided parameters and runs basic TGI operations (prefill, decode).

 Note: Model warmup can take several minutes, especially for FP8 inference. For faster subsequent runs, refer to [Disk Caching Eviction Policy](https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_PyTorch_Models.html#disk-caching-eviction-policy).

@@ -229,20 +171,8 @@ Note: Model warmup can take several minutes, especially for FP8 inference. For f
 #### Batch Size Parameters
 - For prefill operation, please set `--max-batch-prefill-tokens` as `bs * max-input-tokens`, where `bs` is your expected maximum prefill batch size.
 - For decode operation, please set `--max-batch-size` as `bs`, where `bs` is your expected maximum decode batch size.
-- Please note that batch size will be always padded to the nearest multiplication of `BATCH_BUCKET_SIZE` and `PREFILL_BATCH_BUCKET_SIZE`.
+- Please note that the batch size will always be padded to the nearest shape that has been warmed up. This is done to avoid out-of-memory issues and to ensure that the graphs are reused efficiently.
||||||
|
|
||||||
#### Performance and Memory Parameters
|
|
||||||
- `PAD_SEQUENCE_TO_MULTIPLE_OF` determines sizes of input length buckets. Since warmup creates several graphs for each bucket, it's important to adjust that value proportionally to input sequence length. Otherwise, some out of memory issues can be observed.
|
|
||||||
- `ENABLE_HPU_GRAPH` enables HPU graphs usage, which is crucial for performance results. Recommended value to keep is `true`.
|
|
||||||
|
|
||||||
#### Sequence Length Parameters
|
|
||||||
- `--max-input-tokens`: Maximum possible input prompt length (default: 4095)
|
|
||||||
- `--max-total-tokens`: Maximum possible total sequence length (input + output) (default: 4096)
|
|
||||||
|
|
||||||
#### Batch Size Parameters
|
|
||||||
- `--max-batch-prefill-tokens`: Set as `bs * max-input-tokens` where `bs` is your expected maximum prefill batch size
|
|
||||||
- `--max-batch-size`: Set as `bs` where `bs` is your expected maximum decode batch size
|
|
||||||
- Note: Batch sizes are padded to the nearest multiple of `BATCH_BUCKET_SIZE` and `PREFILL_BATCH_BUCKET_SIZE`
|
|
||||||
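As a concrete sizing sketch (all numbers are placeholders, and `$volume`, `$hf_token`, and `$model` are assumed to be set as in the earlier examples): with `--max-input-tokens 1024` and an expected prefill batch size of 4, `--max-batch-prefill-tokens` becomes `4 * 1024 = 4096`.

```bash
docker run --runtime=habana --ipc=host --cap-add=sys_nice \
-p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \
ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
--model-id $model \
--max-input-tokens 1024 --max-total-tokens 2048 \
--max-batch-prefill-tokens 4096 \
--max-batch-size 8
```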

## Reference
## Reference

@ -253,39 +183,45 @@ This section contains reference information about the Gaudi backend.

Text Generation Inference enables serving optimized models on Gaudi hardware. The following sections list which models (VLMs & LLMs) are supported on Gaudi.
Text Generation Inference enables serving optimized models on Gaudi hardware. The following sections list which models (VLMs & LLMs) are supported on Gaudi.

**Large Language Models (LLMs)**
**Large Language Models (LLMs)**
- [Llama2-7B](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
- [deepseek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1)
- [Llama2-70B](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)
- [deepseek-v2](https://huggingface.co/deepseek-ai/DeepSeek-V2)
- [Llama3-8B](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct)
- [Llama2](https://huggingface.co/collections/meta-llama/llama-2-family-661da1f90a9d678b6f55773b)
- [Llama3-70B](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct)
- [Llama3](https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f)
- [LLama3.1-8B](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct)
- [CodeLlama](https://huggingface.co/codellama/CodeLlama-13b-hf)
- [LLama3.1-70B](https://huggingface.co/meta-llama/Meta-Llama-3.1-70B-Instruct)
- [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
- [CodeLlama-13B](https://huggingface.co/codellama/CodeLlama-13b-hf)
- [Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3)
- [Opt-125m](https://huggingface.co/facebook/opt-125m)
- [Qwen 2](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f)
- [OpenAI-gpt2](https://huggingface.co/openai-community/gpt2)
- [Qwen 3](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f)
- [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
- [Qwen 3 Moe](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f)
- [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3)
- [Qwen2-72B](https://huggingface.co/Qwen/Qwen2-72B-Instruct)
- [Qwen2-7B](https://huggingface.co/Qwen/Qwen2-7B-Instruct)
- [Phi-1.5](https://huggingface.co/microsoft/phi-1_5)
- [Phi-1.5](https://huggingface.co/microsoft/phi-1_5)
- [Gemma-7b](https://huggingface.co/google/gemma-7b-it)
- [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)
- [Starcoder2-3b](https://huggingface.co/bigcode/starcoder2-3b)
- [PhiMoe](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct)
- [Starcoder2-15b](https://huggingface.co/bigcode/starcoder2-15b)
- [Gemma](https://huggingface.co/google/gemma-7b-it)
- [Starcoder](https://huggingface.co/bigcode/starcoder)
- [Gemma2](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315)
- [falcon-7b-instruct](https://huggingface.co/tiiuae/falcon-7b-instruct)
- [Gemma3 Text](https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d)
- [Falcon-180B](https://huggingface.co/tiiuae/falcon-180B-chat)
- [Granite](https://huggingface.co/ibm-granite/granite-3.0-8b-instruct)
- [Cohere](https://huggingface.co/CohereForAI/c4ai-command-r-plus)
- [dbrx](https://huggingface.co/databricks/dbrx-instruct)
- [Starcoder2](https://huggingface.co/bigcode/starcoder2-3b)
- [Falcon](https://huggingface.co/tiiuae/falcon-7b-instruct)
- [GPT-2](https://huggingface.co/openai-community/gpt2)
- [GPT-2](https://huggingface.co/openai-community/gpt2)
- [gpt-j-6b](https://huggingface.co/EleutherAI/gpt-j-6b)
- [gpt-j-6b](https://huggingface.co/EleutherAI/gpt-j-6b)
- [gpt-bigcode](https://huggingface.co/bigcode/gpt_bigcode-santacoder)
- [Baichuan](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat)

**Vision-Language Models (VLMs)**
**Vision-Language Models (VLMs)**
- [LLaVA-v1.6-Mistral-7B](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)
- [Llava Next (1.6)](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf)
- [Mllama (Multimodal Llama from Meta)](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct)
- [Mllama (Multimodal Llama from Meta)](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct)
- [Idefics](https://huggingface.co/HuggingFaceM4/idefics-9b)
- [idefics 2](https://huggingface.co/HuggingFaceM4/idefics2-8b)
- [Idefics 2](https://huggingface.co/HuggingFaceM4/idefics2-8b)
- [idefics 3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3)
- [Idefics 2.5](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3)
- [PaliGemma](https://huggingface.co/google/paligemma-3b-pt-224)
- [Qwen2-VL-2B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct)
- [Llama4](https://huggingface.co/collections/meta-llama/llama-4-67f0c30d9fe03840bc9d0164)
- [Qwen/Qwen2.5-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct)
- [Gemma3](https://huggingface.co/collections/google/gemma-3-release-67c6c6f89c4f76621268bb6d)
- [Qwen 2.5 VL](https://huggingface.co/collections/Qwen/qwen25-vl-6795ffac22b334a837c0f9a5)
- [Qwen 2 VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d)

We also support, on a best-effort basis, models with a different parameter count that use the same model architecture, but those models were not tested. For example, the Gaudi backend supports `meta-llama/Llama-3.2-1B`, since it uses the standard Llama 3 architecture. If you have an issue with a model, please open an issue on the [Gaudi backend repository](https://github.com/huggingface/text-generation-inference/issues).
If you have an issue with a model, please open an issue on the [Gaudi backend repository](https://github.com/huggingface/text-generation-inference/issues).

### Environment Variables
### Environment Variables

@ -293,16 +229,10 @@ The following table contains the environment variables that can be used to confi

| Name | Value(s) | Default | Description | Usage |
| Name | Value(s) | Default | Description | Usage |
|-----------------------------| :--------- | :--------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------- |
|-----------------------------| :--------- | :--------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------- |
| ENABLE_HPU_GRAPH | True/False | True | Enable HPU graphs or not | add -e in docker run command |
| LIMIT_HPU_GRAPH | True/False | True | Skip HPU graph usage for prefill to save memory; set to `True` for large sequence/decoding lengths (e.g. 300/212) | add -e in docker run command |
| LIMIT_HPU_GRAPH | True/False | True | Skip HPU graph usage for prefill to save memory; set to `True` for large sequence/decoding lengths (e.g. 300/212) | add -e in docker run command |
| BATCH_BUCKET_SIZE | integer | 8 | Batch size for decode operation will be rounded to the nearest multiple of this number. This limits the number of cached graphs | add -e in docker run command |
| PREFILL_BATCH_BUCKET_SIZE | integer | 4 | Batch size for prefill operation will be rounded to the nearest multiple of this number. This limits the number of cached graphs | add -e in docker run command |
| PAD_SEQUENCE_TO_MULTIPLE_OF | integer | 128 | For prefill operation, sequences will be padded to a multiple of the provided value. | add -e in docker run command |
| SKIP_TOKENIZER_IN_TGI | True/False | False | Skip tokenizer for input/output processing | add -e in docker run command |
| SKIP_TOKENIZER_IN_TGI | True/False | False | Skip tokenizer for input/output processing | add -e in docker run command |
| WARMUP_ENABLED | True/False | True | Enable warmup during server initialization to recompile all graphs. This can increase TGI setup time. | add -e in docker run command |
| VLLM_SKIP_WARMUP | True/False | False | Skip graph warmup during server initialization. This is not recommended, but can be used for debugging. | add -e in docker run command |
| QUEUE_THRESHOLD_MS | integer | 120 | Controls the threshold beyond which requests are considered overdue and handled with priority. Shorter requests are prioritized otherwise. | add -e in docker run command |
| USE_FLASH_ATTENTION | True/False | True | Whether to enable Habana Flash Attention, provided that the model supports it. Please refer to https://docs.habana.ai/en/latest/PyTorch/Model_Optimization_PyTorch/Optimization_in_PyTorch_Models.html?highlight=fusedsdpa#using-fused-scaled-dot-product-attention-fusedsdpa | add -e in docker run command |
| FLASH_ATTENTION_RECOMPUTE | True/False | True | Whether to enable Habana Flash Attention in recompute mode on first token generation. | add -e in docker run command |

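These variables map directly onto `-e` flags, as in the profiling example earlier. Below is a debug-oriented sketch, assuming the same `$volume`, `$hf_token`, and `$model` variables; as the table notes, skipping warmup is only appropriate for debugging.

```bash
docker run --runtime=habana --ipc=host --cap-add=sys_nice \
-p 8080:80 -v $volume:/data -e HF_TOKEN=$hf_token \
-e VLLM_SKIP_WARMUP=True \
ghcr.io/huggingface/text-generation-inference:3.3.3-gaudi \
--model-id $model
```
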
## Contributing
## Contributing

@ -31,7 +31,7 @@ deployment instructions in the model card:
|
|||||||
The service is launched simply by running the text-generation-inference container with two sets of parameters:
|
The service is launched simply by running the text-generation-inference container with two sets of parameters:
|
||||||
|
|
||||||
```
|
```
|
||||||
docker run <system_parameters> ghcr.io/huggingface/text-generation-inference:3.3.3-neuron <service_parameters>
|
docker run <system_parameters> ghcr.io/huggingface/text-generation-inference:3.3.4-neuron <service_parameters>
|
||||||
```
|
```
|
||||||
|
|
||||||
- system parameters are used to map ports, volumes and devices between the host and the service,
|
- system parameters are used to map ports, volumes and devices between the host and the service,
|
||||||
|
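
To make the split concrete, a hypothetical invocation is sketched below: everything before the image name is a system parameter (port, volume, and device mapping), and everything after it is a service parameter passed to the TGI launcher. The Neuron device path and the `$volume`/`$model` variables are assumptions, not taken from a model card.

```bash
docker run -p 8080:80 -v $volume:/data --device=/dev/neuron0 \
ghcr.io/huggingface/text-generation-inference:3.3.4-neuron \
--model-id $model
```
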
@ -19,6 +19,6 @@ docker run --gpus all \
|
|||||||
--shm-size 1g \
|
--shm-size 1g \
|
||||||
-e HF_TOKEN=$token \
|
-e HF_TOKEN=$token \
|
||||||
-p 8080:80 \
|
-p 8080:80 \
|
||||||
-v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.3 \
|
-v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 \
|
||||||
--model-id $model
|
--model-id $model
|
||||||
```
|
```
|
||||||
|
@ -19,7 +19,7 @@ bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models.
|
|||||||
In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇
|
In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.3 --model-id $model --quantize bitsandbytes
|
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize bitsandbytes
|
||||||
```
|
```
|
||||||
|
|
||||||
4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
|
4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.
|
||||||
@ -27,7 +27,7 @@ docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingf
|
|||||||
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇
|
In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.3 --model-id $model --quantize bitsandbytes-nf4
|
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize bitsandbytes-nf4
|
||||||
```
|
```
|
||||||
|
|
||||||
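The `fp4` variant mentioned above follows the same pattern; a sketch mirroring the `nf4` command shown here:

```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize bitsandbytes-fp4
```
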
You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
|
You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
|
||||||
@ -48,7 +48,7 @@ $$({\hat{W}_{l}}^{*} = argmin_{\hat{W_{l}}} ||W_{l}X-\hat{W}_{l}X||^{2}_{2})$$
|
|||||||
TGI allows you to both run an already GPTQ quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using quantization script. You can run a quantized model by simply passing --quantize like below 👇
|
TGI allows you to both run an already GPTQ quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using quantization script. You can run a quantized model by simply passing --quantize like below 👇
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.3 --model-id $model --quantize gptq
|
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:3.3.4 --model-id $model --quantize gptq
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.
|
Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.
|
||||||
|
@ -11,7 +11,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
|
|||||||
docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
|
docker run --rm -it --cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
|
||||||
--device=/dev/kfd --device=/dev/dri --group-add video \
|
--device=/dev/kfd --device=/dev/dri --group-add video \
|
||||||
--ipc=host --shm-size 256g --net host -v $volume:/data \
|
--ipc=host --shm-size 256g --net host -v $volume:/data \
|
||||||
ghcr.io/huggingface/text-generation-inference:3.3.3-rocm \
|
ghcr.io/huggingface/text-generation-inference:3.3.4-rocm \
|
||||||
--model-id $model
|
--model-id $model
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
|
|||||||
docker run --rm --privileged --cap-add=sys_nice \
|
docker run --rm --privileged --cap-add=sys_nice \
|
||||||
--device=/dev/dri \
|
--device=/dev/dri \
|
||||||
--ipc=host --shm-size 1g --net host -v $volume:/data \
|
--ipc=host --shm-size 1g --net host -v $volume:/data \
|
||||||
ghcr.io/huggingface/text-generation-inference:3.3.3-intel-xpu \
|
ghcr.io/huggingface/text-generation-inference:3.3.4-intel-xpu \
|
||||||
--model-id $model --cuda-graphs 0
|
--model-id $model --cuda-graphs 0
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -29,7 +29,7 @@ volume=$PWD/data # share a volume with the Docker container to avoid downloading
|
|||||||
docker run --rm --privileged --cap-add=sys_nice \
|
docker run --rm --privileged --cap-add=sys_nice \
|
||||||
--device=/dev/dri \
|
--device=/dev/dri \
|
||||||
--ipc=host --shm-size 1g --net host -v $volume:/data \
|
--ipc=host --shm-size 1g --net host -v $volume:/data \
|
||||||
ghcr.io/huggingface/text-generation-inference:3.3.3-intel-cpu \
|
ghcr.io/huggingface/text-generation-inference:3.3.4-intel-cpu \
|
||||||
--model-id $model --cuda-graphs 0
|
--model-id $model --cuda-graphs 0
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
|
|||||||
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
|
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
|
||||||
|
|
||||||
docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
|
docker run --gpus all --shm-size 64g -p 8080:80 -v $volume:/data \
|
||||||
ghcr.io/huggingface/text-generation-inference:3.3.3 \
|
ghcr.io/huggingface/text-generation-inference:3.3.4 \
|
||||||
--model-id $model
|
--model-id $model
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -11,7 +11,7 @@ model=teknium/OpenHermes-2.5-Mistral-7B
|
|||||||
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
|
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
|
||||||
|
|
||||||
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
|
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
|
||||||
ghcr.io/huggingface/text-generation-inference:3.3.3 \
|
ghcr.io/huggingface/text-generation-inference:3.3.4 \
|
||||||
--model-id $model
|
--model-id $model
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -96,7 +96,7 @@ curl 127.0.0.1:8080/generate \
|
|||||||
To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
|
To see all possible deploy flags and options, you can use the `--help` flag. It's possible to configure the number of shards, quantization, generation parameters, and more.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
docker run ghcr.io/huggingface/text-generation-inference:3.3.3 --help
|
docker run ghcr.io/huggingface/text-generation-inference:3.3.4 --help
|
||||||
```
|
```
|
||||||
|
|
||||||
</Tip>
|
</Tip>
|
||||||
|
@ -163,7 +163,7 @@ hub = {
|
|||||||
|
|
||||||
# create Hugging Face Model Class
|
# create Hugging Face Model Class
|
||||||
huggingface_model = HuggingFaceModel(
|
huggingface_model = HuggingFaceModel(
|
||||||
image_uri=get_huggingface_llm_image_uri("huggingface",version="3.3.3"),
|
image_uri=get_huggingface_llm_image_uri("huggingface",version="3.3.4"),
|
||||||
env=hub,
|
env=hub,
|
||||||
role=role,
|
role=role,
|
||||||
)
|
)
|
||||||
|
@ -1,4 +1,8 @@
|
|||||||
pytest_plugins = ["fixtures.neuron.service", "fixtures.neuron.export_models"]
|
pytest_plugins = [
|
||||||
|
"fixtures.neuron.service",
|
||||||
|
"fixtures.neuron.export_models",
|
||||||
|
"fixtures.gaudi.service",
|
||||||
|
]
|
||||||
# ruff: noqa: E402
|
# ruff: noqa: E402
|
||||||
from _pytest.fixtures import SubRequest
|
from _pytest.fixtures import SubRequest
|
||||||
from huggingface_hub.inference._generated.types.chat_completion import (
|
from huggingface_hub.inference._generated.types.chat_completion import (
|
||||||
@ -68,6 +72,15 @@ def pytest_addoption(parser):
|
|||||||
parser.addoption(
|
parser.addoption(
|
||||||
"--neuron", action="store_true", default=False, help="run neuron tests"
|
"--neuron", action="store_true", default=False, help="run neuron tests"
|
||||||
)
|
)
|
||||||
|
parser.addoption(
|
||||||
|
"--gaudi", action="store_true", default=False, help="run gaudi tests"
|
||||||
|
)
|
||||||
|
parser.addoption(
|
||||||
|
"--gaudi-all-models",
|
||||||
|
action="store_true",
|
||||||
|
default=False,
|
||||||
|
help="Run tests for all models instead of just the default subset",
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
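
For reference, these new options would be consumed from the test suite roughly as sketched below; the working directory and test paths are assumptions, not taken from this diff.

```bash
# Run only the Gaudi-marked integration tests (default model subset).
pytest integration-tests --gaudi

# Run the Gaudi tests against every entry in TEST_CONFIGS, not just the default subset.
pytest integration-tests --gaudi --gaudi-all-models
```
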
def pytest_configure(config):
|
def pytest_configure(config):
|
||||||
@ -84,6 +97,22 @@ def pytest_collection_modifyitems(config, items):
|
|||||||
item.add_marker(pytest.mark.skip(reason="need --release option to run"))
|
item.add_marker(pytest.mark.skip(reason="need --release option to run"))
|
||||||
|
|
||||||
selectors.append(skip_release)
|
selectors.append(skip_release)
|
||||||
|
|
||||||
|
if config.getoption("--gaudi"):
|
||||||
|
|
||||||
|
def skip_not_gaudi(item):
|
||||||
|
if "gaudi" not in item.keywords:
|
||||||
|
item.add_marker(pytest.mark.skip(reason="requires --gaudi to run"))
|
||||||
|
|
||||||
|
selectors.append(skip_not_gaudi)
|
||||||
|
else:
|
||||||
|
|
||||||
|
def skip_gaudi(item):
|
||||||
|
if "gaudi" in item.keywords:
|
||||||
|
item.add_marker(pytest.mark.skip(reason="requires --gaudi to run"))
|
||||||
|
|
||||||
|
selectors.append(skip_gaudi)
|
||||||
|
|
||||||
if config.getoption("--neuron"):
|
if config.getoption("--neuron"):
|
||||||
|
|
||||||
def skip_not_neuron(item):
|
def skip_not_neuron(item):
|
||||||
@ -100,6 +129,7 @@ def pytest_collection_modifyitems(config, items):
|
|||||||
item.add_marker(pytest.mark.skip(reason="requires --neuron to run"))
|
item.add_marker(pytest.mark.skip(reason="requires --neuron to run"))
|
||||||
|
|
||||||
selectors.append(skip_neuron)
|
selectors.append(skip_neuron)
|
||||||
|
|
||||||
for item in items:
|
for item in items:
|
||||||
for selector in selectors:
|
for selector in selectors:
|
||||||
selector(item)
|
selector(item)
|
||||||
|
@ -14,15 +14,21 @@ import docker
|
|||||||
import pytest
|
import pytest
|
||||||
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
|
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
|
||||||
from docker.errors import NotFound
|
from docker.errors import NotFound
|
||||||
from loguru import logger
|
import logging
|
||||||
from test_model import TEST_CONFIGS
|
from huggingface_hub import AsyncInferenceClient, TextGenerationOutput
|
||||||
from text_generation import AsyncClient
|
import huggingface_hub
|
||||||
from text_generation.types import Response
|
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
|
||||||
|
stream=sys.stdout,
|
||||||
|
)
|
||||||
|
logger = logging.getLogger(__file__)
|
||||||
|
|
||||||
# Use the latest image from the local docker build
|
# Use the latest image from the local docker build
|
||||||
DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", "tgi-gaudi")
|
DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", "tgi-gaudi")
|
||||||
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", None)
|
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", None)
|
||||||
HF_TOKEN = os.getenv("HF_TOKEN", None)
|
HF_TOKEN = huggingface_hub.get_token()
|
||||||
|
|
||||||
assert (
|
assert (
|
||||||
HF_TOKEN is not None
|
HF_TOKEN is not None
|
||||||
@ -48,12 +54,6 @@ HABANA_RUN_ARGS = {
|
|||||||
"cap_add": ["sys_nice"],
|
"cap_add": ["sys_nice"],
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.add(
|
|
||||||
sys.stderr,
|
|
||||||
format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
|
|
||||||
level="INFO",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def stream_container_logs(container, test_name):
|
def stream_container_logs(container, test_name):
|
||||||
"""Stream container logs in a separate thread."""
|
"""Stream container logs in a separate thread."""
|
||||||
@ -69,9 +69,15 @@ def stream_container_logs(container, test_name):
|
|||||||
logger.error(f"Error streaming container logs: {str(e)}")
|
logger.error(f"Error streaming container logs: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
class TestClient(AsyncInferenceClient):
|
||||||
|
def __init__(self, service_name: str, base_url: str):
|
||||||
|
super().__init__(model=base_url)
|
||||||
|
self.service_name = service_name
|
||||||
|
|
||||||
|
|
||||||
class LauncherHandle:
|
class LauncherHandle:
|
||||||
def __init__(self, port: int):
|
def __init__(self, service_name: str, port: int):
|
||||||
self.client = AsyncClient(f"http://localhost:{port}", timeout=3600)
|
self.client = TestClient(service_name, f"http://localhost:{port}")
|
||||||
|
|
||||||
def _inner_health(self):
|
def _inner_health(self):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
@ -87,7 +93,7 @@ class LauncherHandle:
|
|||||||
raise RuntimeError("Launcher crashed")
|
raise RuntimeError("Launcher crashed")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
await self.client.generate("test")
|
await self.client.text_generation("test", max_new_tokens=1)
|
||||||
elapsed = time.time() - start_time
|
elapsed = time.time() - start_time
|
||||||
logger.info(f"Health check passed after {elapsed:.1f}s")
|
logger.info(f"Health check passed after {elapsed:.1f}s")
|
||||||
return
|
return
|
||||||
@ -111,7 +117,8 @@ class LauncherHandle:
|
|||||||
|
|
||||||
class ContainerLauncherHandle(LauncherHandle):
|
class ContainerLauncherHandle(LauncherHandle):
|
||||||
def __init__(self, docker_client, container_name, port: int):
|
def __init__(self, docker_client, container_name, port: int):
|
||||||
super(ContainerLauncherHandle, self).__init__(port)
|
service_name = container_name # Use container name as service name
|
||||||
|
super(ContainerLauncherHandle, self).__init__(service_name, port)
|
||||||
self.docker_client = docker_client
|
self.docker_client = docker_client
|
||||||
self.container_name = container_name
|
self.container_name = container_name
|
||||||
|
|
||||||
@ -132,7 +139,8 @@ class ContainerLauncherHandle(LauncherHandle):
|
|||||||
|
|
||||||
class ProcessLauncherHandle(LauncherHandle):
|
class ProcessLauncherHandle(LauncherHandle):
|
||||||
def __init__(self, process, port: int):
|
def __init__(self, process, port: int):
|
||||||
super(ProcessLauncherHandle, self).__init__(port)
|
service_name = "process" # Use generic name for process launcher
|
||||||
|
super(ProcessLauncherHandle, self).__init__(service_name, port)
|
||||||
self.process = process
|
self.process = process
|
||||||
|
|
||||||
def _inner_health(self) -> bool:
|
def _inner_health(self) -> bool:
|
||||||
@ -151,11 +159,13 @@ def data_volume():
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def launcher(data_volume):
|
def gaudi_launcher():
|
||||||
@contextlib.contextmanager
|
@contextlib.contextmanager
|
||||||
def docker_launcher(
|
def docker_launcher(
|
||||||
model_id: str,
|
model_id: str,
|
||||||
test_name: str,
|
test_name: str,
|
||||||
|
tgi_args: List[str] = None,
|
||||||
|
env_config: dict = None,
|
||||||
):
|
):
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Starting docker launcher for model {model_id} and test {test_name}"
|
f"Starting docker launcher for model {model_id} and test {test_name}"
|
||||||
@ -183,32 +193,40 @@ def launcher(data_volume):
|
|||||||
)
|
)
|
||||||
container.stop()
|
container.stop()
|
||||||
container.wait()
|
container.wait()
|
||||||
|
container.remove()
|
||||||
|
logger.info(f"Removed existing container {container_name}")
|
||||||
except NotFound:
|
except NotFound:
|
||||||
pass
|
pass
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error handling existing container: {str(e)}")
|
logger.error(f"Error handling existing container: {str(e)}")
|
||||||
|
|
||||||
model_name = next(
|
if tgi_args is None:
|
||||||
name for name, cfg in TEST_CONFIGS.items() if cfg["model_id"] == model_id
|
tgi_args = []
|
||||||
)
|
else:
|
||||||
|
tgi_args = tgi_args.copy()
|
||||||
tgi_args = TEST_CONFIGS[model_name]["args"].copy()
|
|
||||||
|
|
||||||
env = BASE_ENV.copy()
|
env = BASE_ENV.copy()
|
||||||
|
|
||||||
# Add model_id to env
|
# Add model_id to env
|
||||||
env["MODEL_ID"] = model_id
|
env["MODEL_ID"] = model_id
|
||||||
|
|
||||||
# Add env config that is definied in the fixture parameter
|
# Add env config that is defined in the fixture parameter
|
||||||
if "env_config" in TEST_CONFIGS[model_name]:
|
if env_config is not None:
|
||||||
env.update(TEST_CONFIGS[model_name]["env_config"].copy())
|
env.update(env_config.copy())
|
||||||
|
|
||||||
volumes = [f"{DOCKER_VOLUME}:/data"]
|
volumes = []
|
||||||
|
if DOCKER_VOLUME:
|
||||||
|
volumes = [f"{DOCKER_VOLUME}:/data"]
|
||||||
logger.debug(f"Using volume {volumes}")
|
logger.debug(f"Using volume {volumes}")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
logger.debug(f"Using command {tgi_args}")
|
||||||
logger.info(f"Creating container with name {container_name}")
|
logger.info(f"Creating container with name {container_name}")
|
||||||
|
|
||||||
|
logger.debug(f"Using environment {env}")
|
||||||
|
logger.debug(f"Using volumes {volumes}")
|
||||||
|
logger.debug(f"HABANA_RUN_ARGS {HABANA_RUN_ARGS}")
|
||||||
|
|
||||||
# Log equivalent docker run command for debugging, this is not actually executed
|
# Log equivalent docker run command for debugging, this is not actually executed
|
||||||
container = client.containers.run(
|
container = client.containers.run(
|
||||||
DOCKER_IMAGE,
|
DOCKER_IMAGE,
|
||||||
@ -271,15 +289,16 @@ def launcher(data_volume):
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def generate_load():
|
def gaudi_generate_load():
|
||||||
async def generate_load_inner(
|
async def generate_load_inner(
|
||||||
client: AsyncClient, prompt: str, max_new_tokens: int, n: int
|
client: AsyncInferenceClient, prompt: str, max_new_tokens: int, n: int
|
||||||
) -> List[Response]:
|
) -> List[TextGenerationOutput]:
|
||||||
try:
|
try:
|
||||||
futures = [
|
futures = [
|
||||||
client.generate(
|
client.text_generation(
|
||||||
prompt,
|
prompt,
|
||||||
max_new_tokens=max_new_tokens,
|
max_new_tokens=max_new_tokens,
|
||||||
|
details=True,
|
||||||
decoder_input_details=True,
|
decoder_input_details=True,
|
||||||
)
|
)
|
||||||
for _ in range(n)
|
for _ in range(n)
|
@ -46,6 +46,15 @@ MODEL_CONFIGURATIONS = {
|
|||||||
"auto_cast_type": "fp16",
|
"auto_cast_type": "fp16",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"qwen3": {
|
||||||
|
"model_id": "Qwen/Qwen3-1.7B",
|
||||||
|
"export_kwargs": {
|
||||||
|
"batch_size": 4,
|
||||||
|
"sequence_length": 4096,
|
||||||
|
"num_cores": 2,
|
||||||
|
"auto_cast_type": "bf16",
|
||||||
|
},
|
||||||
|
},
|
||||||
"granite": {
|
"granite": {
|
||||||
"model_id": "ibm-granite/granite-3.1-2b-instruct",
|
"model_id": "ibm-granite/granite-3.1-2b-instruct",
|
||||||
"export_kwargs": {
|
"export_kwargs": {
|
||||||
@ -55,6 +64,15 @@ MODEL_CONFIGURATIONS = {
|
|||||||
"auto_cast_type": "bf16",
|
"auto_cast_type": "bf16",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"phi3": {
|
||||||
|
"model_id": "microsoft/Phi-3-mini-4k-instruct",
|
||||||
|
"export_kwargs": {
|
||||||
|
"batch_size": 4,
|
||||||
|
"sequence_length": 4096,
|
||||||
|
"num_cores": 2,
|
||||||
|
"auto_cast_type": "bf16",
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -3,7 +3,7 @@ import os
|
|||||||
from typing import Dict, Any, Generator
|
from typing import Dict, Any, Generator
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
from test_model import TEST_CONFIGS
|
from test_gaudi_generate import TEST_CONFIGS
|
||||||
|
|
||||||
UNKNOWN_CONFIGS = {
|
UNKNOWN_CONFIGS = {
|
||||||
name: config
|
name: config
|
@ -1,12 +1,18 @@
|
|||||||
from typing import Any, Dict
|
from typing import Any, Dict, Generator
|
||||||
|
from _pytest.fixtures import SubRequest
|
||||||
from text_generation import AsyncClient
|
from huggingface_hub import AsyncInferenceClient
|
||||||
import pytest
|
import pytest
|
||||||
from Levenshtein import distance as levenshtein_distance
|
|
||||||
|
|
||||||
# The "args" config is not optimized for speed but only check that the inference is working for the different models architectures
|
|
||||||
|
def pytest_configure(config):
|
||||||
|
config.addinivalue_line(
|
||||||
|
"markers", "gaudi_all_models: mark test to run with all models"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# The "args" values in TEST_CONFIGS are not optimized for speed but only check that the inference is working for the different models architectures.
|
||||||
TEST_CONFIGS = {
|
TEST_CONFIGS = {
|
||||||
"meta-llama/Llama-3.1-8B-Instruct-shared": {
|
"meta-llama/Llama-3.1-8B-Instruct-sharded": {
|
||||||
"model_id": "meta-llama/Llama-3.1-8B-Instruct",
|
"model_id": "meta-llama/Llama-3.1-8B-Instruct",
|
||||||
"input": "What is Deep Learning?",
|
"input": "What is Deep Learning?",
|
||||||
"expected_greedy_output": " A Beginner’s Guide\nDeep learning is a subset of machine learning that involves the use of artificial neural networks to analyze and interpret data. It is a type of",
|
"expected_greedy_output": " A Beginner’s Guide\nDeep learning is a subset of machine learning that involves the use of artificial neural networks to analyze and interpret data. It is a type of",
|
||||||
@ -15,16 +21,17 @@ TEST_CONFIGS = {
|
|||||||
"--sharded",
|
"--sharded",
|
||||||
"true",
|
"true",
|
||||||
"--num-shard",
|
"--num-shard",
|
||||||
"8",
|
"2",
|
||||||
"--max-input-tokens",
|
"--max-input-tokens",
|
||||||
"512",
|
"512",
|
||||||
"--max-total-tokens",
|
"--max-total-tokens",
|
||||||
"1024",
|
"1024",
|
||||||
"--max-batch-size",
|
"--max-batch-size",
|
||||||
"8",
|
"4",
|
||||||
"--max-batch-prefill-tokens",
|
"--max-batch-prefill-tokens",
|
||||||
"2048",
|
"2048",
|
||||||
],
|
],
|
||||||
|
"run_by_default": True,
|
||||||
},
|
},
|
||||||
"meta-llama/Llama-3.1-8B-Instruct": {
|
"meta-llama/Llama-3.1-8B-Instruct": {
|
||||||
"model_id": "meta-llama/Llama-3.1-8B-Instruct",
|
"model_id": "meta-llama/Llama-3.1-8B-Instruct",
|
||||||
@ -42,6 +49,7 @@ TEST_CONFIGS = {
|
|||||||
"--max-batch-prefill-tokens",
|
"--max-batch-prefill-tokens",
|
||||||
"2048",
|
"2048",
|
||||||
],
|
],
|
||||||
|
"run_by_default": True,
|
||||||
},
|
},
|
||||||
"meta-llama/Llama-2-7b-chat-hf": {
|
"meta-llama/Llama-2-7b-chat-hf": {
|
||||||
"model_id": "meta-llama/Llama-2-7b-chat-hf",
|
"model_id": "meta-llama/Llama-2-7b-chat-hf",
|
||||||
@ -94,8 +102,8 @@ TEST_CONFIGS = {
|
|||||||
"google/gemma-7b-it": {
|
"google/gemma-7b-it": {
|
||||||
"model_id": "google/gemma-7b-it",
|
"model_id": "google/gemma-7b-it",
|
||||||
"input": "What is Deep Learning?",
|
"input": "What is Deep Learning?",
|
||||||
"expected_greedy_output": "\n\nDeep learning is a subset of machine learning that uses artificial neural networks to learn from large amounts of data. Neural networks are inspired by the structure and function of",
|
"expected_greedy_output": "\n\nDeep learning is a subset of machine learning that uses artificial neural networks to learn from large amounts of data. Deep learning is a powerful tool for many tasks,",
|
||||||
"expected_batch_output": "\n\nDeep learning is a subset of machine learning that uses artificial neural networks to learn from large amounts of data. Neural networks are inspired by the structure and function of",
|
"expected_batch_output": "\n\nDeep learning is a subset of machine learning that uses artificial neural networks to learn from large amounts of data. Deep learning is a powerful tool for many tasks,",
|
||||||
"args": [
|
"args": [
|
||||||
"--max-input-tokens",
|
"--max-input-tokens",
|
||||||
"512",
|
"512",
|
||||||
@ -154,8 +162,8 @@ TEST_CONFIGS = {
|
|||||||
"openai-community/gpt2": {
|
"openai-community/gpt2": {
|
||||||
"model_id": "openai-community/gpt2",
|
"model_id": "openai-community/gpt2",
|
||||||
"input": "What is Deep Learning?",
|
"input": "What is Deep Learning?",
|
||||||
"expected_greedy_output": "\n\nDeep learning is a new field of research that has been around for a long time. It is a new field of research that has been around for a",
|
"expected_greedy_output": "\n\nDeep learning is a subset of machine learning that is based on artificial neural networks. It is a type of machine learning that is based on the idea of",
|
||||||
"expected_batch_output": "\n\nDeep learning is a new field of research that has been around for a long time. It is a new field of research that has been around for a",
|
"expected_batch_output": "\n\nDeep learning is a subset of machine learning that is based on artificial neural networks. It is a type of machine learning that is based on the idea of",
|
||||||
"args": [
|
"args": [
|
||||||
"--max-input-tokens",
|
"--max-input-tokens",
|
||||||
"512",
|
"512",
|
||||||
@ -181,72 +189,98 @@ TEST_CONFIGS = {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
print(f"Testing {len(TEST_CONFIGS)} models")
|
|
||||||
|
def pytest_generate_tests(metafunc):
|
||||||
|
if "test_config" in metafunc.fixturenames:
|
||||||
|
if metafunc.config.getoption("--gaudi-all-models"):
|
||||||
|
models = list(TEST_CONFIGS.keys())
|
||||||
|
else:
|
||||||
|
models = [
|
||||||
|
name
|
||||||
|
for name, config in TEST_CONFIGS.items()
|
||||||
|
if config.get("run_by_default", False)
|
||||||
|
]
|
||||||
|
print(f"Testing {len(models)} models")
|
||||||
|
metafunc.parametrize("test_config", models, indirect=True)
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module", params=TEST_CONFIGS.keys())
|
@pytest.fixture(scope="module")
|
||||||
def test_config(request) -> Dict[str, Any]:
|
def test_config(request: SubRequest) -> Dict[str, Any]:
|
||||||
"""Fixture that provides model configurations for testing."""
|
"""Fixture that provides model configurations for testing."""
|
||||||
test_config = TEST_CONFIGS[request.param]
|
model_name = request.param
|
||||||
test_config["test_name"] = request.param
|
test_config = TEST_CONFIGS[model_name]
|
||||||
|
test_config["test_name"] = model_name
|
||||||
return test_config
|
return test_config
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def model_id(test_config):
|
def model_id(test_config: Dict[str, Any]) -> Generator[str, None, None]:
|
||||||
yield test_config["model_id"]
|
yield test_config["model_id"]
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def test_name(test_config):
|
def test_name(test_config: Dict[str, Any]) -> Generator[str, None, None]:
|
||||||
yield test_config["test_name"]
|
yield test_config["test_name"]
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def expected_outputs(test_config):
|
def expected_outputs(test_config: Dict[str, Any]) -> Dict[str, str]:
|
||||||
return {
|
return {
|
||||||
"greedy": test_config["expected_greedy_output"],
|
"greedy": test_config["expected_greedy_output"],
|
||||||
# "sampling": model_config["expected_sampling_output"],
|
|
||||||
"batch": test_config["expected_batch_output"],
|
"batch": test_config["expected_batch_output"],
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def input(test_config):
|
def input(test_config: Dict[str, Any]) -> str:
|
||||||
return test_config["input"]
|
return test_config["input"]
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
def tgi_service(launcher, model_id, test_name):
|
def tgi_service(
|
||||||
with launcher(model_id, test_name) as tgi_service:
|
gaudi_launcher, model_id: str, test_name: str, test_config: Dict[str, Any]
|
||||||
|
):
|
||||||
|
with gaudi_launcher(
|
||||||
|
model_id,
|
||||||
|
test_name,
|
||||||
|
tgi_args=test_config.get("args", []),
|
||||||
|
env_config=test_config.get("env_config", {}),
|
||||||
|
) as tgi_service:
|
||||||
yield tgi_service
|
yield tgi_service
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="module")
|
@pytest.fixture(scope="module")
|
||||||
async def tgi_client(tgi_service) -> AsyncClient:
|
async def tgi_client(tgi_service) -> AsyncInferenceClient:
|
||||||
await tgi_service.health(1000)
|
await tgi_service.health(1000)
|
||||||
return tgi_service.client
|
return tgi_service.client
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
|
@pytest.mark.all_models
|
||||||
async def test_model_single_request(
|
async def test_model_single_request(
|
||||||
tgi_client: AsyncClient, expected_outputs: Dict[str, Any], input: str
|
tgi_client: AsyncInferenceClient, expected_outputs: Dict[str, str], input: str
|
||||||
):
|
):
|
||||||
# Bounded greedy decoding without input
|
# Bounded greedy decoding without input
|
||||||
response = await tgi_client.generate(
|
response = await tgi_client.text_generation(
|
||||||
input,
|
input,
|
||||||
max_new_tokens=32,
|
max_new_tokens=32,
|
||||||
|
details=True,
|
||||||
|
decoder_input_details=True,
|
||||||
)
|
)
|
||||||
assert response.details.generated_tokens == 32
|
assert response.details.generated_tokens == 32
|
||||||
assert response.generated_text == expected_outputs["greedy"]
|
assert response.generated_text == expected_outputs["greedy"]
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.asyncio
|
@pytest.mark.asyncio
|
||||||
|
@pytest.mark.all_models
|
||||||
async def test_model_multiple_requests(
|
async def test_model_multiple_requests(
|
||||||
tgi_client, generate_load, expected_outputs, input
|
tgi_client: AsyncInferenceClient,
|
||||||
|
gaudi_generate_load,
|
||||||
|
expected_outputs: Dict[str, str],
|
||||||
|
input: str,
|
||||||
):
|
):
|
||||||
num_requests = 4
|
num_requests = 4
|
||||||
responses = await generate_load(
|
responses = await gaudi_generate_load(
|
||||||
tgi_client,
|
tgi_client,
|
||||||
input,
|
input,
|
||||||
max_new_tokens=32,
|
max_new_tokens=32,
|
||||||
@ -257,6 +291,4 @@ async def test_model_multiple_requests(
|
|||||||
expected = expected_outputs["batch"]
|
expected = expected_outputs["batch"]
|
||||||
for r in responses:
|
for r in responses:
|
||||||
assert r.details.generated_tokens == 32
|
assert r.details.generated_tokens == 32
|
||||||
# Compute the similarity with the expectation using the levenshtein distance
|
assert r.generated_text == expected
|
||||||
# We should not have more than two substitutions or additions
|
|
||||||
assert levenshtein_distance(r.generated_text, expected) < 3
|
|
@ -17,7 +17,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 42,
|
"completion_tokens": 42,
|
||||||
"prompt_tokens": 277,
|
"prompt_tokens": 277,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 62,
|
"completion_tokens": 62,
|
||||||
"prompt_tokens": 277,
|
"prompt_tokens": 277,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 67,
|
"completion_tokens": 67,
|
||||||
"prompt_tokens": 277,
|
"prompt_tokens": 277,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 72,
|
"completion_tokens": 72,
|
||||||
"prompt_tokens": 275,
|
"prompt_tokens": 275,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 80,
|
"completion_tokens": 80,
|
||||||
"prompt_tokens": 279,
|
"prompt_tokens": 279,
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 35,
|
"completion_tokens": 35,
|
||||||
"prompt_tokens": 32,
|
"prompt_tokens": 32,
|
||||||
|
@ -14,7 +14,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "google/gemma-3-4b-it",
|
"model": "google/gemma-3-4b-it",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 44,
|
"completion_tokens": 44,
|
||||||
"prompt_tokens": 37,
|
"prompt_tokens": 37,
|
||||||
|
@ -18,7 +18,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "unsloth/Llama-3.2-11B-Vision-Instruct",
|
"model": "unsloth/Llama-3.2-11B-Vision-Instruct",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 10,
|
"completion_tokens": 10,
|
||||||
"prompt_tokens": 45,
|
"prompt_tokens": 45,
|
||||||
@ -44,7 +44,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "unsloth/Llama-3.2-11B-Vision-Instruct",
|
"model": "unsloth/Llama-3.2-11B-Vision-Instruct",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 10,
|
"completion_tokens": 10,
|
||||||
"prompt_tokens": 45,
|
"prompt_tokens": 45,
|
||||||
|
@ -17,7 +17,7 @@
|
|||||||
"id": "",
|
"id": "",
|
||||||
"model": "unsloth/Llama-3.2-11B-Vision-Instruct",
|
"model": "unsloth/Llama-3.2-11B-Vision-Instruct",
|
||||||
"object": "chat.completion",
|
"object": "chat.completion",
|
||||||
"system_fingerprint": "3.3.3-dev0-native",
|
"system_fingerprint": "3.3.4-dev0-native",
|
||||||
"usage": {
|
"usage": {
|
||||||
"completion_tokens": 10,
|
"completion_tokens": 10,
|
||||||
"prompt_tokens": 45,
|
"prompt_tokens": 45,
|
||||||
|
@ -21,8 +21,10 @@ async def test_model_single_request(tgi_service):
|
|||||||
assert response.details.generated_tokens == 17
|
assert response.details.generated_tokens == 17
|
||||||
greedy_expectations = {
|
greedy_expectations = {
|
||||||
"llama": " and how does it work?\nDeep learning is a subset of machine learning that uses artificial",
|
"llama": " and how does it work?\nDeep learning is a subset of machine learning that uses artificial",
|
||||||
"qwen2": " - Part 1\n\nDeep Learning is a subset of Machine Learning that is based on",
|
"qwen2": " - Deep Learning is a subset of Machine Learning that involves the use of artificial neural networks",
|
||||||
"granite": "\n\nDeep Learning is a subset of Machine Learning, which is a branch of Art",
|
"granite": "\n\nDeep learning is a subset of machine learning techniques based on artificial neural networks",
|
||||||
|
"qwen3": " A Deep Learning is a subset of machine learning that uses neural networks with multiple layers to",
|
||||||
|
"phi3": "\n\nDeep learning is a subfield of machine learning that focuses on creating",
|
||||||
}
|
}
|
||||||
assert response.generated_text == greedy_expectations[service_name]
|
assert response.generated_text == greedy_expectations[service_name]
|
||||||
|
|
||||||
@ -78,8 +80,10 @@ async def test_model_multiple_requests(tgi_service, neuron_generate_load):
|
|||||||
assert len(responses) == 4
|
assert len(responses) == 4
|
||||||
expectations = {
|
expectations = {
|
||||||
"llama": "Deep learning is a subset of machine learning that uses artificial",
|
"llama": "Deep learning is a subset of machine learning that uses artificial",
|
||||||
"qwen2": "Deep Learning is a subset of Machine Learning that is based on",
|
"qwen2": "Deep Learning is a subset of Machine Learning that involves",
|
||||||
"granite": "Deep Learning is a subset of Machine Learning, which is a branch of Art",
|
"granite": "Deep learning is a subset of machine learning techniques",
|
||||||
|
"qwen3": "Deep Learning is a subset of machine learning that uses neural networks",
|
||||||
|
"phi3": "Deep learning is a subfield of machine learning that focuses on creating",
|
||||||
}
|
}
|
||||||
expected = expectations[tgi_service.client.service_name]
|
expected = expectations[tgi_service.client.service_name]
|
||||||
for r in responses:
|
for r in responses:
|
||||||
|
@ -11,9 +11,8 @@ import torch
|
|||||||
from peft import LoraConfig as _LoraConfig
|
from peft import LoraConfig as _LoraConfig
|
||||||
from torch.distributed import ProcessGroup
|
from torch.distributed import ProcessGroup
|
||||||
from text_generation_server.utils.log import log_master
|
from text_generation_server.utils.log import log_master
|
||||||
|
|
||||||
from text_generation_server.adapters.config import AdapterConfig, ModuleMap
|
|
||||||
from text_generation_server.utils.import_utils import SYSTEM
|
from text_generation_server.utils.import_utils import SYSTEM
|
||||||
|
from text_generation_server.adapters.config import AdapterConfig, ModuleMap
|
||||||
from text_generation_server.utils.kernels import load_kernel
|
from text_generation_server.utils.kernels import load_kernel
|
||||||
from text_generation_server.adapters.weights import (
|
from text_generation_server.adapters.weights import (
|
||||||
AdapterBatchMetadata,
|
AdapterBatchMetadata,
|
||||||
@ -128,17 +127,27 @@ class LoraWeights(AdapterWeights):
|
|||||||
self.lora_a_r = weights_a[0].size(1) if len(weights_a) > 0 else 1
|
self.lora_a_r = weights_a[0].size(1) if len(weights_a) > 0 else 1
|
||||||
self.lora_b_r = weights_b[0].size(0) if len(weights_a) > 0 else 1
|
self.lora_b_r = weights_b[0].size(0) if len(weights_a) > 0 else 1
|
||||||
|
|
||||||
self._use_cutlass_shrink = punica_sgmv.use_cutlass_shrink(self.lora_a_r)
|
|
||||||
self._is_transposed = False
|
self._is_transposed = False
|
||||||
|
if SYSTEM == "ipex":
|
||||||
|
self._use_cutlass_shrink = False
|
||||||
|
# [num_layers, r, hidden_size]
|
||||||
|
weights_a = [w.transpose(0, 1).contiguous() for w in weights_a]
|
||||||
|
self._weights_a = torch.stack(weights_a)
|
||||||
|
|
||||||
# [num_layers, hidden_size, r]
|
# [num_layers, hidden_size, r]
|
||||||
weights_a = [
|
weights_b = [w.transpose(0, 1).contiguous() for w in weights_b]
|
||||||
punica_sgmv.orient_for_rank(w, w.size(1)).contiguous() for w in weights_a
|
self._weights_b = torch.stack(weights_b)
|
||||||
]
|
else:
|
||||||
self._weights_a = torch.stack(weights_a)
|
self._use_cutlass_shrink = punica_sgmv.use_cutlass_shrink(self.lora_a_r)
|
||||||
|
# [num_layers, hidden_size, r]
|
||||||
|
weights_a = [
|
||||||
|
punica_sgmv.orient_for_rank(w, w.size(1)).contiguous()
|
||||||
|
for w in weights_a
|
||||||
|
]
|
||||||
|
self._weights_a = torch.stack(weights_a)
|
||||||
|
|
||||||
# [num_layers, r, hidden_size]
|
# [num_layers, r, hidden_size]
|
||||||
self._weights_b = torch.stack(weights_b)
|
self._weights_b = torch.stack(weights_b)
|
||||||
|
|
||||||
self.adapter_config = adapter_config
|
self.adapter_config = adapter_config
|
||||||
|
|
||||||
@ -175,7 +184,10 @@ class LoraWeights(AdapterWeights):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_batch_types(cls) -> List[Type[BatchAdapterWeights]]:
|
def get_batch_types(cls) -> List[Type[BatchAdapterWeights]]:
|
||||||
return [BatchLoraWeights]
|
if SYSTEM == "ipex":
|
||||||
|
return [IPEXBatchLoraWeights]
|
||||||
|
else:
|
||||||
|
return [BatchLoraWeights]
|
||||||
|
|
||||||
# prepare pre-loaded lora weights for use in the model.
|
# prepare pre-loaded lora weights for use in the model.
|
||||||
#
|
#
|
||||||
@ -245,17 +257,20 @@ class LoraWeights(AdapterWeights):
|
|||||||
lora_b_list[layer_id] = lora_b.transpose(0, 1) * scale
|
lora_b_list[layer_id] = lora_b.transpose(0, 1) * scale
|
||||||
|
|
||||||
# pad lora ranks to be compatible with sgmv
|
# pad lora ranks to be compatible with sgmv
|
||||||
lora_a_list = [
|
if SYSTEM != "ipex":
|
||||||
punica_sgmv.pad_rank(w, dim=1, world_size=world_size) for w in lora_a_list
|
lora_a_list = [
|
||||||
]
|
punica_sgmv.pad_rank(w, dim=1, world_size=world_size)
|
||||||
lora_b_list = [
|
for w in lora_a_list
|
||||||
punica_sgmv.pad_rank(w, dim=0, world_size=world_size) for w in lora_b_list
|
]
|
||||||
]
|
lora_b_list = [
|
||||||
|
punica_sgmv.pad_rank(w, dim=0, world_size=world_size)
|
||||||
|
for w in lora_b_list
|
||||||
|
]
|
||||||
|
|
||||||
if lora_a_list:
|
if lora_a_list:
|
||||||
# update rank if it was padded
|
# update rank if it was padded
|
||||||
padded_rank = lora_a_list[0].size(1)
|
padded_rank = lora_a_list[0].size(1)
|
||||||
config.r = padded_rank
|
config.r = padded_rank
|
||||||
|
|
||||||
return LoraWeights(
|
return LoraWeights(
|
||||||
*shard_lora_weights(
|
*shard_lora_weights(
|
@@ -471,6 +486,115 @@ class BatchLoraWeights(BatchAdapterWeights):
         )


+@dataclass
+class IPEXBatchLoraWeights(BatchLoraWeights):
+    @classmethod
+    def load(
+        self,
+        adapter_weights: Dict[int, AdapterWeights],
+        meta: AdapterBatchMetadata,
+        prefill: bool,
+        prefill_head_indices: Optional[torch.Tensor],
+    ) -> Optional["BatchLoraWeights"]:
+        adapter_weights = {k: _convert_lora(v) for k, v in adapter_weights.items()}
+        adapter_weights = {
+            k: v for k, v in adapter_weights.items() if isinstance(v, LoraWeights)
+        }
+        if not adapter_weights:
+            return None
+
+        first_weights = next(iter(adapter_weights.values()))
+        device = first_weights.weights_a.device
+        segment_indices = meta.segment_indices
+
+        lora_a = {
+            idx: adapter_weights[idx].weights_a
+            for idx in segment_indices
+            if idx in adapter_weights
+        }
+        lora_b = {
+            idx: adapter_weights[idx].weights_b
+            for idx in segment_indices
+            if idx in adapter_weights
+        }
+        adapter_index_configs = {
+            idx: adapter_weights[idx].adapter_config
+            for idx in segment_indices
+            if idx in adapter_weights
+        }
+        if len(lora_a) != 0:
+            lora_a_ptr = torch.stack(list(lora_a.values()))
+        if len(lora_b) != 0:
+            lora_b_ptr = torch.stack(list(lora_b.values()))
+
+        use_sgmv = True if prefill else False
+
+        adapter_to_segment = {v: k for k, v in enumerate(segment_indices)}
+
+        rank_indices = defaultdict(list)
+        for segment_idx, adapter_idx in enumerate(segment_indices):
+            if adapter_idx not in adapter_weights:
+                continue
+            rank_indices[adapter_weights[adapter_idx].lora_a_r].append(segment_idx)
+
+        if prefill_head_indices is not None:
+            j, prefill_head_segment_starts, prefill_head_segment_ends = 1, [0], [0]
+            for head_index in prefill_head_indices:
+                # j cannot go out of bounds as that would mean there are tokens without corresponding adapters
+                if head_index < meta.adapter_segments[j]:
+                    prefill_head_segment_ends[-1] += 1
+                else:
+                    prefill_head_segment_starts.append(prefill_head_segment_ends[-1])
+                    prefill_head_segment_ends.append(prefill_head_segment_ends[-1] + 1)
+                    j += 1
+
+        rank_data = {}
+        segment_starts = None
+        segment_ends = None
+        if use_sgmv:
+            segment_starts = meta.adapter_segments[:-1]
+            segment_ends = meta.adapter_segments[1:]
+            if prefill_head_indices is not None:
+                segment_starts = prefill_head_segment_starts[:-1]
+                segment_ends = prefill_head_segment_ends[1:]
+        batch_indices = [
+            adapter_to_segment[idx] for idx in meta.adapter_indices.tolist()
+        ]
+        for rank, indices in rank_indices.items():
+            adapters_indices = []
+            lora_a_keys = list(lora_a.keys())
+            for segment_idx in batch_indices:
+                if segment_idx in indices:
+                    adapters_indices.append(
+                        lora_a_keys.index(segment_indices[segment_idx])
+                    )
+                else:
+                    adapters_indices.append(-1)
+            adapters_indices = torch.tensor(
+                adapters_indices, dtype=torch.int64, device=device
+            )
+            if use_sgmv:
+                adapters_indices = adapters_indices[segment_starts]
+            rank_data[rank] = RankSegments(
+                rank=rank,
+                tmp_shrink=None,
+                tmp_expand=None,
+                lora_a_ptr=lora_a_ptr,
+                lora_b_ptr=lora_b_ptr,
+                segment_starts=segment_starts,
+                segment_ends=segment_ends,
+                indices=adapters_indices,
+            )
+
+        return BatchLoraWeights(
+            lora_a=lora_a,
+            lora_b=lora_b,
+            adapter_index_configs=adapter_index_configs,
+            rank_data=rank_data,
+            use_sgmv=use_sgmv,
+        )
+
+
 def get_scaling_factor(
     lora_alpha: int,
     r: int,
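The segment bookkeeping in IPEXBatchLoraWeights.load is easiest to see on a tiny batch. A self-contained example with toy numbers (none of the values below come from the diff) showing how adapter_segments becomes segment_starts/segment_ends and how per-token batch_indices are derived:

import torch
from collections import defaultdict

# Toy prefill batch: 5 tokens, the first 2 use adapter 1, the last 3 use adapter 0.
adapter_segments = torch.tensor([0, 2, 5])       # cumulative segment boundaries
segment_indices = [1, 0]                         # adapter id per segment
adapter_indices = torch.tensor([1, 1, 0, 0, 0])  # adapter id per token

segment_starts = adapter_segments[:-1]           # tensor([0, 2])
segment_ends = adapter_segments[1:]              # tensor([2, 5])

adapter_to_segment = {v: k for k, v in enumerate(segment_indices)}  # {1: 0, 0: 1}
batch_indices = [adapter_to_segment[i] for i in adapter_indices.tolist()]
assert batch_indices == [0, 0, 1, 1, 1]

# Group segments by LoRA rank (pretend both adapters have rank 16).
rank_indices = defaultdict(list)
for segment_idx, adapter_idx in enumerate(segment_indices):
    rank_indices[16].append(segment_idx)
assert rank_indices[16] == [0, 1]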
@@ -4,8 +4,8 @@ import torch
 import torch.distributed
 from torch import nn
 from torch.distributed import ProcessGroup

 from text_generation_server.utils.import_utils import SYSTEM

 from text_generation_server.utils.kernels import load_kernel

 if SYSTEM == "cuda":
@@ -15,6 +15,17 @@ if SYSTEM == "cuda":
 else:
     punica_sgmv = None

+if SYSTEM == "ipex":
+    try:
+        from intel_extension_for_pytorch.llm.functional import (
+            bgmv_expand,
+            bgmv_shrink,
+            sgmv_expand,
+            sgmv_shrink,
+        )
+    except ImportError:
+        pass
+

 if TYPE_CHECKING:
     from text_generation_server.adapters import AdapterBatchData
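The guarded import mirrors the existing pattern used for the CUDA punica kernels. A minimal sketch of probing for the IPEX LoRA ops before relying on them; the HAS_IPEX_LORA flag is illustrative and not part of the diff:

try:
    from intel_extension_for_pytorch.llm.functional import (  # noqa: F401
        bgmv_expand,
        bgmv_shrink,
        sgmv_expand,
        sgmv_shrink,
    )

    HAS_IPEX_LORA = True
except ImportError:
    HAS_IPEX_LORA = False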
@@ -43,10 +54,9 @@ class LoraLinear(nn.Module):
             return result
         data: Optional["BatchLoraWeights"] = adapter_data.data.get(layer_type)

-        if (
-            punica_sgmv is not None
-            and data is not None
-            and data.can_vectorize(self.process_group)
+        if data is not None and (
+            SYSTEM == "ipex"
+            or (punica_sgmv is not None and data.can_vectorize(self.process_group))
         ):
             # In tensor-parallel configurations, each GPU processes a specific segment of the output.
             # The 'result' tensor represents the full output, which can vary in size based on
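The rewritten predicate reads: take the vectorized LoRA path whenever batch data is present and either the backend is IPEX or the punica SGMV kernels can vectorize it. The same logic pulled into a small helper for clarity; the helper name and the stub class are not part of the diff:

def should_vectorize(system: str, punica_sgmv, data, process_group) -> bool:
    # Mirrors the new condition in LoraLinear.forward_layer_type.
    if data is None:
        return False
    return system == "ipex" or (
        punica_sgmv is not None and data.can_vectorize(process_group)
    )


class _Stub:
    def can_vectorize(self, pg):
        return True


assert should_vectorize("ipex", None, _Stub(), None) is True
assert should_vectorize("cuda", None, _Stub(), None) is False
assert should_vectorize("cuda", object(), _Stub(), None) is True
assert should_vectorize("cuda", object(), None, None) is False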
@@ -66,60 +76,121 @@ class LoraLinear(nn.Module):
                 proj = result

             for r, rank_segments in data.rank_data.items():
-                lora_a_ptr = rank_segments.lora_a_ptr
-                lora_b_ptr = rank_segments.lora_b_ptr
+                if SYSTEM == "ipex":
+                    lora_a_ptr = rank_segments.lora_a_ptr[
+                        :, self.layer_id, :
+                    ].contiguous()
+                    lora_b_ptr = rank_segments.lora_b_ptr[
+                        :, self.layer_id, :
+                    ].contiguous()
+                else:
+                    lora_a_ptr = rank_segments.lora_a_ptr
+                    lora_b_ptr = rank_segments.lora_b_ptr

                 if lora_a_ptr is None or lora_b_ptr is None:
                     raise ValueError("LoRA data is missing")

                 if data.use_sgmv:
-                    # Use SGMV for prefill
-                    v = punica_sgmv.lora_a_sgmv_cutlass(
-                        input,
-                        rank_segments.tmp_shrink,
-                        lora_a_ptr,
-                        rank_segments.segment_starts,
-                        rank_segments.segment_ends,
-                        self.layer_id,
-                        r,
-                    )
+                    if SYSTEM == "ipex":
+                        # Use SGMV for prefill
+                        seq_len_tensor = (
+                            rank_segments.segment_ends - rank_segments.segment_starts
+                        ).to(torch.int64)
+                        b_seq_start_loc = rank_segments.segment_starts.to(torch.int64)
+                        total_tokens = seq_len_tensor.sum()
+                        v = torch.zeros(
+                            (total_tokens, r), dtype=input.dtype, device=input.device
+                        )
+                        bs = seq_len_tensor.shape[0]
+                        sgmv_shrink(
+                            input,
+                            lora_a_ptr,
+                            v,
+                            b_seq_start_loc,
+                            seq_len_tensor,
+                            rank_segments.indices,
+                            bs,
+                            seq_len_tensor.max().item(),
+                            1.0,
+                        )
+                    else:
+                        # Use SGMV for prefill
+                        v = punica_sgmv.lora_a_sgmv_cutlass(
+                            input,
+                            rank_segments.tmp_shrink,
+                            lora_a_ptr,
+                            rank_segments.segment_starts,
+                            rank_segments.segment_ends,
+                            self.layer_id,
+                            r,
+                        )

                     if self.process_group.size() > 1:
                         v = self.collect_lora_a(v)
-                    punica_sgmv.lora_b_sgmv_cutlass(
-                        proj,
-                        v,
-                        rank_segments.tmp_expand,
-                        lora_b_ptr,
-                        rank_segments.segment_starts,
-                        rank_segments.segment_ends,
-                        self.layer_id,
-                    )
+                    if SYSTEM == "ipex":
+                        sgmv_expand(
+                            v,
+                            lora_b_ptr,
+                            proj,
+                            b_seq_start_loc,
+                            seq_len_tensor,
+                            rank_segments.indices,
+                            bs,
+                            seq_len_tensor.max().item(),
+                            add_inputs=True,
+                        )
+                    else:
+                        punica_sgmv.lora_b_sgmv_cutlass(
+                            proj,
+                            v,
+                            rank_segments.tmp_expand,
+                            lora_b_ptr,
+                            rank_segments.segment_starts,
+                            rank_segments.segment_ends,
+                            self.layer_id,
+                        )
                 else:
                     # Use BGMV for decode
                     v = torch.zeros(
                         (input.size(0), r), dtype=input.dtype, device=input.device
                     )
-                    # TODO: error with [-1, 0], but not [0, -1]
-                    punica_sgmv.add_lora_a_bgmv(
-                        v,
-                        input,
-                        lora_a_ptr,
-                        rank_segments.indices,
-                        self.layer_id,
-                    )
+                    if SYSTEM == "ipex":
+                        bgmv_shrink(
+                            input,
+                            lora_a_ptr,
+                            v,
+                            rank_segments.indices,
+                            1.0,
+                        )
+                    else:
+                        # TODO: error with [-1, 0], but not [0, -1]
+                        punica_sgmv.add_lora_a_bgmv(
+                            v,
+                            input,
+                            lora_a_ptr,
+                            rank_segments.indices,
+                            self.layer_id,
+                        )

                     if self.process_group.size() > 1:
                         v = self.collect_lora_a(v)

-                    punica_sgmv.add_lora_b_bgmv(
-                        proj,
-                        v,
-                        lora_b_ptr,
-                        rank_segments.indices,
-                        self.layer_id,
-                    )
+                    if SYSTEM == "ipex":
+                        bgmv_expand(
+                            v,
+                            lora_b_ptr,
+                            proj,
+                            rank_segments.indices,
+                            add_inputs=True,
+                        )
+                    else:
+                        punica_sgmv.add_lora_b_bgmv(
+                            proj,
+                            v,
+                            lora_b_ptr,
+                            rank_segments.indices,
+                            self.layer_id,
+                        )

             if end_idx - start_idx != result.shape[1]:
                 result[:, start_idx:end_idx] += proj
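Both kernel families (punica SGMV/BGMV and the IPEX shrink/expand ops) compute the same two-step LoRA product. A plain-PyTorch reference of that math for a single layer, useful for sanity-checking either path; toy shapes only, and no claim about the kernels' internals:

import torch

# Toy shapes: 5 tokens, hidden_size 64, output dim 64, rank 8.
tokens, hidden, out_dim, r = 5, 64, 64, 8
x = torch.randn(tokens, hidden)
lora_a = torch.randn(hidden, r)      # "shrink" projection
lora_b = torch.randn(r, out_dim)     # "expand" projection
proj = torch.zeros(tokens, out_dim)  # base projection output to accumulate into

# Shrink: project activations down to the LoRA rank.
v = x @ lora_a                       # [tokens, r]
# Expand: project back up and add into the base output (the add_inputs=True role).
proj += v @ lora_b                   # [tokens, out_dim]

assert proj.shape == (tokens, out_dim)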