Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-06-03 13:12:10 +00:00.

This PR adds support for AMD Instinct MI210 & MI250 GPUs, with paged attention and FAv2 support.

Remaining items to discuss, on top of possible others:

* Should we have a `ghcr.io/huggingface/text-generation-inference:1.1.0+rocm` hosted image, or is it too early?
* Should we set up a CI on MI210/MI250? I don't have access to the runners of TGI though.
* Are we comfortable with those changes being directly in TGI, or do we need a fork?

---------

Co-authored-by: Felix Marty <felix@hf.co>
Co-authored-by: OlivierDehaene <olivier@huggingface.co>
Co-authored-by: Your Name <you@example.com>

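A minimal invocation sketch, assuming the targets below are kept in a standalone Makefile (the file name `Makefile-flash-att-v2` is an assumption) and run inside a Python environment where a matching PyTorch build (CUDA or ROCm) is already installed:

    make -f Makefile-flash-att-v2 build-flash-attention-v2-cuda   # NVIDIA GPUs, upstream flash-attention
    make -f Makefile-flash-att-v2 build-flash-attention-v2-rocm   # AMD MI210/MI250 (gfx90a), ROCm fork
    make -f Makefile-flash-att-v2 install-flash-attention-v2      # install the previously built extension
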
flash_att_v2_commit := 02ac572f3ffc4f402e4183aaa6824b45859d3ed3

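# CUDA build: upstream HazyResearch flash-attention, main branch, pinned to a known commit.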
build-flash-attention-v2-cuda: FLASH_ATTN_V2_COMMIT=02ac572f3ffc4f402e4183aaa6824b45859d3ed3
build-flash-attention-v2-cuda: FLASH_REPOSITORY=https://github.com/HazyResearch/flash-attention.git
build-flash-attention-v2-cuda: BRANCH=main
build-flash-attention-v2-cuda: PYTORCH_ROCM_ARCH=""
build-flash-attention-v2-cuda: build-flash-attention-v2

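# ROCm build: ROCm-compatible fork pinned to a known commit, built for MI210/MI250 (gfx90a).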
build-flash-attention-v2-rocm: FLASH_ATTN_V2_COMMIT=8736558c287ff2ef28b24878e42828c595ac3e69
build-flash-attention-v2-rocm: FLASH_REPOSITORY=https://github.com/fxmarty/flash-attention-rocm
build-flash-attention-v2-rocm: BRANCH=remove-offload-arch-native
build-flash-attention-v2-rocm: PYTORCH_ROCM_ARCH=gfx90a
build-flash-attention-v2-rocm: build-flash-attention-v2

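# Install build prerequisites and clone the selected repository/branch into flash-attention-v2/.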
flash-attention-v2:
	# Clone flash attention
	pip install -U packaging ninja --no-cache-dir
	git clone --single-branch --branch $(BRANCH) $(FLASH_REPOSITORY) flash-attention-v2

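# Check out the pinned commit, fetch submodules, and compile for the requested ROCm arch (empty for CUDA).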
build-flash-attention-v2: flash-attention-v2
	cd flash-attention-v2 && git fetch && git checkout $(FLASH_ATTN_V2_COMMIT)
	cd flash-attention-v2 && git submodule update --init --recursive
	cd flash-attention-v2 && PYTORCH_ROCM_ARCH=$(PYTORCH_ROCM_ARCH) python setup.py build

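# Install the built extension into the current Python environment.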
install-flash-attention-v2: build-flash-attention-v2
	cd flash-attention-v2 && git submodule update --init --recursive && python setup.py install