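# Pinned FlashAttention v2 sources: a tagged release for the CUDA build and a
# specific commit of a ROCm fork for AMD GPUs.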
flash_att_v2_commit_cuda := v2.6.1
flash_att_v2_commit_rocm := 47bd46e0204a95762ae48712fd1a3978827c77fd
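
# CUDA: install the pinned flash-attn release directly with pip.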
build-flash-attention-v2-cuda:
	pip install -U packaging wheel
	pip install flash-attn==$(flash_att_v2_commit_cuda)
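
# The pip install in the build step already places the package on the Python
# path, so this target only confirms completion.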
install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
echo "Flash v2 installed"
build-flash-attention-v2-rocm:
	if [ ! -d 'flash-attention-v2' ]; then \
		pip install -U packaging ninja --no-cache-dir && \
		git clone https://github.com/mht-sharma/flash-attention.git flash-attention-v2 && \
		cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_rocm) && \
		git submodule update --init --recursive && GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build; \
	fi
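
# Install the extension built above into the active Python environment.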
install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
	cd flash-attention-v2 && \
	GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py install
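
# Usage sketch (assumes the targets are invoked via make on this file, either
# standalone or included from a parent Makefile):
#   make install-flash-attention-v2-cuda   # NVIDIA GPUs
#   make install-flash-attention-v2-rocm   # AMD gfx90a/gfx942 GPUs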