From 3c71c656c7075fa39d1354f7cb97a2107d28d7e4 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 28 Nov 2023 16:28:40 +0100
Subject: [PATCH] `make install-flash-attn-v2-cuda` should work like
 `make install-flash-attn-v2` used to work. (#1294)

# What does this PR do?

Fixes # (issue)

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other
      checks if that's the case).
- [ ] Did you read the
      [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
      Pull Request section?
- [ ] Was this discussed/approved via a Github issue or the
      [forum](https://discuss.huggingface.co/)? Please add a link to it if
      that's the case.
- [ ] Did you make sure to update the documentation with your changes? Here are the
      [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs), and
      [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed.
Feel free to tag members/contributors who may be interested in your PR.
---
 server/Makefile-flash-att-v2 | 41 +++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/server/Makefile-flash-att-v2 b/server/Makefile-flash-att-v2
index 8b9f289d..71c6cabe 100644
--- a/server/Makefile-flash-att-v2
+++ b/server/Makefile-flash-att-v2
@@ -1,26 +1,29 @@
-flash_att_v2_commit := 02ac572f3ffc4f402e4183aaa6824b45859d3ed3
+flash_att_v2_commit_cuda := 02ac572f3ffc4f402e4183aaa6824b45859d3ed3
+flash_att_v2_commit_rocm := 8736558c287ff2ef28b24878e42828c595ac3e69
 
 
-build-flash-attention-v2-cuda: FLASH_ATTN_V2_COMMIT=02ac572f3ffc4f402e4183aaa6824b45859d3ed3
-build-flash-attention-v2-cuda: FLASH_REPOSITORY=https://github.com/HazyResearch/flash-attention.git
-build-flash-attention-v2-cuda: BRANCH=main
-build-flash-attention-v2-cuda: PYTORCH_ROCM_ARCH=""
-build-flash-attention-v2-cuda: build-flash-attention-v2
-build-flash-attention-v2-rocm: FLASH_ATTN_V2_COMMIT=8736558c287ff2ef28b24878e42828c595ac3e69
-build-flash-attention-v2-rocm: FLASH_REPOSITORY=https://github.com/fxmarty/flash-attention-rocm
-build-flash-attention-v2-rocm: BRANCH=remove-offload-arch-native
-build-flash-attention-v2-rocm: PYTORCH_ROCM_ARCH=gfx90a
-build-flash-attention-v2-rocm: build-flash-attention-v2
-
-flash-attention-v2:
+flash-attention-v2-cuda:
 	# Clone flash attention
 	pip install -U packaging ninja --no-cache-dir
-	git clone --single-branch --branch $(BRANCH) $(FLASH_REPOSITORY) flash-attention-v2
+	git clone https://github.com/HazyResearch/flash-attention.git flash-attention-v2
 
-build-flash-attention-v2: flash-attention-v2
-	cd flash-attention-v2 && git fetch && git checkout $(FLASH_ATTN_V2_COMMIT)
+build-flash-attention-v2-cuda: flash-attention-v2-cuda
+	cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_cuda)
 	cd flash-attention-v2 && git submodule update --init --recursive
-	cd flash-attention-v2 && PYTORCH_ROCM_ARCH=$(PYTORCH_ROCM_ARCH) python setup.py build
+	cd flash-attention-v2 && python setup.py build
 
-install-flash-attention-v2: build-flash-attention-v2
-	cd flash-attention-v2 && git submodule update --init --recursive && python setup.py install
\ No newline at end of file
+install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
+	cd flash-attention-v2 && git submodule update --init --recursive && python setup.py install
+
+flash-attention-v2-rocm:
+	# Clone flash attention
+	pip install -U packaging ninja --no-cache-dir
+	git clone https://github.com/fxmarty/flash-attention-rocm flash-attention-v2
+
+build-flash-attention-v2-rocm: flash-attention-v2-rocm
+	cd flash-attention-v2 && git fetch && git checkout $(flash_att_v2_commit_rocm)
+	cd flash-attention-v2 && git submodule update --init --recursive
+	cd flash-attention-v2 && PYTORCH_ROCM_ARCH=gfx90a python setup.py build
+
+install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
+	cd flash-attention-v2 && git submodule update --init --recursive && python setup.py install
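
For anyone trying the patch locally, here is a minimal usage sketch (illustrative only, not part of the diff; it assumes the file sits at `server/Makefile-flash-att-v2` as in the paths above and invokes the targets introduced by this patch directly):

```sh
# Assumption: run from the repository's server/ directory.
cd server

# CUDA path: clones HazyResearch/flash-attention, checks out
# $(flash_att_v2_commit_cuda), then builds and installs the extension.
make -f Makefile-flash-att-v2 install-flash-attention-v2-cuda

# ROCm path: clones fxmarty/flash-attention-rocm at $(flash_att_v2_commit_rocm)
# and builds with PYTORCH_ROCM_ARCH=gfx90a before installing.
make -f Makefile-flash-att-v2 install-flash-attention-v2-rocm
```

Each `install-*` target pulls in its `build-*` and clone targets through the prerequisite chain, so a single invocation per backend is enough.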