Hotfixing make install.

This commit is contained in:
Nicolas Patry 2024-06-04 18:41:09 +00:00
parent 8390e251d9
commit 698f7cd474
3 changed files with 16 additions and 24 deletions

View File

@@ -9,6 +9,4 @@ build-flash-attention:
 	fi

 install-flash-attention: build-flash-attention
-	if [ ! -d 'flash-attention' ]; then \
-		cd flash-attntion && python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install; \
-	fi
+	cd flash-attention && git checkout $(flash_att_commit) && MAX_JOBS=8 python setup.py && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install

View File

@@ -5,9 +5,8 @@ build-flash-attention-v2-cuda:
 	pip install -U packaging wheel
 	pip install flash-attn==$(flash_att_v2_commit_cuda)

-install-flash-attention-v2-cuda:
-	pip install -U packaging wheel
-	pip install flash-attn==$(flash_att_v2_commit_cuda)
+install-flash-attention-v2-cuda: build-flash-attention-v2-cuda
+	echo "Flash v2 installed"

 build-flash-attention-v2-rocm:
 	if [ ! -d 'flash-attention-v2' ]; then \
@@ -18,7 +17,5 @@ build-flash-attention-v2-rocm:
 	fi

 install-flash-attention-v2-rocm: build-flash-attention-v2-rocm
-	if [ ! -d 'flash-attention-v2' ]; then \
-		cd flash-attention-v2 && \
-		GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py install; \
-	fi
+	cd flash-attention-v2 && \
+	GPU_ARCHS="gfx90a;gfx942" PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py install

View File

@@ -1,26 +1,23 @@
+commit_cuda := b5dfc61db88a81069e45b44f7cc99bd9e62a60fa
+commit_rocm := ca6913b3c2ffacdcb7d15e914dc34adbc6c89479
 build-vllm-cuda:
 	if [ ! -d 'vllm' ]; then \
 		pip install -U ninja packaging --no-cache-dir && \
-		git clone https://github.com/Narsil/vllm.git vllm && \
-		cd vllm && \
-		git fetch && git checkout b5dfc61db88a81069e45b44f7cc99bd9e62a60fa && \
-		python setup.py build; \
+		git clone https://github.com/Narsil/vllm.git vllm; \
 	fi
+	cd vllm && git fetch && git checkout $(commit_cuda) && python setup.py build

 install-vllm-cuda: build-vllm-cuda
-	if [ ! -d 'vllm' ]; then \
-		cd vllm && pip install -e .; \
-	fi
+	cd vllm && git fetch && git checkout $(commit_cuda) && pip install -e .

 build-vllm-rocm:
 	if [ ! -d 'vllm' ]; then \
 		pip install -U ninja packaging --no-cache-dir && \
-		git clone https://github.com/fxmarty/rocm-vllm.git vllm && \
-		cd vllm && git fetch && git checkout ca6913b3c2ffacdcb7d15e914dc34adbc6c89479 && \
-		PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build; \
+		git clone https://github.com/fxmarty/rocm-vllm.git vllm; \
 	fi
+	cd vllm && git fetch && git checkout $(commit_rocm) && \
+	PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build

 install-vllm-rocm: build-vllm-rocm
-	if [ ! -d 'vllm' ]; then \
-		cd vllm && \
-		PYTORCH_ROCM_ARCH="gfx90a;gfx942" pip install -e .; \
-	fi
+	cd vllm && git fetch && git checkout $(commit_rocm) && \
+	PYTORCH_ROCM_ARCH="gfx90a;gfx942" pip install -e .