Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-09-11 20:34:54 +00:00
Put back build step.
commit f1cd046f6b
parent 4fd7c64793
@@ -1,9 +1,14 @@
 flash_att_commit := 3a9bfd076f98746c73362328958dbc68d145fbec
 
-install-flash-attention:
+build-flash-attention:
 	if [ ! -d 'flash-attention' ]; then \
 		pip install -U packaging ninja --no-cache-dir && \
 		git clone https://github.com/HazyResearch/flash-attention.git && \
 		cd flash-attention && git fetch && git checkout $(flash_att_commit) && \
-		MAX_JOBS=8 python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install; \
+		MAX_JOBS=8 python setup.py build && cd csrc/layer_norm && python setup.py build && cd ../rotary && python setup.py build; \
 	fi
+
+install-flash-attention: build-flash-attention
+	if [ ! -d 'flash-attention' ]; then \
+		cd flash-attention && python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install; \
+	fi
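The hunk above splits the old all-in-one install-flash-attention target in two: build-flash-attention clones the pinned revision and compiles the three kernels with `python setup.py build`, while install-flash-attention becomes a dependent target that only runs the `setup.py install` steps. A minimal invocation sketch (the target names come from the hunk; running make from the directory holding this Makefile is an assumption):

    # install-flash-attention lists build-flash-attention as a prerequisite,
    # so a single invocation runs both recipes in order.
    make install-flash-attention

Neither target names a real file, so Make reruns both recipes on every invocation; it is the `[ ! -d 'flash-attention' ]` guard, not Make's timestamp logic, that short-circuits repeat builds. Note that the same guard wraps the install recipe, so once the build has created the directory the install body is skipped as written.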
@@ -1,10 +1,14 @@
-install-vllm-cuda:
+build-vllm-cuda:
 	if [ ! -d 'vllm' ]; then \
 		pip install -U ninja packaging --no-cache-dir && \
 		git clone https://github.com/Narsil/vllm.git vllm && \
 		cd vllm && \
 		git fetch && git checkout b5dfc61db88a81069e45b44f7cc99bd9e62a60fa && \
-		pip install -e .; \
+		python setup.py build; \
 	fi
+
+install-vllm-cuda: build-vllm-cuda
+	if [ ! -d 'vllm' ]; then \
+		cd vllm && pip install -e .; \
+	fi
 
 install-vllm-rocm:
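The vLLM targets get the same treatment: the expensive clone-checkout-compile moves into build-vllm-cuda, and install-vllm-cuda shrinks to an editable `pip install -e .` with the build declared as a prerequisite. A hypothetical sketch of the resulting flow (target names straight from the hunk):

    # Runs build-vllm-cuda first (clone Narsil/vllm, checkout b5dfc61db8...,
    # python setup.py build), then the install recipe.
    make install-vllm-cuda

Splitting build from install this way lets the slow compile step be cached (for example in a separate image layer) while the install step stays cheap; that motivation is my reading of the change, not something stated in the commit.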