Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-19 22:02:06 +00:00
* Putting back the NCCL forced upgrade.
* Ignoring conda.
* Dropping conda from the build system + torch 2.6.
* Cache min.
* Rolling back torch version.
* Reverting the EETQ modification.
* Fix flash attention?
* Actually stay on flash v1.
* Patching flash v1.
* Torch 2.6, fork of rotary, eetq updated.
* Put back nccl latest (override torch) (see the sketch after this list).
* Slightly more reproducible build and not as scary.
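The NCCL and torch items above boil down to keeping torch pinned while force-upgrading the NCCL it ships with. A minimal sketch of that pattern, assuming pip-managed CUDA 12 wheels; the package name and exact versions below are illustrative assumptions, not taken from the actual Dockerfile:

	# Assumed versions/package name: pin torch first, then override its NCCL wheel.
	pip install --no-cache-dir torch==2.6.0
	pip install --no-cache-dir --upgrade nvidia-nccl-cu12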
# Pinned commit of the flash-attention v1 fork (Narsil/flash-attention).
flash_att_commit := ceee0de88c037ee6eda5e75c813a8648e4bcb1c9

# Clone the fork if it is not already present, then build flash-attention plus the
# layer_norm and rotary CUDA extensions at the pinned commit (MAX_JOBS=8 caps parallel compile jobs).
build-flash-attention:
	if [ ! -d 'flash-attention' ]; then \
		pip install -U packaging ninja --no-cache-dir && \
		git clone https://github.com/Narsil/flash-attention.git; \
	fi
	cd flash-attention && git fetch && git checkout $(flash_att_commit) && \
	MAX_JOBS=8 python setup.py build && cd csrc/layer_norm && python setup.py build && cd ../rotary && python setup.py build

# Install the extensions built by the target above.
install-flash-attention: build-flash-attention
	cd flash-attention && git checkout $(flash_att_commit) && MAX_JOBS=8 python setup.py install && cd csrc/layer_norm && python setup.py install && cd ../rotary && python setup.py install
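For context, a minimal usage sketch of how these targets might be driven from the repository's server directory; the makefile name passed to -f is an assumption for illustration, since the snippet above does not show how this file is included:

	# Hypothetical invocation: builds then installs the pinned flash v1 fork,
	# including the csrc/layer_norm and csrc/rotary CUDA extensions.
	make -f Makefile-flash-att install-flash-attention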