mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-24 00:12:08 +00:00
* Remove vLLM dependency for CUDA.

  This change adds `attention-kernels` as a dependency for paged attention and
  cache reshaping. With that, we no longer use vLLM anywhere for CUDA.

  Tested run (since we don't have paged attention in CI):

  ```
  ❯ ATTENTION=paged python -m pytest integration-tests -k "llama and awq" --release
  [...]
  5 snapshots passed.
  ```

* Fix clippy warning.
14 lines
480 B
Plaintext
14 lines
480 B
Plaintext
# Pinned commit of the mht-sharma/vllm ROCm fork; checked out by the
# build-vllm-rocm and install-vllm-rocm targets below so builds are reproducible.
commit_rocm := 4e0929e6e4fa0a3d09d358715c288020ea9dc247
|
|
|
|
# Build the ROCm fork of vLLM at the pinned commit.
# Clones the fork on first run; subsequent runs reuse the existing checkout.
# Declared phony: this target never produces a file named "build-vllm-rocm".
.PHONY: build-vllm-rocm
build-vllm-rocm:
# Install build prerequisites and clone only if the checkout is missing.
	if [ ! -d 'vllm' ]; then \
		pip install -U ninja packaging --no-cache-dir && \
		git clone https://github.com/mht-sharma/vllm.git vllm; \
	fi
# Pin to the exact commit, then build for the gfx90a and gfx942 GPU architectures.
	cd vllm && git fetch && git checkout $(commit_rocm) && \
		PYTORCH_ROCM_ARCH="gfx90a;gfx942" python setup.py build
# Install the built vLLM ROCm fork into the current Python environment
# (editable install). Depends on build-vllm-rocm having produced the build.
# Declared phony: this target never produces a file named "install-vllm-rocm".
.PHONY: install-vllm-rocm
install-vllm-rocm: build-vllm-rocm
# Re-assert the pinned commit before installing, then do an editable install.
	cd vllm && git fetch && git checkout $(commit_rocm) && \
		PYTORCH_ROCM_ARCH="gfx90a;gfx942" pip install -e .