This commit is contained in:
deepily 2024-01-05 17:29:35 -05:00
parent 630800eed3
commit 716fe00d92

View File

@ -23,10 +23,15 @@ try:
try:
    try:
        import flash_attn_2_cuda
    except ImportError:
        architecture_suffix = ""
        if IS_CUDA_SYSTEM:
            architecture_suffix = "-cuda"
        elif IS_ROCM_SYSTEM:
            architecture_suffix = "-rocm"
        raise ImportError(
            "Flash Attention V2 is not installed.\n"
            "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
            f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`"
        )
    if not (is_sm8x or is_sm90):
        raise ImportError(