From 92ddb41d95ecc561067ecb05aaed8823e62dfaba Mon Sep 17 00:00:00 2001
From: "R. P. Ruiz" <42214371+deepily@users.noreply.github.com>
Date: Tue, 9 Jan 2024 10:19:31 -0500
Subject: [PATCH] Fix missing make target platform for local install:
 'install-flash-attention-v2' (#1414)

---
 server/text_generation_server/utils/flash_attn.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
index 02f01e65..48f8ef70 100644
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -23,10 +23,15 @@ try:
     try:
         import flash_attn_2_cuda
     except ImportError:
+        architecture_suffix = ""
+        if IS_CUDA_SYSTEM:
+            architecture_suffix = "-cuda"
+        elif IS_ROCM_SYSTEM:
+            architecture_suffix = "-rocm"
         raise ImportError(
             "Flash Attention V2 is not installed.\n"
             "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
-            "or install flash attention v2 with `cd server && make install install-flash-attention-v2`"
+            f"or install flash attention v2 with `cd server && make install install-flash-attention-v2{architecture_suffix}`"
         )
     if not (is_sm8x or is_sm90):
         raise ImportError(
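
For reference, a minimal standalone sketch of the suffix-selection logic this patch introduces. It assumes the IS_CUDA_SYSTEM / IS_ROCM_SYSTEM flags can be approximated from torch.version for illustration only; in the actual server package those flags are defined elsewhere and are already in scope in flash_attn.py.

    # Standalone illustration of the platform-suffix logic (not the module itself).
    # Assumption: IS_CUDA_SYSTEM / IS_ROCM_SYSTEM are recreated here from
    # torch.version purely so this snippet runs on its own.
    import torch

    IS_CUDA_SYSTEM = torch.version.cuda is not None
    IS_ROCM_SYSTEM = torch.version.hip is not None

    architecture_suffix = ""
    if IS_CUDA_SYSTEM:
        architecture_suffix = "-cuda"
    elif IS_ROCM_SYSTEM:
        architecture_suffix = "-rocm"

    # The install hint now names the platform-specific make target,
    # e.g. `install-flash-attention-v2-cuda` on a CUDA system or
    # `install-flash-attention-v2-rocm` on a ROCm system.
    print(
        "or install flash attention v2 with "
        f"`cd server && make install install-flash-attention-v2{architecture_suffix}`"
    )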