From 5b649d67c4ce9814732c3bc26eb180332ccaf3d5 Mon Sep 17 00:00:00 2001
From: drbh
Date: Thu, 1 Aug 2024 16:17:29 +0000
Subject: [PATCH] fix: improve conditional and error message

---
 server/text_generation_server/layers/attention/cuda.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/layers/attention/cuda.py b/server/text_generation_server/layers/attention/cuda.py
index f86ce1f4..c84cc7da 100644
--- a/server/text_generation_server/layers/attention/cuda.py
+++ b/server/text_generation_server/layers/attention/cuda.py
@@ -171,8 +171,9 @@ def paged_attention(

 try:
-    if major <= 8:
-        raise ImportError("Flash Attention V2 requires CUDA 11.0 or higher")
+    is_ampere_or_newer = major >= 8 and minor >= 0
+    if not is_ampere_or_newer:
+        raise ImportError("FlashAttention only supports Ampere GPUs or newer.")
     import flash_attn_2_cuda
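
For context, a minimal standalone sketch of the check this patch introduces, assuming `major` and `minor` come from `torch.cuda.get_device_capability()` as elsewhere in the `cuda.py` module; this is illustrative, not the exact TGI code path:

    import torch

    if torch.cuda.is_available():
        # Assumption: `major`/`minor` are the GPU's compute capability, as in cuda.py.
        major, minor = torch.cuda.get_device_capability()

        # Ampere GPUs report compute capability 8.x, so `major >= 8` is the
        # effective test; `minor >= 0` is always true for valid capabilities.
        is_ampere_or_newer = major >= 8 and minor >= 0
        if not is_ampere_or_newer:
            raise ImportError("FlashAttention only supports Ampere GPUs or newer.")

Since `minor` is never negative, the new condition reduces to `major >= 8`, i.e. compute capability 8.0 (Ampere) or newer. The replaced `major <= 8` check rejected Ampere and Ada (8.x) devices, and its error message referred to the CUDA toolkit version rather than the GPU architecture.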