diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
index 1b59fbc5..02f01e65 100644
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -72,7 +72,7 @@ def attention(
     softmax_scale,
     window_size_left=-1,
 ):
-    if window_size_left == 0:
+    if window_size_left <= 0 and window_size_left != -1:
         raise ValueError("`window_size_left` must be > 0 or -1")
 
     if HAS_FLASH_ATTN_V2_CUDA:
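
For reference, a minimal standalone sketch of what this change tightens (the helper name validate_window_size_left is hypothetical and not part of the patch): the old check only rejected exactly 0, so negative values other than the -1 sentinel slipped past the guard; the new check rejects every non-positive value except -1, which conventionally disables the sliding window in flash-attn.

    # Hypothetical helper illustrating the patched guard; not part of the diff.
    def validate_window_size_left(window_size_left: int) -> None:
        # Old check: `window_size_left == 0` only caught 0, letting e.g. -2 through.
        # New check: rejects every non-positive value except the -1 sentinel.
        if window_size_left <= 0 and window_size_left != -1:
            raise ValueError("`window_size_left` must be > 0 or -1")

    validate_window_size_left(-1)   # ok: -1 means no sliding window
    validate_window_size_left(256)  # ok: positive window width
    try:
        validate_window_size_left(-2)  # rejected after this change
    except ValueError as e:
        print(e)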