From 6abcab843d840de5f43ca0a127d32ca5dbfb1022 Mon Sep 17 00:00:00 2001
From: "Wang, Yi A"
Date: Tue, 6 Aug 2024 22:00:36 -0700
Subject: [PATCH] fix regression in ipex flashattention

Signed-off-by: Wang, Yi A
---
 server/text_generation_server/layers/attention/ipex.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/layers/attention/ipex.py b/server/text_generation_server/layers/attention/ipex.py
index 33e0a15d..d7cf780a 100644
--- a/server/text_generation_server/layers/attention/ipex.py
+++ b/server/text_generation_server/layers/attention/ipex.py
@@ -21,7 +21,7 @@ def attention(
     out = torch.empty_like(q)

     # We do not need to check window_size_left (not supported) here, so it is already checked ahead of time at model load.
-    return ipex.llm.functional.varlen_attention(
+    ipex.llm.functional.varlen_attention(
         q,
         k,
         v,
@@ -38,6 +38,8 @@
         None,
     )

+    return out
+

 def reshape_and_cache(
     key: torch.Tensor,
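
For context on what the patch changes: the ipex varlen attention kernel writes its result into the preallocated `out` tensor, so relying on the call's return value (the pre-fix `return ipex.llm.functional.varlen_attention(...)`) was the regression; the wrapper now calls the kernel and returns `out` explicitly. Below is a minimal sketch of that calling pattern only. `_varlen_attention_inplace` is a hypothetical stand-in for the real ipex kernel, not its actual implementation or signature.

import torch


def _varlen_attention_inplace(q, k, v, out):
    # Hypothetical stand-in for an in-place attention kernel: it fills the
    # preallocated `out` tensor rather than allocating and returning a new one.
    scale = q.shape[-1] ** -0.5
    attn = torch.softmax(q @ k.transpose(-2, -1) * scale, dim=-1)
    out.copy_(attn @ v)


def attention(q, k, v):
    out = torch.empty_like(q)
    # The kernel fills `out` in place, so the wrapper must return `out` itself
    # instead of the kernel call's return value.
    _varlen_attention_inplace(q, k, v, out)
    return out


# Usage: q, k, v share a shape; the output has the same shape as q.
q = k = v = torch.randn(2, 4, 8)
print(attention(q, k, v).shape)  # torch.Size([2, 4, 8])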