From 4d28e29236bd987f80b23c8c2b5f94db6280bb2d Mon Sep 17 00:00:00 2001
From: "Wang, Yi A"
Date: Tue, 30 Jul 2024 19:28:59 -0700
Subject: [PATCH 1/3] hotfix: fix xpu crash introduced by code refactoring;
 torch.xpu relies on importing ipex

Signed-off-by: Wang, Yi A
---
 server/text_generation_server/utils/import_utils.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/text_generation_server/utils/import_utils.py b/server/text_generation_server/utils/import_utils.py
index 7c053014..782b4f15 100644
--- a/server/text_generation_server/utils/import_utils.py
+++ b/server/text_generation_server/utils/import_utils.py
@@ -56,6 +56,8 @@ elif torch.version.cuda is not None and torch.cuda.is_available():
     get_free_memory = get_cuda_free_memory
 elif is_ipex_available():
     SYSTEM = "ipex"
+    import intel_extension_for_pytorch  # noqa: F401
+
     if hasattr(torch, "xpu") and torch.xpu.is_available():
         empty_cache = torch.xpu.empty_cache
         synchronize = torch.xpu.synchronize

From 3d21c8f43a089d4ec3d342927d0f86b361a6822e Mon Sep 17 00:00:00 2001
From: "Wang, Yi A"
Date: Tue, 30 Jul 2024 22:20:49 -0700
Subject: [PATCH 2/3] re-enable gemma2 in xpu

Signed-off-by: Wang, Yi A
---
 server/text_generation_server/layers/attention/ipex.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/server/text_generation_server/layers/attention/ipex.py b/server/text_generation_server/layers/attention/ipex.py
index 45a0a03e..45ed3f45 100644
--- a/server/text_generation_server/layers/attention/ipex.py
+++ b/server/text_generation_server/layers/attention/ipex.py
@@ -2,6 +2,7 @@ import intel_extension_for_pytorch as ipex
 import torch
 from text_generation_server.models.flash_causal_lm import BLOCK_SIZE
 from text_generation_server.layers.attention import Seqlen
+from typing import Optional
 
 SUPPORTS_WINDOWING = False
 
@@ -16,6 +17,7 @@ def attention(
     softmax_scale,
     window_size_left=-1,
     causal=True,
+    softcap: Optional[float] = None,
 ):
     # We do not need to check window_size_left (not supported) here, so it is already checked ahead of time at model load.
     return ipex.llm.functional.varlen_attention(
@@ -58,6 +60,7 @@ def paged_attention(
     block_tables: torch.Tensor,
     seqlen: Seqlen,
     max_s: int,
+    softcap: Optional[float] = None,
 ):
     ipex.llm.modules.PagedAttention.single_query_cached_kv_attention(
         out,

From 6abcab843d840de5f43ca0a127d32ca5dbfb1022 Mon Sep 17 00:00:00 2001
From: "Wang, Yi A"
Date: Tue, 6 Aug 2024 22:00:36 -0700
Subject: [PATCH 3/3] fix regression in ipex flash attention

Signed-off-by: Wang, Yi A
---
 server/text_generation_server/layers/attention/ipex.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/layers/attention/ipex.py b/server/text_generation_server/layers/attention/ipex.py
index 33e0a15d..d7cf780a 100644
--- a/server/text_generation_server/layers/attention/ipex.py
+++ b/server/text_generation_server/layers/attention/ipex.py
@@ -21,7 +21,7 @@ def attention(
     out = torch.empty_like(q)
 
     # We do not need to check window_size_left (not supported) here, so it is already checked ahead of time at model load.
-    return ipex.llm.functional.varlen_attention(
+    ipex.llm.functional.varlen_attention(
         q,
         k,
         v,
@@ -38,6 +38,8 @@ def attention(
         None,
     )
 
+    return out
+
 
 def reshape_and_cache(
     key: torch.Tensor,
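
A note on the third patch: the diff pre-allocates `out = torch.empty_like(q)` and then returns `out` after calling `ipex.llm.functional.varlen_attention(...)`, which suggests the kernel fills the output buffer in place and its return value should not be relied on. The sketch below illustrates that in-place pattern only; `fake_varlen_attention` is a hypothetical stand-in, and the toy softmax math is merely a placeholder for the real varlen kernel, with arbitrary shapes.

import torch


def fake_varlen_attention(q, k, v, out):
    # Hypothetical stand-in for an in-place attention kernel: it writes the
    # result into `out` and, like many in-place kernels, returns None.
    out.copy_(torch.softmax(q @ k.transpose(-1, -2), dim=-1) @ v)


def attention_buggy(q, k, v):
    out = torch.empty_like(q)
    # Regression pattern: returning the kernel's return value yields None.
    return fake_varlen_attention(q, k, v, out)


def attention_fixed(q, k, v):
    out = torch.empty_like(q)
    fake_varlen_attention(q, k, v, out)
    return out  # return the buffer the kernel filled, as the third patch does


if __name__ == "__main__":
    q = torch.randn(2, 4, 8)
    k = torch.randn(2, 4, 8)
    v = torch.randn(2, 4, 8)
    print(attention_buggy(q, k, v))        # None
    print(attention_fixed(q, k, v).shape)  # torch.Size([2, 4, 8])

As for the second patch, `softcap` is only added to the `attention` and `paged_attention` signatures; the diff does not forward it to the ipex kernels, which appears to be enough to let gemma2 (whose call sites pass `softcap`) run on the ipex/xpu path again.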