mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-19 22:02:06 +00:00)
* add CPU tgi support
* ipex distributed ops support

Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>
Co-authored-by: Funtowicz Morgan <mfuntowicz@users.noreply.github.com>
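The "ipex distributed ops" half of this change builds on Intel's oneCCL bindings for torch.distributed. As a hedged orientation sketch only: the package name oneccl_bindings_for_pytorch and the "ccl" backend come from Intel's documented ecosystem usage, not from this diff, and TGI's actual wiring lives elsewhere in the PR.

import os

import torch.distributed as dist
# Importing the bindings registers the oneCCL "ccl" backend with torch.distributed.
import oneccl_bindings_for_pytorch  # noqa: F401

# Illustrative process-group setup for CPU/XPU; rank and world size are read
# from the standard torchrun environment variables.
dist.init_process_group(
    backend="ccl",
    rank=int(os.environ.get("RANK", "0")),
    world_size=int(os.environ.get("WORLD_SIZE", "1")),
)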
14 lines · 611 B · Python
import os

from text_generation_server.utils.import_utils import SYSTEM, IPEX_AVAIL

# Honor the explicit opt-out before selecting a backend.
if os.getenv("USE_FLASH_ATTENTION", "").lower() == "false":
    raise ImportError("`USE_FLASH_ATTENTION` is false.")

# Select the flash/paged attention kernels for the detected accelerator.
if SYSTEM == "cuda":
    from .cuda import attention, paged_attention, reshape_and_cache, SUPPORTS_WINDOWING
elif SYSTEM == "rocm":
    from .rocm import attention, paged_attention, reshape_and_cache, SUPPORTS_WINDOWING
elif IPEX_AVAIL:
    # IPEX backend; gated on availability rather than SYSTEM, so the same
    # .xpu module also serves the CPU path added in this change.
    from .xpu import attention, paged_attention, reshape_and_cache, SUPPORTS_WINDOWING
else:
    raise ImportError(f"System {SYSTEM} doesn't support flash/paged attention")
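For context, a minimal sketch of how downstream model code might consume this dispatch module. The absolute package path text_generation_server.layers.attention is an assumption inferred from the relative imports above, and the helper below is hypothetical, not part of this file.

# Hypothetical consumer; the package path is inferred, not stated in this file.
# All four names are imported to show the shared surface every backend exports.
from text_generation_server.layers.attention import (
    SUPPORTS_WINDOWING,
    attention,
    paged_attention,
    reshape_and_cache,
)


def check_sliding_window(window_size):
    # Because every backend exports the same surface (attention,
    # paged_attention, reshape_and_cache), callers stay backend-agnostic
    # and only branch on capability flags such as SUPPORTS_WINDOWING.
    if window_size is not None and not SUPPORTS_WINDOWING:
        raise NotImplementedError(
            "This attention backend does not support sliding-window attention."
        )

Note that setting USE_FLASH_ATTENTION=false in the server's environment trips the guard at the top of the module, so an unsupported configuration fails fast at import time rather than at the first forward pass.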