mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-19 22:02:06 +00:00
* Use Hub kernels for Marlin and cutlass quantization kernels * Use hub kernels for MoE/GPTQ-Marlin MoE * Use attention kernels from the Hub * Cache the kernels in the Docker image * Update moe kernels * Support loading local kernels for development * Support latest moe kernels * Update to moe 0.1.1 * CI: download locked kernels for server tests * Fixup some imports * CI: activate venv * Fix unused imports * Nix: add attention/moe/quantization kernels * Update hf-kernels to 0.1.5 * Update kernels * Update tgi-nix flake for hf-kernels * Fix EOF * Take `load_kernel` out of a frequently-called function * Hoist another case of kernel loading out of a somewhat hot function * marlin-kernels -> quantization * attention -> paged-attention * EOF fix * Update hf-kernels, fixup Docker * ipex fix * Remove outdated TODO
23 lines
593 B
Python
23 lines
593 B
Python
import importlib
|
|
|
|
from loguru import logger
|
|
from hf_kernels import load_kernel as hf_load_kernel
|
|
|
|
from text_generation_server.utils.log import log_once
|
|
|
|
|
|
def load_kernel(*, module: str, repo_id: str):
    """
    Load a kernel. First try to load it as the given module (e.g. for
    local development), falling back to a locked Hub kernel.

    Args:
        module: Importable name of a locally-installed kernel module.
        repo_id: Hub repository to fetch the locked kernel from when the
            local module is not installed.

    Returns:
        The imported local module, or the kernel module loaded from the
        Hub via ``hf_load_kernel``.

    Raises:
        ModuleNotFoundError: if the local module exists but one of its
            *own* imports is missing (broken dev install) — this is
            surfaced instead of being silently masked by the Hub
            fallback.
    """
    try:
        m = importlib.import_module(module)
    except ModuleNotFoundError as err:
        # Only fall back to the Hub when the requested module itself (or
        # its top-level package) is what's missing. A ModuleNotFoundError
        # raised by a dependency inside a *present* local module indicates
        # a broken environment and should propagate.
        missing = err.name
        if missing is not None and not (
            module == missing or module.startswith(missing + ".")
        ):
            raise
        return hf_load_kernel(repo_id=repo_id)
    log_once(logger.info, f"Using local module for `{module}`")
    return m
|
|
|
|
|
|
# Public API: only the loader is exported from this module.
__all__ = ["load_kernel"]
|