mirror of https://github.com/huggingface/text-generation-inference.git
synced 2025-04-20 06:12:07 +00:00
* Making prefix/flashinfer the default and testing the full release tests.
* Include flashinfer in the docker.
* Using prebuilt.
* Allowing window_left_size (dummy version).
* Disabling flashinfer/prefix caching on odd head_dim
* Disable prefix caching for lora.
* More specific codes.
* Update lock
* Updating integration tests with new values with FI/FD. Remove paged as a default too, and using FD everywhere.
* Update cargo lock?
* Upgrade to 1.80 because of bitstream...
* Everywhere 1.80
* Forgot last default place.
* Apply suggestions from code review (Co-authored-by: drbh <david.richard.holtz@gmail.com>)
* Updated flake lock
* Tmp
* Upgrade resolution system for fewer errors in resolution.
* Remove lambda for cleaner function.
* Handling debugger.
* Override the env in server tests.
* Is this enough to make it work?
* This seems to be working.
* Downgrade some logs.
* Fixing the default for vlm.
* Don't enable prefix caching on VLM just yet.
* Change `add_special_tokens` in order to have the correct tokens for chat input and not (since it's super important with the prefixing now)
* Fixing prefix caching for flashdecoding.
* Update all models.
* Fixed flashinfer version.
* add_special_tokens is internal only
* Fixing seqlen with the new vlms.
* Fixing the issue with `add_special_tokens` not being passed around.
* Fixing the test.
* Removing encoder_decoder (seq2seq).
* Update the chat test.
* Fixing the batching tokenization in flash causal lm.
* Truncating left for radix purposes.
* Oops, this doesn't belong here.
* Put back default pure shell.
* Update server tests
  - Default to throughput test in k6
  - Use TGI_WIGGLE_ROOM to adjust wiggle room
* Only n_heads / process_group.size() are necessary.
* Revert the integration tests change (seems linked to head_size modification).
* Adding error message when assert is violated.
* Fixing the free algorithm to handle cases where the common prefix is smaller.
* Apply suggestions from code review (Co-authored-by: OlivierDehaene <olivier@huggingface.co>)
* Update server/text_generation_server/layers/attention/common.py (Co-authored-by: OlivierDehaene <olivier@huggingface.co>)
* Fix disabling prefix caching
  - Fix windowing checks.
* Revert the Cohere tokenizer change (for now using a revision instead).
* Fmt.

---------

Co-authored-by: drbh <david.richard.holtz@gmail.com>
Co-authored-by: OlivierDehaene <olivier@huggingface.co>
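The change list above mentions overriding the env in server tests, and the file shown below reads all of its configuration from environment variables. Here is a minimal sketch of how a test might pin those variables before importing the module, assuming the module path `text_generation_server.models.globals` and a pytest-style `monkeypatch` fixture (the test itself is illustrative, not part of this PR):

# Illustrative test sketch (not from the repository): pin the env vars this module
# reads, then import/reload it so the module-level code picks them up.
import importlib


def test_globals_read_env(monkeypatch):
    monkeypatch.setenv("USE_PREFIX_CACHING", "1")
    monkeypatch.setenv("ATTENTION", "flashinfer")
    monkeypatch.setenv("TGI_WIGGLE_ROOM", "0.95")
    monkeypatch.setenv("CUDA_GRAPHS", "1,2,4,8")

    import text_generation_server.models.globals as tgi_globals

    # Reload in case another test already imported the module with a different env.
    tgi_globals = importlib.reload(tgi_globals)

    assert tgi_globals.PREFIX_CACHING is True
    assert tgi_globals.BLOCK_SIZE == 1  # flashinfer pages the KV cache per token
    assert tgi_globals.CUDA_GRAPHS == [8, 4, 2, 1]  # sorted in descending order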
64 lines
1.9 KiB
Python
import torch
import os
from loguru import logger
from typing import Dict, Optional

from text_generation_server.utils.log import log_master

# Normally set by the launcher; default to disabled when the variable is missing
# instead of crashing on `None.lower()`.
PREFIX_CACHING = os.getenv("USE_PREFIX_CACHING", "0").lower() in {"1", "true"}
log_master(logger.info, f"Using prefix caching = {PREFIX_CACHING}")
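# e.g. USE_PREFIX_CACHING=1 or USE_PREFIX_CACHING=true enables prefix caching; any
# other value (including the "0" fallback above when the launcher does not set it)
# disables it.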
ATTENTION = os.getenv("ATTENTION")
_expected = {"paged", "flashdecoding", "flashinfer"}
assert (
    ATTENTION in _expected
), f"Attention is not valid {ATTENTION}, expected {_expected}"
log_master(logger.info, f"Using Attention = {ATTENTION}")

if PREFIX_CACHING and ATTENTION not in {"flashinfer", "flashdecoding"}:
    raise RuntimeError(
        "Prefix caching is only supported with flashinfer or flashdecoding"
    )

# Opaque handle so all CUDA graph captures share one memory pool (None without CUDA).
MEM_POOL = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None
TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.95"))
assert TGI_WIGGLE_ROOM > 0
assert TGI_WIGGLE_ROOM < 1
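# The wiggle room is used elsewhere in the server as a safety factor on the measured
# free memory, so it must stay strictly between 0 and 1 (e.g. 0.95 roughly means
# "assume only 95% of the reported free memory is actually usable").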

# This is overridden by the cli
BLOCK_SIZE: int
if ATTENTION == "flashdecoding":
    BLOCK_SIZE = 256
elif ATTENTION == "flashinfer":
    BLOCK_SIZE = 1
else:
    BLOCK_SIZE = 16
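# In practice this means flashdecoding allocates the KV cache in 256-token blocks,
# flashinfer pages at single-token granularity, and paged attention uses 16-token blocks.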

cuda_graphs = os.getenv("CUDA_GRAPHS")
if cuda_graphs is not None:
    try:
        cuda_graphs = [int(item) for item in cuda_graphs.split(",")]
    except Exception as e:
        raise RuntimeError(
            f"Could not parse CUDA_GRAPHS={cuda_graphs}, expected a comma-separated list of batch sizes to run on: {e}"
        )
else:
    cuda_graphs = None
# Sorting the cuda graph batch sizes in descending order helps reduce the memory impact.
if cuda_graphs is not None:
    cuda_graphs.sort(reverse=True)

CUDA_GRAPHS = cuda_graphs
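# e.g. CUDA_GRAPHS="1,2,4,8" yields CUDA_GRAPHS == [8, 4, 2, 1]; leaving the variable
# unset leaves CUDA_GRAPHS as None (no graph batch sizes), while a malformed value such
# as "8,big" aborts at import time with the RuntimeError above.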

# NOTE: eventually we should move this into the router and pass back the
# index in all cases.
ADAPTER_TO_INDEX: Optional[Dict[str, int]] = None


def set_adapter_to_index(adapter_to_index: Dict[str, int]):
    global ADAPTER_TO_INDEX
    ADAPTER_TO_INDEX = adapter_to_index


def get_adapter_to_index():
    global ADAPTER_TO_INDEX
    return ADAPTER_TO_INDEX
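The adapter registry at the bottom of the file is the only mutable global here. A short illustration of how calling code could use it, assuming the same module path as above (the snippet is hypothetical; in the real server the mapping is presumably filled in when LoRA adapters are loaded):

from text_generation_server.models.globals import (
    get_adapter_to_index,
    set_adapter_to_index,
)

# Register adapter name -> index once, e.g. at model load time.
set_adapter_to_index({"my-lora-adapter": 1})

# Later lookups go through the getter; it returns None until something was registered.
adapter_map = get_adapter_to_index()
index = adapter_map.get("my-lora-adapter") if adapter_map is not None else None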