Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-20 22:32:07 +00:00).
* wip * rollback * refactor to use prefix/postfix namming + fix all_input_ids_tensor * maybe patching vlms? * fix filter and concat * wip, no filter, no concat * current * add prepare_for_prefill * working * load tested * re-create slots * re-create slots * fix slot_filtering_indices * feedback loop * remove log * fix benchmarker * fix vlm and seq2seq * rename to cache and input lengths * fix prefill logprobs * fix launcher * fix logprobs? * idk at this point * max input length * omfg * remove debugging lines * fix tests * fix mllama * fix cargo tests * remove support chunking for paged * Fixing non blocked attentions * Fixing dtype + AMD, Ipex targets. * lint fix. * rename * Fix prefix_caching variable, remove defaults in server (confusing a lot of the times). * Add simple resolution when user specifies ATTENTION=paged. * Put back non default simple tests. * Fix env name --------- Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
25 lines · 552 B · Python
from typing import Optional
|
|
|
|
# Process-wide configuration flags. Both start as None ("not configured
# yet") and are populated once at startup through the setter functions
# defined in this module, then read back through the matching getters.
SUPPORT_CHUNKING: Optional[bool] = None

MAX_PREFILL_TOKENS: Optional[int] = None
|
|
|
|
|
|
def set_support_chunking(support_chunking: bool):
    """Store whether prefill chunking is supported.

    Writes the flag into the module-level ``SUPPORT_CHUNKING`` global so
    that ``get_support_chunking`` can read it back later.
    """
    global SUPPORT_CHUNKING
    SUPPORT_CHUNKING = support_chunking
|
|
|
|
|
|
def get_support_chunking() -> bool:
    """Return the chunking-support flag recorded by ``set_support_chunking``.

    NOTE(review): if ``set_support_chunking`` was never called this returns
    ``None`` rather than a bool — callers are expected to configure the flag
    during startup before querying it.
    """
    # Reading a module-level global needs no ``global`` declaration;
    # the statement only matters for assignment.
    return SUPPORT_CHUNKING
|
|
|
|
|
|
def set_max_prefill_tokens(max_prefill_tokens: int):
    """Store the maximum number of prefill tokens.

    Writes the value into the module-level ``MAX_PREFILL_TOKENS`` global so
    that ``get_max_prefill_tokens`` can read it back later.
    """
    global MAX_PREFILL_TOKENS
    MAX_PREFILL_TOKENS = max_prefill_tokens
|
|
|
|
|
|
def get_max_prefill_tokens() -> int:
    """Return the prefill-token budget recorded by ``set_max_prefill_tokens``.

    NOTE(review): if ``set_max_prefill_tokens`` was never called this returns
    ``None`` rather than an int — callers are expected to configure the value
    during startup before querying it.
    """
    # Reading a module-level global needs no ``global`` declaration;
    # the statement only matters for assignment.
    return MAX_PREFILL_TOKENS
|