Add more wiggle room.

Nicolas Patry 2025-04-24 14:48:47 +02:00
parent 375802948d
commit 90989a4a04

@@ -15,9 +15,9 @@ PREFIX_CACHING = os.environ["PREFIX_CACHING"].lower() in {
 PREFILL_CHUNKING = os.getenv("PREFILL_CHUNKING", "1").lower() in {"1", "true"}
 log_master(logger.info, f"Using prefix caching = {PREFIX_CACHING}")
 _expected = {"paged", "flashdecoding", "flashdecoding-ipex", "flashinfer"}
-assert (
-    ATTENTION in _expected
-), f"Attention is not valid {ATTENTION}, expected {_expected}"
+assert ATTENTION in _expected, (
+    f"Attention is not valid {ATTENTION}, expected {_expected}"
+)
 log_master(logger.info, f"Using Attention = {ATTENTION}")
 if PREFIX_CACHING and ATTENTION not in {
@@ -28,7 +28,8 @@ if PREFIX_CACHING and ATTENTION not in {
     raise RuntimeError("Prefix caching is only supported with flashinfer")
 MEM_POOL = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None
-TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.93"))
+# Test a 70B model on 4xA100 under load for latest failure
+TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.90"))
 assert TGI_WIGGLE_ROOM > 0
 assert TGI_WIGGLE_ROOM < 1
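
The net change lowers the default safety factor from 0.93 to 0.90, reserving roughly 3% more free GPU memory as headroom after a 70B model on 4xA100 failed under load. For context, a minimal sketch of how such a factor is typically applied when sizing the KV cache; the helper name and call site below are assumptions for illustration, not TGI's actual code:

import os

import torch

# Safety factor applied to the measured free GPU memory before sizing the
# KV cache; values below 1.0 leave headroom for allocator fragmentation
# and CUDA-graph overhead.
TGI_WIGGLE_ROOM = float(os.getenv("TGI_WIGGLE_ROOM", "0.90"))
assert 0 < TGI_WIGGLE_ROOM < 1


def usable_kv_cache_bytes(device: int = 0, wiggle_room: float = TGI_WIGGLE_ROOM) -> int:
    # Hypothetical helper: scale down what CUDA reports as free so the
    # server does not run out of memory when overhead spikes under load.
    free_bytes, _total_bytes = torch.cuda.mem_get_info(device)
    return int(free_bytes * wiggle_room)

On a card reporting 40 GiB free, this budgets roughly 36 GiB for KV-cache blocks; dropping the default from 0.93 to 0.90 gives up about 1.2 GiB of cache capacity in exchange for the extra wiggle room.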