diff --git a/server/text_generation_server/models/flash_causal_lm.py b/server/text_generation_server/models/flash_causal_lm.py
index e68a2100..21ed4f6c 100644
--- a/server/text_generation_server/models/flash_causal_lm.py
+++ b/server/text_generation_server/models/flash_causal_lm.py
@@ -777,7 +777,7 @@ class FlashCausalLM(Model):
             self.device,
         )

-        if os.getenv("ENABLE_CUDA_GRAPHS", "false") == "true":
+        if os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True":
            try:
                # Warmup cuda graphs for all power of twos until 64
                for i in range(6):
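
Illustrative sketch (not part of the patch): the string comparison in the gate
is case-sensitive, so after this change setting ENABLE_CUDA_GRAPHS=true in the
environment no longer enables CUDA graph warmup; only the exact string "True"
does. A minimal, self-contained demonstration of that behavior:

    import os

    # Mirrors the patched gate; a standalone illustration, not repository code.
    os.environ["ENABLE_CUDA_GRAPHS"] = "true"
    print(os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True")  # False: case mismatch

    os.environ["ENABLE_CUDA_GRAPHS"] = "True"
    print(os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True")  # True: exact match

    del os.environ["ENABLE_CUDA_GRAPHS"]
    print(os.getenv("ENABLE_CUDA_GRAPHS", "False") == "True")  # False: default "False"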