From 89a4e723d24fd528f3b3f790da10e9e5c104d141 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Sat, 12 Aug 2023 09:06:49 +0200
Subject: [PATCH] Attempting to fix torch leak.

---
 server/text_generation_server/models/causal_lm.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index cbdf4808..2e7c41d9 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -659,6 +659,7 @@ class CausalLM(Model):
 
         # We finished all generations in the batch; there is no next batch
         if stopped:
+            torch.cuda.empty_cache()
             return generations, None
 
         # Slice unused values from prefill
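
Editorial note (not part of the patch): the one-line change above calls torch.cuda.empty_cache() once all generations in a batch have finished. The sketch below, standalone and separate from the TGI code, illustrates the PyTorch behavior this relies on: the caching allocator keeps freed blocks reserved for reuse, which can look like a leak to external monitors such as nvidia-smi, and empty_cache() returns those cached blocks to the driver without affecting live tensors.

import torch

# A minimal, hypothetical demonstration; it assumes a CUDA device is present.
if torch.cuda.is_available():
    x = torch.empty(1024, 1024, device="cuda")           # allocate ~4 MB of GPU memory
    print("allocated:", torch.cuda.memory_allocated())   # bytes held by live tensors
    print("reserved: ", torch.cuda.memory_reserved())    # bytes cached by the allocator

    del x  # the tensor is freed, but the allocator keeps its block cached
    print("after del, reserved:", torch.cuda.memory_reserved())

    torch.cuda.empty_cache()  # release cached, unused blocks back to the driver
    print("after empty_cache, reserved:", torch.cuda.memory_reserved())

Calling empty_cache() at a batch boundary, as the patch does, trades some reallocation cost on the next batch for a lower reported memory footprint between batches.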