From 535ce23827e2aa38ecb34fe0fcd16b5170701133 Mon Sep 17 00:00:00 2001
From: kaixuanliu
Date: Mon, 12 May 2025 15:58:43 +0800
Subject: [PATCH] Adjust the `round_up_seq` logic in Gaudi backend (#3224)

Signed-off-by: Liu, Kaixuan
---
 .../gaudi/server/text_generation_server/models/causal_lm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backends/gaudi/server/text_generation_server/models/causal_lm.py b/backends/gaudi/server/text_generation_server/models/causal_lm.py
index b501d488..dd6e070d 100644
--- a/backends/gaudi/server/text_generation_server/models/causal_lm.py
+++ b/backends/gaudi/server/text_generation_server/models/causal_lm.py
@@ -73,7 +73,7 @@ def torch_compile_for_eager(func):
 
 
 def round_up_seq(number, k, base):
-    exponent = math.ceil(math.log(number / k, base))
+    exponent = max(0, math.ceil(math.log(number / k, base)))
     return int(k * (base**exponent))
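
For context, a minimal standalone sketch of the effect of this change (the `_old`/`_new` names are illustrative only, and the reading of `k` as the minimum bucket size and `base` as the bucketing growth factor is an assumption, not stated in the patch itself): without the `max(0, ...)` clamp, a sequence length below `k` yields a negative exponent and the result rounds below `k`; with the clamp, `k` becomes the floor.

import math

def round_up_seq_old(number, k, base):
    # Pre-patch behavior: when number < k, log(number / k, base) is negative,
    # so the exponent can be negative and the result drops below k.
    exponent = math.ceil(math.log(number / k, base))
    return int(k * (base**exponent))

def round_up_seq_new(number, k, base):
    # Patched behavior: clamp the exponent at 0 so the result is never
    # smaller than k (assumed here to be the minimum bucket size).
    exponent = max(0, math.ceil(math.log(number / k, base)))
    return int(k * (base**exponent))

if __name__ == "__main__":
    # Short sequence (number < k): the old code rounds below k.
    print(round_up_seq_old(3, 8, 2))   # 4  -- falls below the minimum of 8
    print(round_up_seq_new(3, 8, 2))   # 8  -- clamped to the minimum bucket

    # Longer sequence (number > k): both versions agree.
    print(round_up_seq_old(100, 8, 2))  # 128
    print(round_up_seq_new(100, 8, 2))  # 128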