Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-09-09 19:34:53 +00:00)
Adjust the round_up_seq logic to align with the prefill warmup phase on HPU

Signed-off-by: Liu, Kaixuan <kaixuan.liu@intel.com>
parent c94f415af4
commit c264a42aa1
@@ -73,7 +73,7 @@ def torch_compile_for_eager(func):
 
 
 def round_up_seq(number, k, base):
-    exponent = math.ceil(math.log(number / k, base))
+    exponent = max(0, math.ceil(math.log(number / k, base)))
     return int(k * (base**exponent))
 
 
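The effect of the one-line change is easiest to see with concrete numbers. The sketch below contrasts the old and new formulas; k = 128 and base = 2 are illustrative values only, since the actual sequence-bucket parameters used on HPU are configured elsewhere in the backend and are not part of this hunk.

import math

# Illustrative bucket parameters only; the real k and base come from the
# HPU bucketing/warmup configuration in TGI, not from this hunk.
K, BASE = 128, 2

def round_up_seq_old(number, k, base):
    # Pre-patch formula: for number < k the exponent goes negative,
    # so the "rounded up" length can fall below k.
    exponent = math.ceil(math.log(number / k, base))
    return int(k * (base**exponent))

def round_up_seq_new(number, k, base):
    # Patched formula: clamping the exponent at 0 makes k the floor,
    # so short sequences land on the smallest bucket assumed here.
    exponent = max(0, math.ceil(math.log(number / k, base)))
    return int(k * (base**exponent))

print(round_up_seq_old(20, K, BASE))   # 32  -> below k, i.e. below the smallest assumed bucket
print(round_up_seq_new(20, K, BASE))   # 128 -> clamped up to k
print(round_up_seq_old(200, K, BASE))  # 256 (unchanged for number >= k)
print(round_up_seq_new(200, K, BASE))  # 256

For number >= k both versions agree; the clamp only changes the result for short sequences, rounding them up to k instead of down to a smaller length, which appears to be what the commit message means by aligning with the prefill warmup phase on HPU.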