fix: fix quant linear autotune

OlivierDehaene 2023-12-14 16:45:47 +01:00 committed by Karol Damaszke
parent 28fcdcca6d
commit b3c2d7291e


@@ -88,7 +88,7 @@ class Autotuner(triton.KernelInterface):
             # In testings using only 40 reps seems to be close enough and it appears to be what PyTorch uses
             # PyTorch also sets fast_flush to True, but I didn't see any speedup so I'll leave the default
             return triton.testing.do_bench(
-                kernel_call, percentiles=(0.5, 0.2, 0.8), rep=40
+                kernel_call, quantiles=(0.5, 0.2, 0.8), rep=40
             )
         except triton.OutOfResources:
             return (float("inf"), float("inf"), float("inf"))