Fix GPTQ autotune data type to be compatible with Torch 2.4.0

This commit is contained in:
Daniël de Kok 2024-07-25 09:39:42 +00:00
parent 26614057a7
commit fa9221f28d

View File

@@ -91,7 +91,7 @@ class Autotuner(triton.KernelInterface):
                kernel_call, quantiles=(0.5, 0.2, 0.8), rep=40
            )
        except triton.OutOfResources:
-            return (float("inf"), float("inf"), float("inf"))
+            return [float("inf"), float("inf"), float("inf")]

    def run(self, *args, **kwargs):
        self.nargs = dict(zip(self.arg_names, args))