Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-23 07:52:06 +00:00)
* Add support for repacking AWQ weights for GPTQ-Marlin

  So far we couldn't support AWQ because virtually all AWQ models use asymmetric quantization, which GPTQ-Marlin did not support. GPTQ-Marlin has recently added support for AWQ repacking and AWQ asymmetric quantization (zero_point=True).

  This change updates all GPTQ-Marlin kernels from upstream and wires up AWQ support. For now, enabling AWQ using Marlin requires running TGI with `--quantize gptq`.

* Enable Marlin for supported AWQ configurations by default

  This makes the AWQ -> GPTQ repack test redundant, since we are now testing this with the regular AWQ test.
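As a minimal sketch of what the repacking path looks like, assuming the compiled `marlin_kernels` extension from the setup.py below exposes `awq_marlin_repack` with a `(qweight, size_k, size_n, num_bits)` signature as in the upstream kernels; the shapes and values here are illustrative, not taken from TGI itself:

# Hedged sketch: repack AWQ-packed weights into the Marlin tile layout.
# Assumes the `marlin_kernels` extension built by the setup.py below;
# the shapes and random data are illustrative only.
import torch
import marlin_kernels

size_k, size_n, num_bits = 4096, 4096, 4
pack_factor = 32 // num_bits  # eight 4-bit values per int32

# AWQ packs quantized weights along the output dimension, giving an
# int32 tensor of shape (in_features, out_features // pack_factor).
awq_qweight = torch.randint(
    0,
    2**31 - 1,
    (size_k, size_n // pack_factor),
    dtype=torch.int32,
    device="cuda",
)

# Repack into the layout the GPTQ-Marlin matmul kernels consume, which
# is what lets Marlin serve AWQ checkpoints (zero_point=True included).
marlin_qweight = marlin_kernels.awq_marlin_repack(
    awq_qweight, size_k, size_n, num_bits
)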
25 lines · 757 B · Python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

extra_compile_args = []

setup(
    name="marlin_kernels",
    ext_modules=[
        CUDAExtension(
            name="marlin_kernels",
            # CUDA kernels for GPTQ-Marlin, AWQ repacking, FP8, and 2:4
            # sparse Marlin, plus the C++ extension entry point.
            sources=[
                "marlin_kernels/awq_marlin_repack.cu",
                "marlin_kernels/fp8_marlin.cu",
                "marlin_kernels/gptq_marlin.cu",
                "marlin_kernels/gptq_marlin_repack.cu",
                "marlin_kernels/marlin_cuda_kernel.cu",
                "marlin_kernels/sparse/marlin_24_cuda_kernel.cu",
                "marlin_kernels/ext.cpp",
            ],
            extra_compile_args=extra_compile_args,
        ),
    ],
    # BuildExtension dispatches the .cu sources to nvcc during build_ext.
    cmdclass={"build_ext": BuildExtension},
)
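If per-compiler flags are needed later, `extra_compile_args` can also be a dict keyed by compiler, which torch's `CUDAExtension` understands; the flags below are an illustrative sketch, not ones taken from this repository:

# Hedged sketch: per-compiler flags for CUDAExtension. torch's build
# system routes the "cxx" list to the host C++ compiler and the "nvcc"
# list to the CUDA compiler; these particular flags are examples only.
extra_compile_args = {
    "cxx": ["-O3"],
    "nvcc": ["-O3", "--use_fast_math"],
}

Building then follows the usual torch extension path, e.g. `pip install .` or `python setup.py build_ext --inplace`, with `BuildExtension` invoking nvcc for the .cu sources.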