Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-19 22:02:06 +00:00
Add support for GPTQ Marlin kernels

GPTQ Marlin extends the Marlin kernels to support common GPTQ configurations:

- bits: 4 or 8
- groupsize: -1, 32, 64, or 128
- desc_act: true/false

Using the GPTQ Marlin kernels requires repacking the parameters in the Marlin quantizer format.

The kernels were contributed by Neural Magic to vLLM. We vendor them here for convenience.
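The repacking step converts GPTQ's packed weight layout into the tiled format the Marlin GEMM consumes. As a minimal sketch of what that call might look like, assuming the ext.cpp bindings follow the vLLM kernels they vendor (the gptq_marlin_repack signature and the helper function below are illustrative assumptions, not the quantizer's actual code):

    import torch
    import marlin_kernels  # built by the setup.py below

    def repack_gptq_to_marlin(qweight, g_idx, in_features, out_features, bits):
        # Assumed binding, modeled on the vendored vLLM kernel:
        # gptq_marlin_repack(qweight, perm, size_k, size_n, num_bits).
        if g_idx is not None:
            # desc_act=True: express activation reordering as a permutation.
            perm = torch.argsort(g_idx).to(torch.int32)
        else:
            # desc_act=False: pass an empty permutation.
            perm = torch.empty(0, dtype=torch.int32, device=qweight.device)
        return marlin_kernels.gptq_marlin_repack(
            qweight, perm, in_features, out_features, bits
        )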
22 lines · 588 B · Python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

# Left empty by default; per-compiler flags
# (e.g. {"cxx": [...], "nvcc": [...]}) can be supplied here if needed.
extra_compile_args = []

setup(
    name="marlin_kernels",
    ext_modules=[
        CUDAExtension(
            name="marlin_kernels",
            sources=[
                # CUDA kernels vendored from vLLM, plus the Python bindings.
                "marlin_kernels/gptq_marlin.cu",
                "marlin_kernels/gptq_marlin_repack.cu",
                "marlin_kernels/marlin_cuda_kernel.cu",
                "marlin_kernels/ext.cpp",
            ],
            extra_compile_args=extra_compile_args,
        ),
    ],
    cmdclass={"build_ext": BuildExtension},
)
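Building follows the usual PyTorch C++/CUDA extension flow, for example python setup.py install (or pip install .) in an environment with a CUDA toolchain and PyTorch available. A quick smoke test that the bindings loaded (the exact function list depends on what ext.cpp registers):

    import marlin_kernels

    # List the kernels ext.cpp registered; expect entries corresponding to
    # the GPTQ Marlin GEMM and repack sources named above.
    print([name for name in dir(marlin_kernels) if not name.startswith("_")])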