This change adds support for 2:4 sparsity when using Marlin quantization. The 2:4 kernel is used when:

* the quantizer is `marlin`;
* the quantizer checkpoint format is `marlin_24`.

Fixes #2098.
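As a minimal sketch of that selection rule (the function and parameter names here are illustrative, not TGI's actual API), the dispatch might look like:

```python
# Hypothetical sketch of the kernel-selection rule described above; the
# function and parameter names are illustrative, not TGI's actual API.
def use_marlin_24_kernel(quantize: str, checkpoint_format: str | None) -> bool:
    # The sparse 2:4 kernel applies only to Marlin-quantized checkpoints
    # saved in the `marlin_24` format; other Marlin checkpoints fall back
    # to the dense `marlin_gemm` kernel.
    return quantize == "marlin" and checkpoint_format == "marlin_24"
```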
```cpp
#include <torch/extension.h>

#include "ext.hh"

// Python bindings for the Marlin CUDA kernels, including the new
// sparse 2:4 gemm.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("gptq_marlin_gemm", &gptq_marlin_gemm,
        "Marlin gemm with GPTQ compatibility");
  m.def("gptq_marlin_24_gemm", &gptq_marlin_24_gemm, "Marlin sparse 2:4 gemm");
  m.def("gptq_marlin_repack", &gptq_marlin_repack,
        "Repack GPTQ parameters for Marlin");
  m.def("marlin_gemm", &marlin_gemm, "Marlin gemm");
}
```
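Once compiled, pybind11 exposes these four kernels as ordinary Python functions. A hedged usage sketch, assuming the extension builds under the module name `marlin_kernels`:

```python
# Assumes the extension above compiles to a module named `marlin_kernels`;
# the actual name depends on TORCH_EXTENSION_NAME at build time.
import marlin_kernels

# Each binding carries the docstring passed to m.def(...); pybind11
# prepends the generated call signature.
print(marlin_kernels.gptq_marlin_24_gemm.__doc__)
```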