Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-24 00:12:08 +00:00
* Add support for repacking AWQ weights for GPTQ-Marlin

  So far we couldn't support AWQ because virtually all AWQ models use asymmetric quantization, which GPTQ-Marlin did not support. GPTQ-Marlin has recently added support for AWQ repacking and AWQ asymmetric quantization (zero_point=True). This change updates all GPTQ-Marlin kernels from upstream and wires up AWQ support. For now, enabling AWQ using Marlin requires running TGI with `--quantize gptq`.

* Enable Marlin for supported AWQ configurations by default

  This makes the AWQ -> GPTQ repack test redundant, since we are now testing this with the regular AWQ test. A sketch of what such a configuration check might look like follows below.
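To illustrate what "supported AWQ configurations" could mean in practice, here is a minimal sketch of a capability check. The helper name, the accepted group sizes, and the compute-capability cutoff are assumptions for illustration only, not TGI's actual selection logic:

# Hypothetical sketch: decide whether an AWQ checkpoint can take the Marlin path.
# The helper name and the exact accepted values are assumptions, not TGI's real rules;
# the field names (bits, group_size, zero_point) follow common AWQ quantization configs.
def can_use_awq_marlin(bits: int, group_size: int, zero_point: bool, compute_capability: int) -> bool:
    if compute_capability < 80:               # Marlin kernels target Ampere (sm_80) and newer
        return False
    if bits != 4:                             # the repack path sketched here assumes 4-bit AWQ weights
        return False
    if group_size not in (-1, 32, 64, 128):   # group sizes the Marlin GEMM is typically written for
        return False
    # Asymmetric quantization (zero_point=True) is exactly what the updated kernels add,
    # so it no longer forces a fallback to the plain AWQ kernels.
    return True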
17 lines · 621 B · C++
#include <torch/extension.h>

#include "ext.hh"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("awq_marlin_repack", &awq_marlin_repack,
        "Repack AWQ parameters for Marlin");
  m.def("gptq_marlin_gemm", &gptq_marlin_gemm,
        "Marlin gemm with GPTQ compatibility");
  m.def("gptq_marlin_24_gemm", &gptq_marlin_24_gemm, "Marlin sparse 2:4 gemm");
  m.def("gptq_marlin_repack", &gptq_marlin_repack,
        "Repack GPTQ parameters for Marlin");
  m.def("marlin_gemm", &marlin_gemm, "Marlin gemm");
  // fp8_marlin: optimized quantized GEMM for FP8 weight-only.
  m.def("fp8_marlin_gemm", &fp8_marlin_gemm);
}
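Because these functions are exposed through a torch extension, they are consumed from Python. Below is a minimal usage sketch for the repack binding; the module name `marlin_kernels`, the argument order (packed weights, size_k, size_n, num_bits), and the tensor shapes are assumptions based on common Marlin repack conventions, not verified against `ext.hh`:

# Sketch of calling the AWQ repack binding from Python. Only the exported function name
# comes from the bindings above; the module name and argument order are assumed.
import torch
import marlin_kernels  # assumed name of the compiled torch extension

k, n, bits = 4096, 4096, 4           # in-features, out-features, AWQ bit width
pack_factor = 32 // bits             # 8 four-bit values packed into each int32

# AWQ stores quantized weights as int32 packed along the output dimension: (k, n // pack_factor).
awq_qweight = torch.randint(
    0, 2**31 - 1, (k, n // pack_factor), dtype=torch.int32, device="cuda"
)

# Repack into the tile layout expected by the Marlin GEMM kernels; the result is then used
# by the GPTQ-Marlin GEMM together with suitably permuted scales and zero points.
marlin_qweight = marlin_kernels.awq_marlin_repack(awq_qweight, k, n, bits)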