Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-21 23:12:07 +00:00)
Use FP8 GPTQ-Marlin kernels to enable FP8 support on CUDA GPUs with compute capability >= 8.0 and < 8.9.

Co-authored-by: Florian Zimmermeister <flozi00.fz@gmail.com>
15 lines · 527 B · C++
#include <torch/extension.h>

#include "ext.hh"

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("gptq_marlin_gemm", &gptq_marlin_gemm,
        "Marlin gemm with GPTQ compatibility");
  m.def("gptq_marlin_24_gemm", &gptq_marlin_24_gemm, "Marlin sparse 2:4 gemm");
  m.def("gptq_marlin_repack", &gptq_marlin_repack,
        "Repack GPTQ parameters for Marlin");
  m.def("marlin_gemm", &marlin_gemm, "Marlin gemm");
  // fp8_marlin Optimized Quantized GEMM for FP8 weight-only.
  m.def("fp8_marlin_gemm", &fp8_marlin_gemm);
}
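
For context on the binding pattern: PYBIND11_MODULE with TORCH_EXTENSION_NAME is the standard way a torch C++ extension exposes functions to Python, with each m.def registering one callable under the given name. A minimal, self-contained sketch of the same pattern, using a toy scaled_matmul function that is not part of this repository (the real entry points are declared in ext.hh and launch the Marlin CUDA kernels), could look like this:

#include <torch/extension.h>

// Toy stand-in for a GEMM entry point; illustrative only, not the Marlin kernel.
torch::Tensor scaled_matmul(const torch::Tensor &a, const torch::Tensor &b,
                            double scale) {
  return torch::matmul(a, b) * scale;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  // Exposes the C++ function to Python under the name "scaled_matmul".
  m.def("scaled_matmul", &scaled_matmul, "Toy GEMM with a scalar scale");
}

Built as a torch extension (for example via torch.utils.cpp_extension), the bound names become attributes of the resulting Python module, which is presumably how the server's Python layer reaches gptq_marlin_gemm and fp8_marlin_gemm.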
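The commit message ties the FP8 GPTQ-Marlin path to GPUs with compute capability >= 8.0 and < 8.9, i.e. Ampere-class devices that lack the native FP8 tensor-core support of Ada (8.9) and Hopper (9.0). A minimal sketch of such a gate using the CUDA runtime API (the helper use_fp8_marlin is hypothetical, not code from this repository) might be:

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: decide whether the FP8 Marlin weight-only GEMM path
// applies on the given device (assumption based on the commit message above).
bool use_fp8_marlin(int device) {
  cudaDeviceProp prop;
  if (cudaGetDeviceProperties(&prop, device) != cudaSuccess) {
    return false;
  }
  int capability = prop.major * 10 + prop.minor;
  // >= 8.0 and < 8.9: FP8 weights handled via the Marlin weight-only kernel.
  return capability >= 80 && capability < 89;
}

int main() {
  std::printf("FP8 GPTQ-Marlin path applies on device 0: %s\n",
              use_fp8_marlin(0) ? "yes" : "no");
  return 0;
}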