Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-20 14:22:08 +00:00)
Tested with

```
CUDA_VISIBLE_DEVICES=0 text-generation-launcher --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq
EXLLAMA_VERSION=1 CUDA_VISIBLE_DEVICES=0 text-generation-launcher --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq
CUDA_VISIBLE_DEVICES="0,1" text-generation-launcher --model-id TheBloke/Llama-2-7B-Chat-GPTQ --quantize gptq
```

all with good and identical results on MI210.

---------

Co-authored-by: Felix Marty <felix@hf.co>
Co-authored-by: OlivierDehaene <olivier@huggingface.co>
Co-authored-by: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
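As a quick sanity check after launching, one way to exercise the server is to call the router's `/generate` endpoint. The sketch below is an assumption-laden example, not part of the tested commands: it assumes the launcher is running locally on its default port (3000) and that the endpoint path and response shape are unchanged; adjust host and port to your deployment.

```python
# Minimal sketch of querying a running text-generation-launcher instance.
# Assumes the router listens on the default port 3000 and exposes /generate;
# adjust the URL for your setup.
import requests

resp = requests.post(
    "http://localhost:3000/generate",
    json={
        "inputs": "What is GPTQ quantization?",
        "parameters": {"max_new_tokens": 64},
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["generated_text"])
```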
29 lines · 703 B · Python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

import torch

# "-lineinfo" keeps source line information in the compiled kernels (useful for
# profiling), and "-O3" enables full optimization.
extra_cuda_cflags = ["-lineinfo", "-O3"]

# torch.version.hip is set on ROCm builds; enable HIP half-precision types in hipBLAS.
if torch.version.hip:
    extra_cuda_cflags += ["-DHIPBLAS_USE_HIP_HALF"]

extra_compile_args = {
    "nvcc": extra_cuda_cflags,
}

setup(
    name="exllamav2_kernels",
    ext_modules=[
        CUDAExtension(
            name="exllamav2_kernels",
            sources=[
                "exllamav2_kernels/ext.cpp",
                "exllamav2_kernels/cuda/q_matrix.cu",
                "exllamav2_kernels/cuda/q_gemm.cu",
            ],
            extra_compile_args=extra_compile_args,
        )
    ],
    cmdclass={"build_ext": BuildExtension},
)
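After building and installing the extension (for example with `pip install .` from this directory), a quick smoke test is to import the module and list what it exposes. This is a hedged sketch only: the actual kernel entry points are defined in exllamav2_kernels/ext.cpp and are not assumed here.

```python
# Smoke test: verify the extension imports and report the active toolchain.
# No specific exported symbols are assumed; dir() just lists whatever ext.cpp binds.
import torch
import exllamav2_kernels

print("HIP:", torch.version.hip, "CUDA:", torch.version.cuda)
print([name for name in dir(exllamav2_kernels) if not name.startswith("_")])
```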