From 2d4ae09074a4f83f9594a791c476d79f927e7ad2 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Wed, 4 Oct 2023 15:50:56 +0000
Subject: [PATCH] Fixing GPTQ exllama kernel usage.

---
 server/text_generation_server/utils/weights.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
index 4bae8cc0..2f330d9c 100644
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -212,7 +212,9 @@ class Weights:
                 g_idx = None
 
             bits, groupsize = self._get_gptq_params()
-            weight = (qweight, qzeros, scales, g_idx, bits, groupsize, False)
+            from text_generation_server.utils.layers import HAS_EXLLAMA
+            use_exllama = bits == 4 and HAS_EXLLAMA and quantize == "gptq"
+            weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama)
         else:
             w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
             weight = torch.cat(w, dim=dim)
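
Note: the hunk replaces the hard-coded `False` with a computed `use_exllama`
flag, so the exllama fast path is requested only when all three conditions
hold: the checkpoint is 4-bit, the kernel was importable (`HAS_EXLLAMA` from
`text_generation_server.utils.layers`), and the quantization scheme is GPTQ.
The sketch below restates that gate as a standalone function so the conditions
can be checked in isolation. `should_use_exllama` and the stubbed
`HAS_EXLLAMA` value are illustrative assumptions, not names from the patch
(in the patch the condition is computed inline inside the `Weights` class),
and the 4-bit restriction reflects the exllama kernel's GPTQ support.

# Minimal sketch of the gating logic added by this patch.
# Assumption: HAS_EXLLAMA stands in for the flag exported by
# text_generation_server.utils.layers, which is set at import time
# based on whether the exllama CUDA kernels loaded successfully.
HAS_EXLLAMA = True

def should_use_exllama(bits: int, quantize: str) -> bool:
    # exllama only accelerates 4-bit GPTQ; any other bit width or
    # quantization scheme falls back to the generic GPTQ matmul path.
    return bits == 4 and HAS_EXLLAMA and quantize == "gptq"

assert should_use_exllama(4, "gptq")        # fast path taken
assert not should_use_exllama(8, "gptq")    # 8-bit: kernel unsupported
assert not should_use_exllama(4, "awq")     # non-GPTQ checkpoint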