Mirror of https://github.com/huggingface/text-generation-inference.git
synced 2025-09-10 20:04:52 +00:00
Local gptq support.
This commit is contained in:
parent
3ef5ffbc64
commit
f29e3d7d34
@@ -1,3 +1,4 @@
|
|||||||
|
import os
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import List, Dict, Optional, Tuple
|
from typing import List, Dict, Optional, Tuple
|
||||||
from safetensors import safe_open, SafetensorError
|
from safetensors import safe_open, SafetensorError
|
||||||
@@ -221,8 +222,12 @@ class Weights:
|
|||||||
return bits, groupsize
|
return bits, groupsize
|
||||||
|
|
||||||
def _set_gptq_params(self, model_id):
|
def _set_gptq_params(self, model_id):
|
||||||
|
filename = "quantize_config.json"
|
||||||
try:
|
try:
|
||||||
filename = hf_hub_download(model_id, filename="quantize_config.json")
|
if not os.path.exists(os.path.join(model_id, filename)):
|
||||||
|
filename = os.path.join(model_id, filename)
|
||||||
|
else:
|
||||||
|
filename = hf_hub_download(model_id, filename=filename)
|
||||||
with open(filename, "r") as f:
|
with open(filename, "r") as f:
|
||||||
data = json.load(f)
|
data = json.load(f)
|
||||||
self.gptq_bits = data["bits"]
|
self.gptq_bits = data["bits"]
|
||||||
|
Loading…
Reference in New Issue
Block a user