Small fix.

Nicolas Patry 2023-07-21 10:20:01 +00:00
parent c07ee68b60
commit 95583ee257
2 changed files with 3 additions and 1 deletion


@@ -63,7 +63,7 @@ class FlashSantacoderSharded(FlashCausalLM):
             aliases={"transformer.wte.weight": ["lm_head.weight"]},
         )
         if config.quantize == "gptq":
-            weights.set_gptq_params(model_id)
+            weights._set_gptq_params(model_id)
         model = FlashSantacoderForCausalLM(config, weights)


@@ -3,6 +3,8 @@ from typing import List, Dict, Optional, Tuple
 from safetensors import safe_open, SafetensorError
 import torch
 from loguru import logger
+from huggingface_hub import hf_hub_download
+import json
 class Weights:
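
The weights module now imports hf_hub_download and json, presumably so the renamed private helper _set_gptq_params can fetch the model's GPTQ quantization metadata from the Hub. Below is a minimal sketch of how those imports could be used together; the quantize_config.json filename and the bits/group_size keys are conventional GPTQ metadata and are assumptions, not part of this diff.

```python
# Sketch (assumption): one plausible way hf_hub_download and json are combined
# inside a _set_gptq_params helper on the Weights class.
import json

from huggingface_hub import hf_hub_download


class Weights:
    def _set_gptq_params(self, model_id: str):
        # Download the conventional GPTQ metadata file from the Hub
        # (filename and keys assumed, not shown in the commit).
        filename = hf_hub_download(model_id, filename="quantize_config.json")
        with open(filename, "r") as f:
            data = json.load(f)
        # Store quantization parameters for later use when loading weights.
        self.gptq_bits = data["bits"]
        self.gptq_groupsize = data["group_size"]
```

This also explains the call-site change in the first hunk: the helper is addressed as an internal method (_set_gptq_params) rather than part of the public Weights API.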