# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.

import os
import psutil
import signal
import sys
import typer

from pathlib import Path
from loguru import logger
from typing import Optional
from enum import Enum
from huggingface_hub import hf_hub_download

app = typer.Typer()
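

# The Typer app below exposes three commands: `serve`, `download_weights` and
# `quantize`. The enums that follow define the values accepted by the
# `--quantize` and `--dtype` options.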
class Quantization(str, Enum):
    bitsandbytes = "bitsandbytes"
    gptq = "gptq"


class Dtype(str, Enum):
    float16 = "float16"
    bfloat16 = "bfloat16"
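

# Example invocation (illustrative only; the model id is a placeholder and the
# console script is assumed to be named `text-generation-server`):
#   text-generation-server serve <model_id> --dtype bfloat16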
@app.command()
def serve(
    model_id: str,
    revision: Optional[str] = None,
    sharded: bool = False,
    quantize: Optional[Quantization] = None,
    speculate: Optional[int] = None,
    dtype: Optional[Dtype] = None,
    trust_remote_code: bool = False,
    uds_path: Path = "/tmp/text-generation-server",
    logger_level: str = "INFO",
    json_output: bool = False,
    otlp_endpoint: Optional[str] = None,
):
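    # When sharding is requested, the distributed rendezvous variables checked
    # below are expected to be provided by the launcher environment.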
    if sharded:
        assert (
            os.getenv("WORLD_SIZE", None) is not None
        ), "WORLD_SIZE must be set when sharded is True"
        assert (
            os.getenv("MASTER_ADDR", None) is not None
        ), "MASTER_ADDR must be set when sharded is True"
        assert (
            os.getenv("MASTER_PORT", None) is not None
        ), "MASTER_PORT must be set when sharded is True"

    # Remove default handler
    logger.remove()
    logger.add(
        sys.stdout,
        format="{message}",
        filter="text_generation_server",
        level=logger_level,
        serialize=json_output,
        backtrace=True,
        diagnose=False,
    )
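    # Note: `serialize=json_output` makes loguru emit each record as a JSON line
    # when `--json-output` is passed.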

    # Import here after the logger is added to log potential import exceptions
    from text_generation_server import server
    from text_generation_server.tracing import setup_tracing

    # Setup OpenTelemetry distributed tracing
    if otlp_endpoint is not None:
        setup_tracing(shard=os.getenv("RANK", 0), otlp_endpoint=otlp_endpoint)

    # Downgrade enum into str for easier management later on
    quantize = None if quantize is None else quantize.value
    dtype = "bfloat16" if dtype is None else dtype.value

    logger.info("CLI SHARDED = {} DTYPE = {}".format(sharded, dtype))
    if sharded:
        tgi_file = Path(__file__).resolve().parent / "tgi_service.py"
        num_shard = int(os.getenv("WORLD_SIZE", "1"))
        logger.info("CLI SHARDED = {}".format(num_shard))
        import subprocess

        cmd = f"deepspeed --num_nodes 1 --num_gpus {num_shard} --no_local_rank {tgi_file}"
        cmd += f" --model_id {model_id} --revision {revision} --sharded {sharded}"
        cmd += f" --dtype {dtype} --trust_remote_code {trust_remote_code} --uds_path {uds_path}"
        if speculate is not None:
            cmd += f" --speculate {speculate}"
        logger.info("CLI server start deepspeed = {}".format(cmd))
        sys.stdout.flush()
        sys.stderr.flush()
        with subprocess.Popen(cmd, shell=True, executable="/bin/bash") as proc:
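            # Forward SIGTERM to the whole DeepSpeed process tree: the handler only
            # sets a flag, and the wait loop below terminates the children, killing
            # any process that survives the 30 s grace period.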
            do_terminate = False
            current_handler = signal.getsignal(signal.SIGTERM)

            def terminate_handler(sig, frame):
                nonlocal do_terminate
                do_terminate = True
                if callable(current_handler):
                    current_handler(sig, frame)

            signal.signal(signal.SIGTERM, terminate_handler)

            finished = False
            while not finished:
                try:
                    if do_terminate:
                        parent = psutil.Process(proc.pid)
                        all_procs = parent.children(recursive=True) + [parent]
                        for p in all_procs:
                            try:
                                p.terminate()
                            except psutil.NoSuchProcess:
                                pass
                        _, alive = psutil.wait_procs(all_procs, timeout=30)
                        for p in alive:
                            p.kill()

                        do_terminate = False

                    proc.wait(timeout=3)
                except subprocess.TimeoutExpired:
                    pass
                else:
                    finished = True

            sys.stdout.flush()
            sys.stderr.flush()
            if proc.returncode != 0:
                logger.error(f"{cmd} exited with status = {proc.returncode}")
                return proc.returncode
    else:
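        # Unsharded case: run the server directly in this process, listening on
        # the unix socket at `uds_path`.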
        server.serve(
            model_id,
            revision,
            sharded,
            speculate,
            dtype,
            trust_remote_code,
            uds_path,
        )


@app.command()
def download_weights(
    model_id: str,
    revision: Optional[str] = None,
    extension: str = ".safetensors",
    auto_convert: bool = True,
    logger_level: str = "INFO",
    json_output: bool = False,
    trust_remote_code: bool = False,
):
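    # Resolution order: weights already on disk -> PEFT adapter or Medusa head on
    # the Hub (redirected to their base model) -> safetensors on the Hub -> local
    # Medusa/PEFT directories -> .bin weights (local or Hub), optionally converted
    # to safetensors at the end.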
|
2023-02-14 12:02:16 +00:00
|
|
|
# Remove default handler
|
|
|
|
logger.remove()
|
|
|
|
logger.add(
|
|
|
|
sys.stdout,
|
|
|
|
format="{message}",
|
2023-03-07 17:52:22 +00:00
|
|
|
filter="text_generation_server",
|
2023-02-14 12:02:16 +00:00
|
|
|
level=logger_level,
|
|
|
|
serialize=json_output,
|
|
|
|
backtrace=True,
|
|
|
|
diagnose=False,
|
|
|
|
)

    # Import here after the logger is added to log potential import exceptions
    from text_generation_server import utils

    # Test if files were already downloaded
    try:
        utils.weight_files(model_id, revision, extension)
        logger.info("Files are already present on the host. Skipping download.")
        return
    # Local files not found
    except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError):
        pass
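
    # A model counts as local when `model_id` is an existing directory or when
    # WEIGHTS_CACHE_OVERRIDE points at pre-provisioned weights.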
    is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv(
        "WEIGHTS_CACHE_OVERRIDE", None
    ) is not None

    if not is_local_model:
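        # If the Hub repo ships an adapter_config.json it is a PEFT adapter: merge
        # it into its base model so plain weight files become available.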
        try:
            adapter_config_filename = hf_hub_download(
                model_id, revision=revision, filename="adapter_config.json"
            )
            utils.download_and_unload_peft(
                model_id, revision, trust_remote_code=trust_remote_code
            )
            is_local_model = True
            utils.weight_files(model_id, revision, extension)
            return
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass
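
        # Medusa repos contain only the speculative head; redirect the download to
        # the base model named by `base_model_name_or_path` in their config.json.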
        try:
            import json

            medusa_head = hf_hub_download(
                model_id, revision=revision, filename="medusa_lm_head.safetensors"
            )
            medusa_config = hf_hub_download(
                model_id, revision=revision, filename="config.json"
            )
            with open(medusa_config, "r") as f:
                config = json.load(f)

            model_id = config["base_model_name_or_path"]
            revision = "main"
            try:
                utils.weight_files(model_id, revision, extension)
                logger.info(
                    f"Files for parent {model_id} are already present on the host. "
                    "Skipping download."
                )
                return
            # Local files not found
            except (
                utils.LocalEntryNotFoundError,
                FileNotFoundError,
                utils.EntryNotFoundError,
            ):
                pass
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass

        # Try to download weights from the hub
        try:
            filenames = utils.weight_hub_files(model_id, revision, extension)
            utils.download_weights(filenames, model_id, revision)
            # Successfully downloaded weights
            return

        # No weights found on the hub with this extension
        except utils.EntryNotFoundError as e:
            # Check if we want to automatically convert to safetensors or if we can use .bin weights instead
            if not extension == ".safetensors" or not auto_convert:
                raise e

    elif (Path(model_id) / "medusa_lm_head.safetensors").exists():
        # Try to load as a local Medusa model
        try:
            import json

            medusa_head = Path(model_id) / "medusa_lm_head.safetensors"
            medusa_config = Path(model_id) / "config.json"
            with open(medusa_config, "r") as f:
                config = json.load(f)

            model_id = config["base_model_name_or_path"]
            revision = "main"
            try:
                utils.weight_files(model_id, revision, extension)
                logger.info(
                    f"Files for parent {model_id} are already present on the host. "
                    "Skipping download."
                )
                return
            # Local files not found
            except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
                pass
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass

    elif (Path(model_id) / "adapter_config.json").exists():
        # Try to load as a local PEFT model
        try:
            utils.download_and_unload_peft(
                model_id, revision, trust_remote_code=trust_remote_code
            )
            utils.weight_files(model_id, revision, extension)
            return
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass

    # Try to see if there are local pytorch weights
    try:
        # Get weights for a local model, a hub cached model and inside the WEIGHTS_CACHE_OVERRIDE
        local_pt_files = utils.weight_files(model_id, revision, ".bin")

    # No local pytorch weights
    except utils.LocalEntryNotFoundError:
        if extension == ".safetensors":
            logger.warning(
                f"No safetensors weights found for model {model_id} at revision {revision}. "
                f"Downloading PyTorch weights."
            )

        # Try to see if there are pytorch weights on the hub
        pt_filenames = utils.weight_hub_files(model_id, revision, ".bin")
        # Download pytorch weights
        local_pt_files = utils.download_weights(pt_filenames, model_id, revision)
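
    # Unless auto-conversion is disabled, convert the .bin checkpoints to
    # safetensors next to them, skipping tied tensors that transformers would
    # otherwise duplicate.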
    if auto_convert:
        if not trust_remote_code:
            logger.warning(
                f"🚨🚨BREAKING CHANGE in 2.0🚨🚨: Safetensors conversion is disabled without `--trust-remote-code` because "
                f"Pickle files are unsafe and can essentially contain remote code execution! "
                f"Please check for more information here: https://huggingface.co/docs/text-generation-inference/basic_tutorials/safety",
            )

        logger.warning(
            f"No safetensors weights found for model {model_id} at revision {revision}. "
            f"Converting PyTorch weights to safetensors."
        )

        # Safetensors final filenames
        local_st_files = [
            p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors"
            for p in local_pt_files
        ]
        try:
            import transformers
            from transformers import AutoConfig

            config = AutoConfig.from_pretrained(
                model_id,
                revision=revision,
            )
            architecture = config.architectures[0]

            class_ = getattr(transformers, architecture)

            # Name for this variable depends on the transformers version.
            discard_names = getattr(class_, "_tied_weights_keys", [])
            discard_names.extend(getattr(class_, "_keys_to_ignore_on_load_missing", []))

        except Exception:
            discard_names = []
        # Convert pytorch weights to safetensors
        utils.convert_files(local_pt_files, local_st_files, discard_names)


@app.command()
def quantize(
    model_id: str,
    output_dir: str,
    revision: Optional[str] = None,
    logger_level: str = "INFO",
    json_output: bool = False,
    trust_remote_code: bool = False,
    upload_to_model_id: Optional[str] = None,
    percdamp: float = 0.01,
    act_order: bool = False,
):
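    # Reuse `download_weights` to materialize (and convert) the checkpoint, then
    # run GPTQ quantization with 4 bits and group size 128, optionally uploading
    # the result to `upload_to_model_id`.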
    download_weights(
        model_id=model_id,
        revision=revision,
        logger_level=logger_level,
        json_output=json_output,
    )
    from text_generation_server.utils.gptq.quantize import quantize

    quantize(
        model_id=model_id,
        bits=4,
        groupsize=128,
        output_dir=output_dir,
        trust_remote_code=trust_remote_code,
        upload_to_model_id=upload_to_model_id,
        percdamp=percdamp,
        act_order=act_order,
    )


if __name__ == "__main__":
    app()