text-generation-inference/server/text_generation_server/models/__init__.py

import os
import torch
from loguru import logger
from transformers import AutoConfig
from typing import Optional
from text_generation_server.models.model import Model
from text_generation_server.models.causal_lm import CausalLM
from text_generation_server.models.bloom import BLOOM, BLOOMSharded
from text_generation_server.models.seq2seq_lm import Seq2SeqLM
from text_generation_server.models.galactica import Galactica, GalacticaSharded
from text_generation_server.models.santacoder import SantaCoder
from text_generation_server.models.gpt_neox import GPTNeoxSharded
from text_generation_server.models.t5 import T5Sharded
# FlashNeoX relies on optional custom CUDA kernels, so import it lazily and
# fall back when the import fails; it is only enabled when CUDA is available
# and the FLASH_NEOX=1 environment variable is set.
try:
    from text_generation_server.models.flash_neox import FlashNeoX, FlashNeoXSharded

    FLASH_NEOX = torch.cuda.is_available() and int(os.environ.get("FLASH_NEOX", 0)) == 1
except ImportError:
    if int(os.environ.get("FLASH_NEOX", 0)) == 1:
        logger.exception("Could not import FlashNeoX")
    FLASH_NEOX = False
__all__ = [
    "Model",
    "BLOOM",
    "BLOOMSharded",
    "CausalLM",
    "Galactica",
    "GalacticaSharded",
    "GPTNeoxSharded",
    "Seq2SeqLM",
    "SantaCoder",
    "T5Sharded",
    "get_model",
]
if FLASH_NEOX:
    __all__.append("FlashNeoX")
    __all__.append("FlashNeoXSharded")
# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True
# Disable gradients
torch.set_grad_enabled(False)


def get_model(
    model_id: str, revision: Optional[str], sharded: bool, quantize: bool
) -> Model:
    if "facebook/galactica" in model_id:
        if sharded:
            return GalacticaSharded(model_id, revision, quantize=quantize)
        else:
            return Galactica(model_id, revision, quantize=quantize)

    if "santacoder" in model_id:
        return SantaCoder(model_id, revision, quantize)

    config = AutoConfig.from_pretrained(model_id, revision=revision)

    if config.model_type == "bloom":
        if sharded:
            return BLOOMSharded(model_id, revision, quantize=quantize)
        else:
            return BLOOM(model_id, revision, quantize=quantize)

    if config.model_type == "gpt_neox":
        if sharded:
            neox_cls = FlashNeoXSharded if FLASH_NEOX else GPTNeoxSharded
            return neox_cls(model_id, revision, quantize=quantize)
        else:
            neox_cls = FlashNeoX if FLASH_NEOX else CausalLM
            return neox_cls(model_id, revision, quantize=quantize)

    if config.model_type == "t5":
        if sharded:
            return T5Sharded(model_id, revision, quantize=quantize)
        else:
            return Seq2SeqLM(model_id, revision, quantize=quantize)

    if sharded:
        raise ValueError("sharded is not supported for AutoModel")

    # Fall back to the generic auto-model paths: try a causal LM first,
    # then a seq2seq LM if that fails.
    try:
        return CausalLM(model_id, revision, quantize=quantize)
    except Exception:
        return Seq2SeqLM(model_id, revision, quantize=quantize)
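
# A minimal usage sketch, not part of the original module: the serving code is
# assumed to call get_model roughly like this at startup. The model id,
# revision, and flags below are illustrative values only.
#
#   model = get_model(
#       model_id="bigscience/bloom-560m",
#       revision=None,
#       sharded=False,
#       quantize=False,
#   )
#   # config.model_type == "bloom" and sharded=False, so a BLOOM instance
#   # (a CausalLM subclass) is returned for the serving loop to use.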