2024-04-08 16:06:21 +00:00
|
|
|
# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company.
|
|
|
|
|
2022-10-28 17:24:00 +00:00
|
|
|
import torch
|
|
|
|
|
2023-06-08 12:51:52 +00:00
|
|
|
from typing import Optional, Type
|
2022-10-28 17:24:00 +00:00
|
|
|
|
2023-12-05 10:12:16 +00:00
|
|
|
from transformers import PreTrainedTokenizerBase
|
2022-10-28 17:24:00 +00:00
|
|
|
|
2023-03-07 17:52:22 +00:00
|
|
|
from text_generation_server.models import CausalLM
|
|
|
|
from text_generation_server.models.causal_lm import CausalLMBatch
|
|
|
|
from text_generation_server.pb import generate_pb2
|
2022-10-28 17:24:00 +00:00
|
|
|
|
|
|
|
|
2022-12-08 17:49:33 +00:00
|
|
|
class BloomCausalLMBatch(CausalLMBatch):
    """CausalLM batch specialized for BLOOM.

    Identical to :class:`CausalLMBatch` except that the constructed batch
    is flagged with ``keys_head_dim_last = False`` (BLOOM's cached keys
    appear to use a different layout than the default — the flag is the
    only divergence visible here).
    """

    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "CausalLMBatch":
        """Build a batch from its protobuf and mark BLOOM's key layout."""
        new_batch = super().from_pb(
            pb=pb, tokenizer=tokenizer, dtype=dtype, device=device
        )
        new_batch.keys_head_dim_last = False
        return new_batch
|
|
|
|
|
|
|
|
|
2023-12-05 10:12:16 +00:00
|
|
|
class BLOOM(CausalLM):
    """BLOOM causal language model.

    Thin wrapper around :class:`CausalLM` that only wires in the
    BLOOM-specific batch type (:class:`BloomCausalLMBatch`); all loading
    and generation logic lives in the base class.
    """

    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        """Delegate construction entirely to :class:`CausalLM`.

        Args:
            model_id: Model identifier passed through to the base class.
            revision: Optional model revision (e.g. a git ref).
            dtype: Optional torch dtype for the model weights.
        """
        # Zero-argument super() (Python 3 idiom) instead of the legacy
        # super(BLOOM, self) form — identical behavior, robust to renames.
        super().__init__(
            model_id=model_id,
            revision=revision,
            dtype=dtype,
        )

    @property
    def batch_type(self) -> Type[CausalLMBatch]:
        # Use the batch variant that sets keys_head_dim_last = False.
        return BloomCausalLMBatch
|