text-generation-inference/server/text_generation_server/models/bloom.py
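"""BLOOM-specific wrappers around the generic CausalLM implementation.

BLOOM caches attention keys with the head dimension first rather than last,
so `BloomCausalLMBatch` flips `keys_head_dim_last`; `BLOOMSharded.forward`
also surfaces speculative logits alongside the regular ones.
"""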


import torch
import torch.distributed
from typing import Optional, Type

from transformers import (
    PreTrainedTokenizerBase,
)

from text_generation_server.models import CausalLM
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.pb import generate_pb2


class BloomCausalLMBatch(CausalLMBatch):
    @classmethod
    def from_pb(
        cls,
        pb: generate_pb2.Batch,
        tokenizer: PreTrainedTokenizerBase,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "CausalLMBatch":
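        # Build the generic CausalLM batch, then mark that BLOOM's cached
        # attention keys do not keep the head dimension last.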
        batch = super().from_pb(pb=pb, tokenizer=tokenizer, dtype=dtype, device=device)
        batch.keys_head_dim_last = False
        return batch


class BLOOMSharded(CausalLM):
    @property
    def batch_type(self) -> Type[CausalLMBatch]:
        return BloomCausalLMBatch

    def forward(
        self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
    ):
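        # `past_key_values` carries the KV cache from a previous step;
        # use_cache=True asks the model to return an updated cache so the
        # next decode step can reuse it.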
        outputs, speculative_logits = self.model.forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=True,
        )

        logits = outputs.logits
        return logits, speculative_logits, outputs.past_key_values
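

# Illustrative sketch, not part of the upstream module: one way the
# forward() contract above could be consumed in a prefill/decode loop.
# `model` (a BLOOMSharded instance) and the prompt tensors are assumed to
# be constructed by the serving layer elsewhere.
#
#   # Prefill: run the whole prompt with no cache.
#   logits, speculative_logits, past = model.forward(
#       input_ids=input_ids,            # [batch, prompt_len] token ids
#       attention_mask=attention_mask,  # [batch, prompt_len] mask of 1s
#       position_ids=position_ids,      # [batch, prompt_len]
#       past_key_values=None,
#   )
#   next_token = logits[:, -1, :].argmax(dim=-1)
#
#   # Decode: extend the mask by one and feed only the newest token
#   # together with the returned cache.
#   attention_mask = torch.cat(
#       [attention_mask, torch.ones_like(next_token[:, None])], dim=1
#   )
#   logits, speculative_logits, past = model.forward(
#       input_ids=next_token[:, None],
#       attention_mask=attention_mask,
#       position_ids=position_ids[:, -1:] + 1,
#       past_key_values=past,
#   )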