OlivierDehaene 2023-05-04 19:37:12 +02:00
parent 5d5a2de96c
commit f6df8db680
2 changed files with 133 additions and 86 deletions

View File

@@ -484,6 +484,7 @@ class FlashSantacoderModel(nn.Module):
         input_ids,
         position_ids,
         cu_seqlens,
+        cu_seqlens_q,
         max_s,
         past_key_values: Optional[torch.Tensor] = None,
         pre_allocate_past_size: Optional[int] = None,
@@ -507,15 +508,11 @@ class FlashSantacoderModel(nn.Module):
                 )
             )
             layer_past_present_indices = None
-            cu_seqlens_q = None
             slice_past_index = len(hidden_states)
         # Decode
         else:
             # Create indices from cumulative sequence lengths
             layer_past_present_indices = cu_seqlens[1:] - 1
-            cu_seqlens_q = torch.arange(
-                cu_seqlens.shape[0], dtype=torch.int32, device=hidden_states.device
-            )
             slice_past_index = None
 
         residual = None
@@ -566,6 +563,7 @@ class FlashSantacoderForCausalLM(nn.Module):
         input_ids,
         position_ids,
         cu_seqlens,
+        cu_seqlens_q,
         max_s,
         past_key_values: Optional[torch.Tensor] = None,
         pre_allocate_past_size: Optional[int] = None,
@@ -574,6 +572,7 @@ class FlashSantacoderForCausalLM(nn.Module):
             input_ids,
             position_ids,
             cu_seqlens,
+            cu_seqlens_q,
             max_s,
             past_key_values,
             pre_allocate_past_size,

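The hunks above stop rebuilding cu_seqlens_q inside FlashSantacoderModel.forward and instead take it as an argument, since the caller can compute it once. A minimal standalone sketch of that cumulative-length bookkeeping (not part of this commit; the lengths and device below are made up): during decode every sequence contributes exactly one query token, so the query offsets are simply 0, 1, 2, ..., and advancing cu_seqlens by that same arange accounts for the one token each sequence gains per step.

    import torch

    batch_lengths = [5, 3, 7]  # hypothetical prompt lengths
    device = torch.device("cpu")

    # Prefill: cumulative lengths over the flattened prompt tokens.
    cu = [0]
    for length in batch_lengths:
        cu.append(cu[-1] + length)
    cu_seqlens = torch.tensor(cu, dtype=torch.int32, device=device)  # [0, 5, 8, 15]

    # Built once after prefill and reused for every decode step: each sequence
    # contributes a single query token.
    cu_seqlens_q = torch.arange(
        0, len(batch_lengths) + 1, dtype=torch.int32, device=device
    )  # [0, 1, 2, 3]

    # One decode step grows every sequence by one token, which is exactly what
    # adding cu_seqlens_q to cu_seqlens expresses.
    cu_seqlens = cu_seqlens + cu_seqlens_q  # [0, 6, 10, 18]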
View File

@@ -39,6 +39,7 @@ class FlashCausalLMBatch(Batch):
     position_ids: torch.Tensor
     # cumulative sequence lengths
     cu_seqlens: torch.Tensor
+    cu_seqlens_q: Optional[torch.Tensor]
     max_seqlen: int
     past_key_values: Optional[torch.Tensor]
@@ -68,10 +69,10 @@ class FlashCausalLMBatch(Batch):
     @classmethod
     def from_pb(
         cls,
         pb: generate_pb2.Batch,
         tokenizer: PreTrainedTokenizerBase,
         device: torch.device,
     ) -> "FlashCausalLMBatch":
         position_ids = []
         cu_seqlens = [0]
@@ -127,11 +128,13 @@ class FlashCausalLMBatch(Batch):
             cumulative_length += input_length
             max_tokens += input_length + max_new_tokens
 
-        input_ids = torch.tensor(np.concatenate(all_input_ids), dtype=torch.int32, device=device)
-        position_ids = torch.tensor(np.concatenate(position_ids), dtype=torch.int32, device=device)
-        cu_seqlens = torch.tensor(
-            cu_seqlens, device=device, dtype=torch.int32
-        )
+        input_ids = torch.tensor(
+            np.concatenate(all_input_ids), dtype=torch.int32, device=device
+        )
+        position_ids = torch.tensor(
+            np.concatenate(position_ids), dtype=torch.int32, device=device
+        )
+        cu_seqlens = torch.tensor(cu_seqlens, device=device, dtype=torch.int32)
 
         return cls(
             batch_id=pb.id,
@@ -140,6 +143,7 @@ class FlashCausalLMBatch(Batch):
             input_ids=input_ids,
             position_ids=position_ids,
             cu_seqlens=cu_seqlens,
+            cu_seqlens_q=None,
             max_seqlen=max_seqlen,
             past_key_values=None,
             input_lengths=input_lengths,
@@ -218,7 +222,7 @@ class FlashCausalLMBatch(Batch):
             cumulative_length += request_input_length
             max_tokens += request_input_length + (
                 stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
             )
 
         if single_request:
@@ -354,12 +358,12 @@
 class FlashCausalLM(Model):
     def __init__(
         self,
         model_cls: Type[PreTrainedModel],
         model_id: str,
         revision: Optional[str] = None,
         quantize: bool = False,
         decode_buffer: int = 3,
     ):
         if torch.cuda.is_available():
             device = torch.device("cuda")
@@ -399,19 +403,21 @@ class FlashCausalLM(Model):
         )
 
     def forward(
         self,
         input_ids: torch.Tensor,
         position_ids: torch.Tensor,
         cu_seqlens: torch.Tensor,
+        cu_seqlens_q: Optional[torch.Tensor],
         max_s: int,
         past_key_values: Optional = None,
         pre_allocate_past_size: Optional[int] = None,
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         # Model Forward
         return self.model.forward(
             input_ids=input_ids,
             position_ids=position_ids,
             cu_seqlens=cu_seqlens,
+            cu_seqlens_q=cu_seqlens_q,
             max_s=max_s,
             past_key_values=past_key_values,
             pre_allocate_past_size=pre_allocate_past_size,
@@ -419,16 +425,16 @@ class FlashCausalLM(Model):
     @tracer.start_as_current_span("generate_token")
     def generate_token(
         self, batch: FlashCausalLMBatch
     ) -> Tuple[List[Generation], Optional[FlashCausalLMBatch]]:
-        # Shortcut when batch_size == 1
-        # if prefill and bs == 1
-        if batch.past_key_values is None and len(batch) == 1:
+        prefill = batch.past_key_values is None
+        # Shortcut when batch_size == 1
+        if prefill and len(batch) == 1:
             # Ask to pre-allocate kv to its max size
             # == number of tokens + max_new_tokens
             pre_allocate_past_size = (
                 batch.input_lengths[0] + batch.stopping_criterias[0].max_new_tokens
             )
         else:
             pre_allocate_past_size = None
@@ -437,11 +443,23 @@ class FlashCausalLM(Model):
             batch.input_ids,
             batch.position_ids,
             batch.cu_seqlens,
+            batch.cu_seqlens_q,
             batch.max_seqlen,
             batch.past_key_values,
             pre_allocate_past_size,
         )
 
+        if prefill:
+            # Compute logprobs for the whole batch
+            prefill_logprobs_tensor = torch.log_softmax(out, -1)
+        else:
+            prefill_logprobs_tensor = None
+
+        # Used to slice next batch past
+        past_indices = []
+
+        prefill_logprobs = []
+        next_token_logprobs = []
+
         # Cumulative length
         cumulative_length = 0
@@ -451,28 +469,18 @@ class FlashCausalLM(Model):
         # Zipped iterator
         iterator = zip(
-            batch.requests,
             batch.input_lengths,
-            batch.offsets,
-            batch.token_offsets,
             batch.next_token_choosers,
             batch.stopping_criterias,
             batch.all_input_ids,
         )
 
-        past_indices = []
-
-        prefill = batch.past_key_values is None
-
         # For each member of the batch
         for i, (
-            request,
             input_length,
-            offset,
-            token_offset,
             next_token_chooser,
             stopping_criteria,
             all_input_ids,
         ) in enumerate(iterator):
             # Indexing metadata
             start_index = cumulative_length
@@ -481,21 +489,34 @@ class FlashCausalLM(Model):
             if prefill:
                 # Prefill mode
                 # out is of shape [cumulative_sequence_lengths, vocab_size]
-                logits = out[start_index:end_index]
-                batch.all_input_ids_tensor.append(
-                    F.pad(batch.input_ids[start_index:end_index], (0, stopping_criteria.max_new_tokens))
-                )
+                # only take last token logit
+                logits = out[end_index - 1 : end_index]
+
+                all_input_ids_tensor = F.pad(
+                    batch.input_ids[start_index:end_index],
+                    (0, stopping_criteria.max_new_tokens),
+                )
+                batch.all_input_ids_tensor.append(all_input_ids_tensor)
+
                 batch.position_ids[i] = input_length
+
+                prefill_logprobs.append(
+                    prefill_logprobs_tensor[start_index:end_index]
+                    .gather(
+                        1,
+                        all_input_ids_tensor[1:input_length]
+                        .unsqueeze(1)
+                        .to(torch.int64),
+                    )
+                    .squeeze(1)[:-1]
+                )
             else:
                 # Decode mode
                 # out is of shape [batch_size, vocab_size]
                 logits = out[i].unsqueeze(0)
                 all_input_ids_tensor = batch.all_input_ids_tensor[i]
 
             # Select next token
-            next_token_id, logprobs = next_token_chooser(
+            next_token_id, logprob = next_token_chooser(
                 all_input_ids_tensor[None, :input_length], logits
             )
             next_token_id_squeezed = next_token_id.squeeze()
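The prefill branch above scores the whole prompt in one pass: a log_softmax over the prefill logits, then for each position a gather of the log-probability assigned to the token that actually follows it. A small standalone sketch of that shifted gather (illustrative shapes and names, not the commit's tensors); the first prompt token is padded with NaN since nothing precedes it:

    import torch

    vocab_size, input_length = 11, 4
    logits = torch.randn(input_length, vocab_size)  # one row per prompt token
    token_ids = torch.randint(0, vocab_size, (input_length,))

    logprobs = torch.log_softmax(logits, dim=-1)

    # Row t predicts token t + 1, so drop the last row and gather each row's
    # log-probability at the id of the next observed token.
    prompt_logprobs = (
        logprobs[:-1]
        .gather(1, token_ids[1:].unsqueeze(1).to(torch.int64))
        .squeeze(1)
    )  # shape [input_length - 1]

    # The first prompt token has no preceding context, hence the leading NaN
    # when the per-request logprobs are assembled further down.
    request_logprobs = [float("nan")] + prompt_logprobs.tolist()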
@@ -503,27 +524,49 @@ class FlashCausalLM(Model):
             past_indices.extend([j for j in range(start_index + i, end_index + i)])
 
             batch.input_ids[i] = next_token_id_squeezed
+            next_token_logprobs.append(logprob[-1, next_token_id])
+
+            cumulative_length += input_length
 
         if prefill:
-            batch.input_ids = batch.input_ids[:len(batch)]
-            batch.position_ids = batch.position_ids[:len(batch)]
+            batch.input_ids = batch.input_ids[: len(batch)]
+            batch.position_ids = batch.position_ids[: len(batch)]
+            batch.cu_seqlens_q = torch.arange(
+                0, len(batch) + 1, device=self.device, dtype=torch.int32
+            )
         else:
             batch.position_ids += 1
 
         # Initialize past_key_values in prefill
-        if batch.past_key_values is None and len(batch) == 1:
+        if prefill and len(batch) == 1:
             # present is already pre-padded
             batch.past_key_values = present
 
+        batch.cu_seqlens = batch.cu_seqlens + batch.cu_seqlens_q
+
         if len(batch) > 1:
+            prefill_logprobs = torch.cat(prefill_logprobs) if prefill else None
+            next_token_logprobs = torch.cat(next_token_logprobs)
+
             batch.past_key_values = present.new_empty(
-                (present.shape[0], present.shape[1] + len(batch.requests), *present.shape[2:]))
+                (
+                    present.shape[0],
+                    present.shape[1] + len(batch.requests),
+                    *present.shape[2:],
+                )
+            )
             batch.past_key_values[:, past_indices] = present
 
-        batch.cu_seqlens = batch.cu_seqlens + torch.arange(0, len(batch) + 1, device=self.device, dtype=torch.int32)
+            prefill_logprobs = prefill_logprobs.to("cpu") if prefill else None
+            next_token_logprobs = next_token_logprobs.to("cpu")
+        else:
+            prefill_logprobs = prefill_logprobs[0] if prefill else None
+            next_token_logprobs = next_token_logprobs[0]
 
-        next_token_ids = batch.input_ids.to("cpu").detach()
+        next_token_ids = batch.input_ids.to("cpu")
+
+        prefill_logprobs_cumulative_length = 0
 
         # Zipped iterator
         iterator = zip(
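For batches larger than one, the decode path above re-allocates the past tensor with one spare slot per request and scatters present into it through past_indices, so the token generated at this step has a free position to be written into on the next step. A standalone sketch of that index layout (arbitrary shapes, not the commit's tensors):

    import torch

    lengths = [4, 2, 3]  # per-request token counts so far (made up)
    present = torch.randn(2, sum(lengths), 8)  # [..., tokens, head_dim]; shapes arbitrary

    # Shift request i right by i positions, leaving one free slot directly
    # after each request's tokens (positions 4, 7 and 11 here).
    past_indices = []
    cumulative = 0
    for i, length in enumerate(lengths):
        start, end = cumulative, cumulative + length
        past_indices.extend(range(start + i, end + i))
        cumulative += length

    past = present.new_empty(
        (present.shape[0], present.shape[1] + len(lengths), *present.shape[2:])
    )
    past[:, past_indices] = present  # the gaps stay free until the next decode step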
@@ -535,26 +578,29 @@ class FlashCausalLM(Model):
             batch.stopping_criterias,
             batch.all_input_ids,
             batch.all_input_ids_tensor,
+            next_token_ids,
+            next_token_logprobs,
         )
 
         # For each member of the batch
         for i, (
             request,
             input_length,
             offset,
             token_offset,
             next_token_chooser,
             stopping_criteria,
             all_input_ids,
             all_input_ids_tensor,
+            next_token_id,
+            next_token_logprob,
         ) in enumerate(iterator):
-            next_token_id_item = next_token_ids[i]
+            next_token_id_item = next_token_id.item()
 
             # Append next token to all tokens
             all_input_ids.append(next_token_id_item)
 
             # Generated token
-            next_token_logprob = 0.0
             next_token_text, offset, token_offset = self.decode_token(
                 all_input_ids,
                 offset,
@@ -570,7 +616,7 @@ class FlashCausalLM(Model):
             if stop:
                 # Decode generated tokens
                 output_text = self.decode(
-                    all_input_ids[-stopping_criteria.current_tokens:]
+                    all_input_ids[-stopping_criteria.current_tokens :]
                 )
                 # Get seed
                 if isinstance(next_token_chooser.choice, Sampling):
@@ -585,36 +631,38 @@ class FlashCausalLM(Model):
                 stopped = False
                 generated_text = None
 
-            # # Prefill
-            # if prefill:
-            #     # Remove generated token to only have prefill and add nan for first prompt token
-            #     prefill_logprobs = [float("nan")] + logprobs.gather(
-            #         1, all_input_ids_tensor[1:input_length].unsqueeze(1)
-            #     ).squeeze(1)[:-1].tolist()
-            #     prefill_token_ids = all_input_ids[:-1]
-            #     prefill_texts = self.tokenizer.batch_decode(
-            #         prefill_token_ids,
-            #         clean_up_tokenization_spaces=False,
-            #         skip_special_tokens=False,
-            #     )
-            #     prefill_tokens = PrefillTokens(
-            #         prefill_token_ids, prefill_logprobs, prefill_texts
-            #     )
-            # else:
-            prefill_tokens = None
+            # Prefill
+            if prefill:
+                start_index = prefill_logprobs_cumulative_length
+                end_index = prefill_logprobs_cumulative_length + input_length - 1
+
+                # Remove generated token to only have prefill and add nan for first prompt token
+                request_prefill_logprobs = [float("nan")] + prefill_logprobs[start_index:end_index].tolist()
+                prefill_token_ids = all_input_ids[:-1]
+                prefill_texts = self.tokenizer.batch_decode(
+                    prefill_token_ids,
+                    clean_up_tokenization_spaces=False,
+                    skip_special_tokens=False,
+                )
+                prefill_tokens = PrefillTokens(
+                    prefill_token_ids, request_prefill_logprobs, prefill_texts
+                )
+
+                prefill_logprobs_cumulative_length += input_length - 1
+            else:
+                prefill_tokens = None
 
             generation = Generation(
                 request.id,
                 prefill_tokens,
                 next_token_id_item,
-                next_token_logprob,
+                next_token_logprob.item(),
                 next_token_text,
                 next_token_id_item in self.all_special_ids,
                 generated_text,
             )
 
             generations.append(generation)
-            cumulative_length += input_length
             new_input_length = input_length + 1
 
             # Update values