fix: adjust batch_tokenized_inputs unpacking in mllama

drbh 2024-12-13 15:51:06 +00:00
parent 2ae152a188
commit 5c7bc91a2f


@@ -161,7 +161,7 @@ class MllamaCausalLMBatch(VlmCausalLMBatch):
         dtype: torch.dtype,
         device: torch.device,
     ) -> "VlmCausalLMBatch":
-        batch_tokenized_inputs, image_inputs = cls.batch_tokenized_inputs(
+        batch_tokenized_inputs, image_inputs, _video_inputs = cls.batch_tokenized_inputs(
             pb.requests, tokenizer, processor, config
         )
         batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
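The change widens the tuple unpacking because the shared batch_tokenized_inputs helper now returns a third element for video inputs; mllama has no video path, so that element is bound to _video_inputs and discarded. Below is a minimal sketch of the pattern with a hypothetical stand-in helper; the names and signature are illustrative, not the exact text-generation-inference API.

from typing import Any, Dict, List, Optional, Tuple

def batch_tokenized_inputs_stub(
    requests: List[Any],
) -> Tuple[List[List[int]], Optional[Dict], Optional[Dict]]:
    # Hypothetical stand-in for the shared helper: it tokenizes each request
    # and gathers any image/video tensors. For mllama, video inputs are
    # always absent.
    tokenized = [[101, 102] for _ in requests]
    image_inputs: Optional[Dict] = None
    video_inputs: Optional[Dict] = None
    return tokenized, image_inputs, video_inputs

requests: List[Any] = [object()]

# Before the fix, two-element unpacking would raise
# "ValueError: too many values to unpack" once the helper grew a third
# return value:
#     batch_tokenized_inputs, image_inputs = batch_tokenized_inputs_stub(requests)

# After the fix, the third element is accepted and deliberately ignored;
# the leading underscore signals it is unused.
batch_tokenized_inputs, image_inputs, _video_inputs = batch_tokenized_inputs_stub(requests)

Binding the unused value to an underscore-prefixed name keeps the call site in sync with the helper's full return shape without pulling any video handling into mllama.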