Mirror of https://github.com/huggingface/text-generation-inference.git
Synced 2025-05-15 14:42:11 +00:00
fix: adjust batch_tokenized_inputs output in mllama
This commit is contained in:
parent 2ae152a188
commit 5c7bc91a2f
@@ -161,7 +161,7 @@ class MllamaCausalLMBatch(VlmCausalLMBatch):
         dtype: torch.dtype,
         device: torch.device,
     ) -> "VlmCausalLMBatch":
-        batch_tokenized_inputs, image_inputs = cls.batch_tokenized_inputs(
+        batch_tokenized_inputs, image_inputs, _video_inputs = cls.batch_tokenized_inputs(
             pb.requests, tokenizer, processor, config
         )
         batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
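For context, a minimal sketch of how the changed unpacking fits into the surrounding classmethod. Only the lines shown in the hunk above come from the commit; the method name, the parent-class stubs, and the trailing handling are assumptions made for illustration, not the actual text-generation-inference implementation.

# Sketch only: everything outside the hunk lines is assumed for illustration.
import torch


class VlmCausalLMBatch:
    @classmethod
    def batch_tokenized_inputs(cls, requests, tokenizer, processor, config):
        # Assumed to now return (tokenized_inputs, image_inputs, video_inputs),
        # i.e. a third value was added to its output.
        ...

    @classmethod
    def from_tokenized(cls, pb, tokenizer, batch_tokenized_inputs, dtype, device):
        ...


class MllamaCausalLMBatch(VlmCausalLMBatch):
    @classmethod
    def from_pb_processor(  # method name assumed from the visible signature
        cls,
        pb,
        tokenizer,
        processor,
        config,
        dtype: torch.dtype,
        device: torch.device,
    ) -> "VlmCausalLMBatch":
        # batch_tokenized_inputs gained a third return value (video inputs);
        # mllama does not use it, so it is unpacked into a throwaway name.
        batch_tokenized_inputs, image_inputs, _video_inputs = cls.batch_tokenized_inputs(
            pb.requests, tokenizer, processor, config
        )
        batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
        # ... image_inputs handling elided ...
        return batch

Without this adjustment, the two-name unpacking would raise a ValueError ("too many values to unpack") once the helper started returning three values, which is what the commit title refers to.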