From 5c7bc91a2fc7117752c65d439033cc875eec2558 Mon Sep 17 00:00:00 2001
From: drbh
Date: Fri, 13 Dec 2024 15:51:06 +0000
Subject: [PATCH] fix: adjust batch_tokenized_inputs output in mllama

---
 server/text_generation_server/models/mllama_causal_lm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/mllama_causal_lm.py b/server/text_generation_server/models/mllama_causal_lm.py
index ce899a48..f212312f 100644
--- a/server/text_generation_server/models/mllama_causal_lm.py
+++ b/server/text_generation_server/models/mllama_causal_lm.py
@@ -161,7 +161,7 @@ class MllamaCausalLMBatch(VlmCausalLMBatch):
         dtype: torch.dtype,
         device: torch.device,
     ) -> "VlmCausalLMBatch":
-        batch_tokenized_inputs, image_inputs = cls.batch_tokenized_inputs(
+        batch_tokenized_inputs, image_inputs, _video_inputs = cls.batch_tokenized_inputs(
             pb.requests, tokenizer, processor, config
         )
         batch = cls.from_tokenized(pb, tokenizer, batch_tokenized_inputs, dtype, device)
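
Note on the change: the patch adjusts the mllama caller to unpack a third
return value from cls.batch_tokenized_inputs, discarding it as _video_inputs
since mllama does not consume video. Below is a minimal sketch of the assumed
three-tuple return shape; the function body and type annotations here are
illustrative, not the actual implementation in
server/text_generation_server/models/mllama_causal_lm.py.

    from typing import Any, List, Optional, Tuple

    # Hypothetical simplification: the provider now returns three values,
    # with video inputs as a third element that mllama callers discard.
    def batch_tokenized_inputs_sketch(
        requests: List[Any], tokenizer: Any, processor: Any, config: Any
    ) -> Tuple[List[List[int]], Optional[dict], Optional[dict]]:
        # Tokenize each request's text input.
        batch_tokenized_inputs = [
            tokenizer(r.inputs, add_special_tokens=False)["input_ids"]
            for r in requests
        ]
        image_inputs = None  # built via the processor when images are present
        video_inputs = None  # mllama does not handle video; always None here
        return batch_tokenized_inputs, image_inputs, video_inputs

    # The caller then unpacks and ignores the new third element, mirroring
    # the patched line:
    # batch_tokenized_inputs, image_inputs, _video_inputs = \
    #     batch_tokenized_inputs_sketch(pb.requests, tokenizer, processor, config)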