From e55067a475a632a33d0087527ef120a34259a149 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Mon, 23 Sep 2024 15:50:43 +0200
Subject: [PATCH] Cleaner condition.

---
 server/text_generation_server/models/idefics_causal_lm.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/idefics_causal_lm.py b/server/text_generation_server/models/idefics_causal_lm.py
index a82570db..17cbabf0 100644
--- a/server/text_generation_server/models/idefics_causal_lm.py
+++ b/server/text_generation_server/models/idefics_causal_lm.py
@@ -225,7 +225,7 @@ class IdeficsCausalLMBatch(Batch):
             aspect_ratio_ids = None
             aspect_ratio_mask = None
             cross_attention_mask = None
-        else:
+        elif "cross_attention_mask" in tokenized_inputs:
             image_attention_mask = None
             aspect_ratio_ids = tokenized_inputs["aspect_ratio_ids"]
             aspect_ratio_mask = tokenized_inputs["aspect_ratio_mask"]
@@ -235,6 +235,8 @@ class IdeficsCausalLMBatch(Batch):
             tokenized_inputs["input_ids"] = tokenized_inputs["input_ids"].clamp(
                 max=processor.tokenizer.vocab_size - 1
             )
+        else:
+            raise RuntimeError("Unhandled state for idefics/mllama")
 
         position_ids = tokenized_inputs["attention_mask"].long().cumsum(-1) - 1
         position_ids.masked_fill_(tokenized_inputs["attention_mask"] == 0, 1)