diff --git a/server/text_generation_server/models/vlm_causal_lm.py b/server/text_generation_server/models/vlm_causal_lm.py
index 37664230..f0db89b2 100644
--- a/server/text_generation_server/models/vlm_causal_lm.py
+++ b/server/text_generation_server/models/vlm_causal_lm.py
@@ -173,10 +173,7 @@ class VlmCausalLMBatch(FlashMistralBatch):
             image_id = 0
             for chunk in chunks:
                 if chunk["type"] == "text":
-                    if config.model_type == "paligemma":
-                        full_text += "" + chunk["content"] + "\n"
-                    else:
-                        full_text += chunk["content"]
+                    full_text += chunk["content"]
                 elif chunk["type"] == "image":
                     image = chunk["content"]
                     # Should never receive URLs anymore, processing should be done
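
For context, a minimal standalone sketch of the behavioral change in this hunk: previously, text chunks for `paligemma` models were concatenated with a trailing newline (and a redundant `"" +` prefix), while every other model type concatenated chunks verbatim; after the change, all model types use plain concatenation. The `chunks` data and helper names below are hypothetical, not from the repository:

```python
# Hypothetical chunk list in the same shape the batching code iterates over.
chunks = [
    {"type": "text", "content": "caption the image"},
    {"type": "text", "content": " in one sentence"},
]

def build_full_text_before(chunks, model_type):
    # Old behavior: paligemma text chunks each gained a trailing "\n".
    full_text = ""
    for chunk in chunks:
        if chunk["type"] == "text":
            if model_type == "paligemma":
                full_text += "" + chunk["content"] + "\n"
            else:
                full_text += chunk["content"]
    return full_text

def build_full_text_after(chunks, model_type):
    # New behavior: uniform concatenation for all model types.
    full_text = ""
    for chunk in chunks:
        if chunk["type"] == "text":
            full_text += chunk["content"]
    return full_text

assert build_full_text_before(chunks, "paligemma") == "caption the image\n in one sentence\n"
assert build_full_text_after(chunks, "paligemma") == "caption the image in one sentence"
```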