diff --git a/server/text_generation_server/models/vlm_causal_lm.py b/server/text_generation_server/models/vlm_causal_lm.py
index 6292b25f..ef30738f 100644
--- a/server/text_generation_server/models/vlm_causal_lm.py
+++ b/server/text_generation_server/models/vlm_causal_lm.py
@@ -173,7 +173,10 @@ class VlmCausalLMBatch(FlashMistralBatch):
             image_id = 0
             for chunk in chunks:
                 if chunk["type"] == "text":
-                    full_text += "" + chunk["content"] + "\n"
+                    if config.model_type == "paligemma":
+                        full_text += "" + chunk["content"] + "\n"
+                    else:
+                        full_text += chunk["content"]
                 elif chunk["type"] == "image":
                     image = chunk["content"]
                     # Should never receive URLs anymore, processing should be done
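
The change above stops appending a trailing newline to text chunks for every model type and restricts it to PaliGemma, whose prompt format uses a newline to separate the input prefix from the generated suffix. A minimal standalone sketch of the new chunk-assembly behavior (the chunks shape and the SimpleNamespace config stand-in are illustrative assumptions; the real code reads model_type from the model's Hugging Face config):

    from types import SimpleNamespace

    def assemble_prompt(chunks, config):
        # Mirrors the patched loop: PaliGemma text chunks keep a trailing
        # newline; every other model type concatenates the raw text as-is.
        full_text = ""
        for chunk in chunks:
            if chunk["type"] == "text":
                if config.model_type == "paligemma":
                    full_text += "" + chunk["content"] + "\n"
                else:
                    full_text += chunk["content"]
            # image chunks are handled separately in the real batch code
        return full_text

    chunks = [{"type": "text", "content": "caption en"}]
    print(repr(assemble_prompt(chunks, SimpleNamespace(model_type="paligemma"))))   # 'caption en\n'
    print(repr(assemble_prompt(chunks, SimpleNamespace(model_type="llava_next"))))  # 'caption en'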