From 17ac93efd33ae69a20516fc8b1c2f0ba13aa3e5e Mon Sep 17 00:00:00 2001
From: drbh
Date: Wed, 15 May 2024 04:42:55 +0000
Subject: [PATCH] fix: default add special tokens to avoid vlm regressions

---
 server/text_generation_server/models/vlm_causal_lm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/vlm_causal_lm.py b/server/text_generation_server/models/vlm_causal_lm.py
index ef30738f..37664230 100644
--- a/server/text_generation_server/models/vlm_causal_lm.py
+++ b/server/text_generation_server/models/vlm_causal_lm.py
@@ -203,7 +203,7 @@ class VlmCausalLMBatch(FlashMistralBatch):
             batch_inputs,
             truncation=True,
             max_length=max_truncation,
-            add_special_tokens=False,
+            add_special_tokens=not config.model_type == "paligemma",
         )["input_ids"]
         if image_inputs:
             image_input = image_inputs[0]
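
Note (not part of the patch): below is a minimal sketch of what the changed line
does, assuming a Llama-based VLM tokenizer; the checkpoint name, model_type
value, and max_length are illustrative stand-ins, not values from the patch.
With the new default, tokenizer special tokens such as BOS are added for every
VLM model type except "paligemma", whose processing is expected to handle them
itself:

    from transformers import AutoTokenizer

    # Hypothetical checkpoint; stands in for whatever model the server loaded.
    tokenizer = AutoTokenizer.from_pretrained("llava-hf/llava-1.5-7b-hf")

    batch_inputs = ["Describe this image."]
    model_type = "llava"  # stands in for config.model_type

    batch_tokenized_inputs = tokenizer(
        batch_inputs,
        truncation=True,
        max_length=512,  # stands in for max_truncation
        # The patched expression: add special tokens unless the model is PaliGemma.
        add_special_tokens=not model_type == "paligemma",
    )["input_ids"]

    # For a Llama-based tokenizer, each sequence now starts with the BOS id;
    # with model_type == "paligemma" the BOS would be omitted here.
    print(batch_tokenized_inputs)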