fix: default to adding special tokens to avoid VLM regressions

This commit is contained in:
drbh 2024-05-15 04:42:55 +00:00
parent 70713fc292
commit 17ac93efd3

View File

@ -203,7 +203,7 @@ class VlmCausalLMBatch(FlashMistralBatch):
batch_inputs,
truncation=True,
max_length=max_truncation,
add_special_tokens=False,
add_special_tokens=not config.model_type == "paligemma",
)["input_ids"]
if image_inputs:
image_input = image_inputs[0]