disable chunking for qwen

This commit is contained in:
Mohit Sharma 2025-04-23 08:09:51 +00:00
parent dd91b60998
commit 15926210d3

View File

@@ -1522,6 +1522,8 @@ def get_model(
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
# TODO: Fix bug in rust image_text_replacement implementation
support_chunking=False,
)
# TODO: Uncomment when transformers is refactored
# elif FLASH_TRANSFORMERS_BACKEND:
@@ -1553,6 +1555,8 @@ def get_model(
lora_adapter_ids=lora_adapter_ids,
config_class=Qwen2_5_VLConfig,
processor_class=Qwen2_5_VLProcessor,
# TODO: Fix bug in rust image_text_replacement implementation
support_chunking=False,
)
# TODO: Uncomment when transformers is refactored
# elif FLASH_TRANSFORMERS_BACKEND: