Revert PR #235, as flash attention is not really enabled for gemma (#239)

Author: Thanaji Rao Thakkalapelli, 2024-10-23 01:58:57 -07:00 (committed by GitHub)
parent c5e3881051
commit b126bf4785


@@ -694,13 +694,12 @@ class CausalLM(Model):
             "return_dict": True,
         }
-        if model.config.model_type in ["llama", "mistral", "starcoder2", "qwen2", "falcon", "gemma"]:
+        if model.config.model_type in ["llama", "mistral", "starcoder2", "qwen2", "falcon"]:
             if model.config.model_type not in ["falcon"]:
                 kwargs["attn_softmax_bf16"] = True
-            if model.config.model_type not in ["gemma"]:
-                kwargs["trim_logits"] = True
+            kwargs["trim_logits"] = True
             if os.getenv("USE_FLASH_ATTENTION", "false").lower() == "true":
                 kwargs["use_flash_attention"] = True
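For reference, a minimal standalone sketch of the gating logic after this revert. The function name build_generation_kwargs and the SimpleNamespace stand-in for model.config are illustrative, not TGI's actual API; the point is that "gemma" no longer enters the optimized branch, so it never receives attn_softmax_bf16, trim_logits, or the flash-attention flag.

import os
from types import SimpleNamespace

def build_generation_kwargs(model_type: str) -> dict:
    # Hypothetical stand-in for model.config; the real object comes from transformers.
    config = SimpleNamespace(model_type=model_type)
    kwargs = {"return_dict": True}
    # After the revert, "gemma" is absent from this allow-list, so gemma
    # models skip every optimization flag below.
    if config.model_type in ["llama", "mistral", "starcoder2", "qwen2", "falcon"]:
        if config.model_type not in ["falcon"]:
            kwargs["attn_softmax_bf16"] = True
        kwargs["trim_logits"] = True
        if os.getenv("USE_FLASH_ATTENTION", "false").lower() == "true":
            kwargs["use_flash_attention"] = True
    return kwargs

if __name__ == "__main__":
    os.environ["USE_FLASH_ATTENTION"] = "true"
    print(build_generation_kwargs("llama"))  # includes use_flash_attention
    print(build_generation_kwargs("gemma"))  # only {"return_dict": True}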