Less clutter.

This commit is contained in:
Nicolas Patry 2024-07-22 13:49:24 +00:00
parent 620416f13f
commit 5829b7821e
No known key found for this signature in database
GPG Key ID: B154A218C20EBBCA
2 changed files with 3 additions and 5 deletions

View File

@@ -757,6 +757,8 @@ def get_model(
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
# hidden_size / num_attention_heads is wrong in `google/gemma-2-9b-it`
head_size=config.head_dim,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma2"))

View File

@@ -925,10 +925,6 @@ class FlashCausalLM(Model):
assert self.num_kv_heads > 0
if head_size is None:
if getattr(config, "head_dim", None):
# hidden_size / num_attention_heads is wrong in `google/gemma-2-9b-it`
self.head_size = config.head_dim
else:
self.head_size = config.hidden_size // config.num_attention_heads
else:
self.head_size = head_size