fix: prefer hidden_activation over hidden_act in gemma2 (#2381)

Author: drbh, 2024-08-08 14:08:56 -04:00 (committed by yuanwu)
Parent: 1057f28128
Commit: 853fb96fec


@@ -265,7 +265,7 @@ class FlashGemma2Attention(torch.nn.Module):
 class Gemma2MLP(nn.Module):
     def __init__(self, prefix, config, weights):
         super().__init__()
-        act = config.hidden_act
+        act = config.hidden_activation
         self.act = (
             ACT2FN[act]
             if "gelu" not in act