[gaudi] Fix the Llama-4-Maverick-17B-128E crash issue (#3246)

Signed-off-by: yuanwu <yuan.wu@intel.com>
This commit is contained in:
Yuan Wu 2025-05-29 17:38:44 +08:00 committed by GitHub
parent 70217ac345
commit 6b6e30a6f6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -48,7 +48,6 @@ from text_generation_server.layers.attention import (
 )
 from text_generation_server.models.custom_modeling.flash_llama_modeling import (
     FlashLlamaAttention,
-    LlamaMLP,
 )
@@ -444,7 +443,7 @@ class Llama4TextDecoderLayer(nn.Module):
         if self.is_moe_layer:  # the 128E model interleaves dense / sparse
             self.feed_forward = Llama4TextMoe(f"{prefix}.feed_forward", config, weights)
         else:
-            self.feed_forward = LlamaMLP(f"{prefix}.feed_forward", config, weights)
+            self.feed_forward = Llama4TextMLP(f"{prefix}.feed_forward", config, weights)
         self.input_layernorm = FastRMSNorm.load(
             prefix=f"{prefix}.input_layernorm",