Fix the out-of-memory (OOM) issue with Llama-4-Scout-17B-16E-Instruct

Signed-off-by: yuanwu <yuan.wu@intel.com>
This commit is contained in:
yuanwu 2025-05-29 06:38:45 +00:00
parent f14044009a
commit fb104d8b42

View File

@ -143,6 +143,8 @@ class FlashLlamaAttention(torch.nn.Module):
config.num_key_value_heads = getattr(
config, "num_key_value_heads", config.num_attention_heads
)
if config.model_type != "llama4_text":
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,