From f746b8e0ae1d9479de6f5ddd79e0d2ec65878298 Mon Sep 17 00:00:00 2001
From: fxmarty <9808326+fxmarty@users.noreply.github.com>
Date: Wed, 8 Nov 2023 19:06:59 +0900
Subject: [PATCH] Update
 server/text_generation_server/models/custom_modeling/flash_llama_modeling.py

Co-authored-by: OlivierDehaene
---
 .../models/custom_modeling/flash_llama_modeling.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index 1292c1b3..f46c9192 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -160,6 +160,8 @@ class LlamaRMSNorm(nn.Module):
                 self.variance_epsilon,
             )
             return out, residual
+        else:
+            raise RuntimeError("system not supported")
 
 
 def load_attention(config, prefix, weights):