diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index 9a21d043..0a9d57e6 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -244,7 +244,7 @@ class LlamaMLP(nn.Module):
         )
 
     def forward(self, hidden_states):
-        if IS_ROCM_SYSTEM and self.hidden_act == "silu" and hidden_states.shape[0] == 1:
+        if False and IS_ROCM_SYSTEM and self.hidden_act == "silu" and hidden_states.shape[0] == 1:
             out = torch.empty(
                 hidden_states.shape[0],
                 self.intermediate_size,
diff --git a/server/text_generation_server/utils/layers.py b/server/text_generation_server/utils/layers.py
index 6635be56..330b7408 100644
--- a/server/text_generation_server/utils/layers.py
+++ b/server/text_generation_server/utils/layers.py
@@ -366,7 +366,7 @@ class FastLinearROCm(nn.Module):
         weight = self.weight
         bias = self.bias
 
-        if IS_ROCM_SYSTEM and inp.numel() // inp.size(-1) == 1:
+        if False and IS_ROCM_SYSTEM and inp.numel() // inp.size(-1) == 1:
             batched = False
 
             if inp.dim() == 3:
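Both hunks use the same trick: prepending `False and` to the condition makes Python short-circuit before evaluating the rest, so the ROCm-specific fast path (the fused kernel branch in `LlamaMLP.forward` and the custom batch-size-1 path in `FastLinearROCm`) becomes unreachable and execution falls through to the generic PyTorch path. The original condition stays visible in the source, which makes the change trivially revertible. Below is a minimal, self-contained sketch of the pattern; `IS_ROCM_SYSTEM` is stubbed out here and `forward` is a simplified stand-in, not the repository's actual implementation:

```python
import torch

IS_ROCM_SYSTEM = True  # stub for the repo's platform flag (illustrative only)


def forward(hidden_states: torch.Tensor) -> torch.Tensor:
    # `False and ...` short-circuits immediately: Python never evaluates
    # IS_ROCM_SYSTEM or the shape check, so the guarded branch is dead code.
    if False and IS_ROCM_SYSTEM and hidden_states.shape[0] == 1:
        raise RuntimeError("unreachable: the fused ROCm kernel would run here")
    # Generic fallback path (stands in for the unfused projection + activation).
    return torch.nn.functional.silu(hidden_states)


print(forward(torch.randn(1, 4)).shape)  # torch.Size([1, 4]) -- fallback path ran
```

Hard-coding `False` like this is presumably a temporary disable for debugging or benchmarking; a longer-lived toggle would more likely read an environment variable or config flag instead of leaving dead code behind.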