From aac547dd8283ecebd9945813b5d5f79c6257f2c5 Mon Sep 17 00:00:00 2001
From: BaihuiJin
Date: Thu, 11 Jul 2024 21:19:17 +0800
Subject: [PATCH 1/3] Clear previous hpu_graphs when graph shape changed to save memory (#176)

---
 server/text_generation_server/models/causal_lm.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index 37d7479b..ad2270ab 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -602,6 +602,7 @@ class CausalLM(Model):
         dtype: Optional[torch.dtype] = None,
         trust_remote_code: bool = False,
     ):
+        self.prev_bs = 0
         if use_medusa:
             raise RuntimeError("Medusa decoding is not enabled for AutoModel")

@@ -965,6 +966,9 @@ class CausalLM(Model):
             batch = batch.__class__.recombine([batch], self.tokenizer.pad_token_id)

         scenario = 'PREFILL' if prefill else 'GENERATE'
+        if self.enable_hpu_graph and batch.batch_size != self.prev_bs:
+            self.model.clear_cache()
+            self.prev_bs = batch.batch_size
         dbg_trace(
             scenario, f'bs:{batch.batch_size} num_reqs:{len(batch.requests)} seq_len:{batch.seq_length} padding:{batch.right_padding}')
         assert batch.right_padding > 0, 'No more room for next token!'

From 15e5df1cc451dfd6e2fd45b25b97d9c148a513dc Mon Sep 17 00:00:00 2001
From: BaihuiJin
Date: Tue, 16 Jul 2024 15:42:46 +0800
Subject: [PATCH 2/3] BS round up to BUCKET_SIZE to prevent capture graph when graph input not change (#185)

---
 server/text_generation_server/models/causal_lm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index ad2270ab..32c13daf 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -966,9 +966,9 @@ class CausalLM(Model):
             batch = batch.__class__.recombine([batch], self.tokenizer.pad_token_id)

         scenario = 'PREFILL' if prefill else 'GENERATE'
-        if self.enable_hpu_graph and batch.batch_size != self.prev_bs:
+        if self.enable_hpu_graph and self.limit_hpu_graph and round_up(batch.batch_size, BATCH_BUCKET_SIZE) != self.prev_bs:
             self.model.clear_cache()
-            self.prev_bs = batch.batch_size
+            self.prev_bs = round_up(batch.batch_size, BATCH_BUCKET_SIZE)
         dbg_trace(
             scenario, f'bs:{batch.batch_size} num_reqs:{len(batch.requests)} seq_len:{batch.seq_length} padding:{batch.right_padding}')
         assert batch.right_padding > 0, 'No more room for next token!'
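Taken together, patches 1/3 and 2/3 track the previously seen batch size and clear the captured HPU graphs only when the bucketed batch size actually changes, so graphs are not dropped and recaptured for every small batch-size fluctuation. The following is a minimal standalone sketch of that bucketing condition, not part of the patch series: the BATCH_BUCKET_SIZE value, the round_up definition, and the GraphCacheTracker class are assumptions made for illustration, not the repository's actual implementations.

import math

BATCH_BUCKET_SIZE = 8  # assumed bucket size; the real value is configured elsewhere


def round_up(value: int, multiple: int) -> int:
    # Assumed behaviour of the round_up helper referenced in the diff.
    return math.ceil(value / multiple) * multiple


class GraphCacheTracker:
    # Hypothetical stand-in for the CausalLM bookkeeping touched by the patches.

    def __init__(self):
        self.prev_bs = 0   # mirrors the attribute added in patch 1/3
        self.cleared = 0   # counts how often the graph cache would be cleared

    def maybe_clear(self, batch_size: int) -> None:
        # Patch 2/3: clear captured HPU graphs only when the *bucketed* batch size
        # changes, since graphs are captured per bucket, not per exact batch size.
        bucketed = round_up(batch_size, BATCH_BUCKET_SIZE)
        if bucketed != self.prev_bs:
            self.cleared += 1  # stands in for self.model.clear_cache()
        self.prev_bs = bucketed


tracker = GraphCacheTracker()
for bs in (3, 5, 8, 9, 16):
    tracker.maybe_clear(bs)
# With the assumed bucket of 8, batch sizes 3, 5 and 8 share bucket 8 and 9 and 16
# share bucket 16, so the cache is cleared only twice instead of on every change.
print(tracker.cleared)  # 2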
From 0ca54b55f84e2fcb5b05bb887d736da1ebe76343 Mon Sep 17 00:00:00 2001
From: bkowalskiINTEL
Date: Tue, 16 Jul 2024 14:53:24 +0200
Subject: [PATCH 3/3] Do not schedule decode if max_new_tokens is equal to 1 (#183)

Co-authored-by: Bartosz Kowalski
---
 server/text_generation_server/models/causal_lm.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index 32c13daf..012f6249 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -985,6 +985,10 @@ class CausalLM(Model):
                 batch.past_key_values,
                 bypass_hpu_graph=prefill and self.limit_hpu_graph if self.enable_hpu_graph else None,
             )
+        elif all([req.stopping_criteria.max_new_tokens == 1 for req in batch.requests]):
+            # Don't schedule next forward if max_new_tokens for all requests equals 1
+            # - we've already generated the first and only needed token in the prefill phase
+            pass
         else:
             token_idx = torch.tensor(batch.attention_mask.shape[-1] - batch.right_padding).to(self.device)
             input_ids = torch.index_select(batch.input_ids, 1, token_idx - 1)
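Patch 3/3 short-circuits the decode step when every request in the batch asks for exactly one new token, because that token has already been produced during prefill. Below is a hedged, self-contained sketch of the guard, not part of the patch itself; Request, StoppingCriteria and should_skip_decode are simplified stand-ins rather than the server's real types.

from dataclasses import dataclass
from typing import List


@dataclass
class StoppingCriteria:
    max_new_tokens: int


@dataclass
class Request:
    stopping_criteria: StoppingCriteria


def should_skip_decode(requests: List[Request]) -> bool:
    # Mirrors the elif condition in the diff: skip the next forward pass only when
    # *all* requests need exactly one new token (already generated during prefill).
    return all(req.stopping_criteria.max_new_tokens == 1 for req in requests)


batch = [Request(StoppingCriteria(1)), Request(StoppingCriteria(1))]
print(should_skip_decode(batch))  # True: the decode forward can be skipped entirely

batch.append(Request(StoppingCriteria(64)))
print(should_skip_decode(batch))  # False: at least one request still needs decoding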