diff --git a/README.md b/README.md
index 299f6667..eeebfcb5 100644
--- a/README.md
+++ b/README.md
@@ -49,7 +49,15 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-gene
    ```
    > For gated models such as [LLama](https://huggingface.co/meta-llama) or [StarCoder](https://huggingface.co/bigcode/starcoder), you will have to pass `-e HUGGING_FACE_HUB_TOKEN=` to the `docker run` command above with a valid Hugging Face Hub read token.
 
-   ii. On 8 Gaudi/Gaudi2 cards:
+   ii. On 1 Gaudi/Gaudi2 card using PyTorch eager mode with torch compile:
+   ```bash
+   model=meta-llama/Llama-2-7b-hf
+   volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
+
+   docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e PT_HPU_LAZY_MODE=0 -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.0 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
+   ```
+
+   iii. On 8 Gaudi/Gaudi2 cards:
    ```bash
    model=meta-llama/Llama-2-70b-hf
    volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index e701c9c1..f20db570 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -650,7 +650,7 @@ class CausalLM(Model):
         model = self.prepare_model_for_quantization(model)
         model = model.eval().to(device)
 
-        self.enable_hpu_graph = os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true"
+        self.enable_hpu_graph = os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true" and LAZY_MODE == 1
         self.limit_hpu_graph = os.getenv("LIMIT_HPU_GRAPH", "false").lower() == "true"
         model = remove_kv_cache_from_output(model)
         if self.enable_hpu_graph:
@@ -850,6 +850,7 @@ class CausalLM(Model):
             "attention_mask": attention_mask,
             "past_key_values": past_key_values,
             "token_idx": token_idx,
+            "lazy_mode": LAZY_MODE == 1,
         }
 
         if self.has_position_ids:
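
The `causal_lm.py` change above gates HPU graph wrapping on a module-level `LAZY_MODE` flag and forwards a `lazy_mode` boolean in the generation kwargs. A minimal sketch of how that flag presumably relates to the `PT_HPU_LAZY_MODE=0` environment variable shown in the README example (the exact derivation and default value are assumptions, not taken from this diff):

```python
import os

# Assumption: LAZY_MODE mirrors the PT_HPU_LAZY_MODE environment variable,
# defaulting to lazy execution (1) when the variable is unset.
LAZY_MODE = int(os.environ.get("PT_HPU_LAZY_MODE", "1"))

# With the patch above, HPU graphs are only enabled in lazy mode; when the
# container is started with PT_HPU_LAZY_MODE=0 (eager mode / torch.compile),
# the graph wrapper is skipped even if ENABLE_HPU_GRAPH is left at "true".
enable_hpu_graph = (
    os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true" and LAZY_MODE == 1
)
print(f"lazy_mode={LAZY_MODE == 1}, enable_hpu_graph={enable_hpu_graph}")
```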