Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-25 20:12:07 +00:00)
[TORCH COMPILE] Ignore HPU GRAPHS env var when eager mode is used (#165)
Co-authored-by: Jacek Czaja <jczaja@habana.ai>
parent 7c475b6226
commit c64b5b75e2

README.md: 10 changed lines
@@ -49,7 +49,15 @@ To use [🤗 text-generation-inference](https://github.com/huggingface/text-generation-inference)
    ```
 
    > For gated models such as [LLama](https://huggingface.co/meta-llama) or [StarCoder](https://huggingface.co/bigcode/starcoder), you will have to pass `-e HUGGING_FACE_HUB_TOKEN=<token>` to the `docker run` command above with a valid Hugging Face Hub read token.
 
-   ii. On 8 Gaudi/Gaudi2 cards:
+   ii. On 1 Gaudi/Gaudi2 card using pytorch eager mode with torch compile:
+
+   ```bash
+   model=meta-llama/Llama-2-7b-hf
+   volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
+
+   docker run -p 8080:80 -v $volume:/data --runtime=habana -e HABANA_VISIBLE_DEVICES=all -e PT_HPU_LAZY_MODE=0 -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --ipc=host ghcr.io/huggingface/tgi-gaudi:2.0.0 --model-id $model --max-input-tokens 1024 --max-total-tokens 2048
+   ```
+
+   iii. On 8 Gaudi/Gaudi2 cards:
 
    ```bash
    model=meta-llama/Llama-2-70b-hf
    volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run
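In the new recipe, `PT_HPU_LAZY_MODE=0` switches the Habana PyTorch bridge from lazy mode to eager mode, which is the prerequisite for running with `torch.compile`. A minimal sketch of how a server process can pick that flag up, assuming the environment variable is the only configuration channel (the parsing shown here is illustrative, not TGI's exact code):

```python
import os

# Illustrative only: derive a LAZY_MODE flag from the PT_HPU_LAZY_MODE
# environment variable that the `docker run` command above sets.
# The default of "1" (lazy mode) is an assumption.
LAZY_MODE = int(os.getenv("PT_HPU_LAZY_MODE", "1"))

if LAZY_MODE == 1:
    print("lazy mode: HPU graphs are allowed")
else:
    print("eager mode (torch.compile): HPU graphs will be ignored")
```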
@@ -650,7 +650,7 @@ class CausalLM(Model):
         model = self.prepare_model_for_quantization(model)
         model = model.eval().to(device)
 
-        self.enable_hpu_graph = os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true"
+        self.enable_hpu_graph = os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true" and LAZY_MODE == 1
         self.limit_hpu_graph = os.getenv("LIMIT_HPU_GRAPH", "false").lower() == "true"
         model = remove_kv_cache_from_output(model)
         if self.enable_hpu_graph:
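This one-line change is the heart of the commit: `ENABLE_HPU_GRAPH` still defaults to "true", but it now only takes effect when `LAZY_MODE == 1`, so setting `PT_HPU_LAZY_MODE=0` for an eager/`torch.compile` run silently disables HPU graph wrapping instead of conflicting with it. A small self-contained restatement of the gating logic (the helper function is hypothetical, written only to show the truth table):

```python
import os

def hpu_graphs_enabled(lazy_mode: int) -> bool:
    # Same expression as the "+" line in the diff above,
    # with LAZY_MODE passed in as a parameter.
    return os.getenv("ENABLE_HPU_GRAPH", "true").lower() == "true" and lazy_mode == 1

# With ENABLE_HPU_GRAPH unset (defaults to "true"):
assert hpu_graphs_enabled(lazy_mode=1) is True    # lazy mode: graphs on
assert hpu_graphs_enabled(lazy_mode=0) is False   # eager mode: env var ignored

# An explicit opt-out is still respected in lazy mode:
os.environ["ENABLE_HPU_GRAPH"] = "false"
assert hpu_graphs_enabled(lazy_mode=1) is False
```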
@@ -850,6 +850,7 @@ class CausalLM(Model):
             "attention_mask": attention_mask,
             "past_key_values": past_key_values,
             "token_idx": token_idx,
+            "lazy_mode": LAZY_MODE == 1,
         }
 
         if self.has_position_ids:
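The second hunk threads the same mode decision into the kwargs passed to the model's forward call, so downstream modeling code can branch on it per decoding step. A sketch of that data flow, assuming a `forward` that accepts a boolean `lazy_mode` keyword (the signature is an assumption; only the dict layout comes from the diff):

```python
# Hypothetical consumer of the kwargs dict built in the diff above.
def forward(input_ids, attention_mask=None, past_key_values=None,
            token_idx=None, lazy_mode=True):
    # A Gaudi-aware model can use lazy_mode to skip graph-related
    # bookkeeping when running eagerly under torch.compile.
    return "lazy" if lazy_mode else "eager"

LAZY_MODE = 0  # what PT_HPU_LAZY_MODE=0 would produce

kwargs = {
    "input_ids": [[1, 2, 3]],        # placeholder for a token tensor
    "attention_mask": [[1, 1, 1]],   # placeholder
    "past_key_values": None,
    "token_idx": 3,
    "lazy_mode": LAZY_MODE == 1,     # False for eager mode
}
print(forward(**kwargs))  # -> "eager"
```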