mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-09-12 04:44:52 +00:00
Refine `get_xpu_free_memory`: compute free memory from allocator-reserved bytes and an `XPU_MEMORY_FRACTION` environment override instead of parsing `xpu-smi` output.
Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>
This commit is contained in:
parent
b53b21c63a
commit
886bfab23d
@ -1,6 +1,7 @@
|
|||||||
import torch
|
import torch
|
||||||
from loguru import logger
|
from loguru import logger
|
||||||
import subprocess
|
import subprocess
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
def is_ipex_available():
|
def is_ipex_available():
|
||||||
@ -21,10 +22,13 @@ def get_cuda_free_memory(device, memory_fraction):
|
|||||||
def get_xpu_free_memory(device, memory_fraction):
    """Return the number of bytes of XPU memory available for allocation.

    Args:
        device: torch device object; its ``.index`` identifies the XPU card.
        memory_fraction: nominal fraction of device memory to use.
            NOTE(review): this argument is immediately overridden below by the
            ``XPU_MEMORY_FRACTION`` environment variable (default ``"1.0"``),
            so the caller-supplied value is effectively ignored — confirm this
            is intentional.

    Returns:
        int: ``max(0, total_memory * 0.9 * memory_fraction - reserved)``,
        i.e. the usable budget after a 10% headroom and subtracting memory
        already reserved by the torch XPU caching allocator, clamped at zero.
    """
    total_memory = torch.xpu.get_device_properties(device).total_memory
    device_id = device.index
    # Environment override takes precedence over the caller-supplied fraction.
    memory_fraction = float(os.getenv("XPU_MEMORY_FRACTION", "1.0"))
    # Keep 10% headroom, subtract allocator-reserved bytes, and clamp at zero
    # so callers never receive a negative memory budget.
    free_memory = max(
        0,
        int(
            total_memory * 0.9 * memory_fraction - torch.xpu.memory_reserved(device_id)
        ),
    )
    return free_memory
|
||||||
|
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user