Mirror of https://github.com/huggingface/text-generation-inference.git
Synced 2025-04-19 13:52:07 +00:00
xpu 2.6 update (#3051)
* xpu 2.6 update

* install whl

* update get xpu memory api

* int

* fix awq crash if modules_to_not_convert is None

Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>
parent f91434e99b
commit 0b3e3db043
Dockerfile_intel
@@ -45,7 +45,7 @@ RUN cargo build --profile release-opt --frozen
 
 # Text Generation Inference base image for Intel
 
-FROM intel/oneapi-basekit:2024.2.1-0-devel-ubuntu22.04 AS xpu
+FROM intel/oneapi-basekit:2025.0.1-0-devel-ubuntu22.04 AS xpu
 
 USER root
 
@@ -87,7 +87,7 @@ RUN echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https:/
 
 RUN mv /tmp/intel-for-pytorch-gpu-dev.list /etc/apt/sources.list.d
 
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt install -y xpu-smi cmake ninja-build pciutils intel-pti-dev-0.9
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt install -y xpu-smi cmake ninja-build pciutils intel-ocloc
 
 # Text Generation Inference base env
 ENV HF_HOME=/data \
@@ -96,13 +96,11 @@ ENV HF_HOME=/data \
 
 
 WORKDIR /usr/src
-RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/torch-2.5.0a0%2Bgite84e33f-cp311-cp311-linux_x86_64.whl --no-cache-dir
-RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/torchaudio-2.5.0a0%2B56bc006-cp311-cp311-linux_x86_64.whl --no-cache-dir
-RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/torchvision-0.20.0a0%2B8e8a208-cp311-cp311-linux_x86_64.whl --no-cache-dir
-RUN pip install https://intel-optimized-pytorch.s3.cn-north-1.amazonaws.com.cn/ipex_dev/xpu/oneccl_bind_pt-2.5.0%2Bxpu-cp311-cp311-linux_x86_64.whl --no-cache-dir
+RUN pip install torch==2.6.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/test/xpu
 
-RUN pip install triton-xpu==3.0.0b2 --no-cache-dir
+RUN pip install triton-xpu==3.2.0b1 --no-cache-dir
 
 # Install server
 COPY proto proto
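
Note: this hunk swaps four pinned Intel-hosted wheels for the official PyTorch 2.6 XPU wheels from the pytorch.org test index. A minimal sanity check for the resulting image, as a sketch using only the public torch.xpu API of PyTorch 2.6 (the printed device names are illustrative, not taken from the commit):

    # Verify that the installed torch build carries the XPU backend.
    import torch

    print(torch.__version__)              # expect a 2.6.0+xpu build
    if torch.xpu.is_available():          # backend compiled in and a device visible
        for i in range(torch.xpu.device_count()):
            print(i, torch.xpu.get_device_name(i))
    else:
        print("No XPU device visible; check driver and oneAPI runtime")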
@@ -114,15 +112,14 @@ RUN cd server && \
     pip install -U pip uv && \
     uv pip install -e ".[accelerate, compressed-tensors, peft, outlines]" --no-cache-dir
 
-ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/intel/oneapi/pti/0.9/lib:/opt/conda/lib
+ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/conda/lib
 ENV CCL_ZE_IPC_EXCHANGE=sockets
-#ENV TORCH_LLM_ALLREDUCE=1
-#ENV CCL_TOPO_FABRIC_VERTEX_CONNECTION_CHECK=0
+ENV TORCH_LLM_ALLREDUCE=1
+ENV CCL_TOPO_FABRIC_VERTEX_CONNECTION_CHECK=0
 ENV TORCH_DEVICE_BACKEND_AUTOLOAD=0
 
-RUN git clone https://github.com/intel/intel-extension-for-pytorch && cd intel-extension-for-pytorch && git checkout 1ccf72b2d11cd00b47aef6d6cd054c088aa6f083
-RUN cd intel-extension-for-pytorch && git submodule update --init --recursive && USE_AOT_DEVLIST='pvc,ats-m150' BUILD_SEPARATE_OPS=OFF BUILD_WITH_CPU=OFF USE_XETLA=ON python setup.py install && rm -rf /usr/src/intel-extension-for-pytorch
+RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/xpu/oneccl_bind_pt-2.6.0%2Bxpu-cp311-cp311-linux_x86_64.whl
+RUN pip install https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_stable/xpu/intel_extension_for_pytorch-2.6.10%2Bxpu-cp311-cp311-linux_x86_64.whl
 
 # Install benchmarker
 COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
 # Install router
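
Note: intel-extension-for-pytorch is no longer built from source inside the image; the prebuilt 2.6.10+xpu wheel and matching oneccl_bind_pt wheel are installed instead, and the previously commented-out collective-communication variables are now enabled. A quick import check, assuming only the standard top-level modules of both packages:

    # Confirm the prebuilt IPEX wheel loads against the torch 2.6 install.
    import torch
    import intel_extension_for_pytorch as ipex  # registers XPU optimizations

    print(torch.__version__, ipex.__version__)  # both should report 2.6.x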
server/text_generation_server/utils/import_utils.py
@@ -18,14 +18,10 @@ def get_cuda_free_memory(device, memory_fraction):
 
 
 def get_xpu_free_memory(device, memory_fraction):
-    total_memory = torch.xpu.get_device_properties(device).total_memory
-    device_id = device.index
-    memory_fraction = float(os.getenv("XPU_MEMORY_FRACTION", "1.0"))
+    total_free_memory, total_xpu_memory = torch.xpu.mem_get_info(device)
+    memory_fraction = float(os.getenv("XPU_MEMORY_FRACTION", "0.9"))
     free_memory = max(
-        0,
-        int(
-            total_memory * 0.9 * memory_fraction - torch.xpu.memory_reserved(device_id)
-        ),
+        0, int(total_free_memory - (1 - memory_fraction) * total_xpu_memory)
     )
     return free_memory
 
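
Note: the rewritten helper budgets from what the runtime reports as actually free (torch.xpu.mem_get_info, which this commit starts using) and holds back (1 - fraction) of total device memory, instead of estimating from total memory minus reservations. The default XPU_MEMORY_FRACTION also drops from 1.0 to 0.9, so a tenth of the card is left for the driver and other processes. A worked example with made-up numbers (the 32 GiB device and the standalone helper are illustrative, not part of the commit):

    # Sketch of the new budget rule: keep (1 - fraction) of the card in reserve.
    def xpu_budget(free: int, total: int, fraction: float) -> int:
        return max(0, int(free - (1 - fraction) * total))

    GiB = 1024**3
    # Hypothetical 32 GiB card, 30 GiB currently free, default fraction 0.9:
    print(xpu_budget(30 * GiB, 32 * GiB, 0.9) / GiB)  # -> 26.8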
server/text_generation_server/utils/quantization.py
@@ -79,6 +79,8 @@ def _get_quantizer_config(model_id, revision):
         modules_to_not_convert = data["quantization_config"].get(
             "modules_to_not_convert", []
         )
+        if modules_to_not_convert is None:
+            modules_to_not_convert = []
     except Exception:
         filename = "quantize_config.json"
         try:
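
Note: the crash guarded against here comes from a subtlety of dict.get: the default applies only when the key is missing, so a config carrying "modules_to_not_convert": null in its JSON yields None rather than []. A minimal reproduction (the one-line config is a contrived stand-in for a real quantization_config):

    import json

    # JSON null parses to None, and .get returns it because the key exists.
    config = json.loads('{"modules_to_not_convert": null}')
    modules = config.get("modules_to_not_convert", [])
    print(modules)        # None -- iterating over it would raise TypeError

    if modules is None:   # the guard added by this commit
        modules = []
    print(modules)        # []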