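# Example build (the file name is an assumption, adjust to the repo layout):
#   docker build -f Dockerfile_llamacpp -t tgi-llamacpp \
#       --build-arg llama_version=b4599 .

# llama.cpp release tag fetched and built in the llamacpp-builder stage below.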
ARG llama_version=b4599
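# Hardware flavor forwarded to the Rust build as TGI_LLAMA_HARDWARE_TARGET
# (presumably read by the backend's build script).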
ARG llama_hardware_target=cpu
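
# Shared base: the CUDA devel image, which carries nvcc and the cuDNN headers
# needed by the builder stages, plus a Python venv.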
FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04 AS base
ENV DEBIAN_FRONTEND=noninteractive
RUN apt update && apt install -y \
    python3-venv \
    python3-pip
RUN python3 -m venv /venv
ENV PATH="/venv/bin:$PATH"
RUN pip3 install --no-cache-dir transformers
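
# Build-time toolchain shared by the llama.cpp and Rust builder stages.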
FROM base AS deps
WORKDIR /opt/src
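
# No `apt update` needed: the package lists from the base stage persist in this layer.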
RUN apt install -y \
    clang \
    cmake \
    curl \
    git \
    libssl-dev \
    pkg-config \
    tar
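
# Fetch the pinned llama.cpp release and install the core library under /usr/llama.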
FROM deps AS llamacpp-builder
ARG llama_version
ENV LLAMA_VERSION=${llama_version}
ADD https://github.com/ggerganov/llama.cpp/archive/refs/tags/${LLAMA_VERSION}.tar.gz /opt/src/
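
# Library-only build: common helpers, tests, examples, and the bundled server
# are all skipped; GGML_CUDA=1 compiles the CUDA kernels.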
RUN tar -xzf ${LLAMA_VERSION}.tar.gz && \
    cd llama.cpp-${LLAMA_VERSION} && \
    cmake \
        -B build \
        -DCMAKE_INSTALL_PREFIX=/usr/llama \
        -DCMAKE_C_COMPILER=clang \
        -DCMAKE_CXX_COMPILER=clang++ \
        -DGGML_CUDA=1 \
        -DLLAMA_BUILD_COMMON=OFF \
        -DLLAMA_BUILD_TESTS=OFF \
        -DLLAMA_BUILD_EXAMPLES=OFF \
        -DLLAMA_BUILD_SERVER=OFF \
    && cmake --build build --parallel --config Release -j \
    && cmake --install build
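
# Compile the TGI llama.cpp router against the library built above.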
FROM deps AS rust-builder
COPY rust-toolchain.toml rust-toolchain.toml
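# `--default-toolchain none` installs no toolchain here; rustup resolves the
# version pinned in rust-toolchain.toml on the first cargo invocation.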
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
ENV PATH="/root/.cargo/bin:$PATH"
COPY . .
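# The llama.cpp headers and shared libraries land in the default search paths,
# so the Rust build can find them without extra configuration.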
COPY --from=llamacpp-builder /usr/llama/lib/ /usr/lib/
COPY --from=llamacpp-builder /usr/llama/include/ /usr/include/
ARG llama_hardware_target
ENV TGI_LLAMA_HARDWARE_TARGET=${llama_hardware_target}
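# Link against the CUDA driver stub at build time; the real libcuda.so.1 is
# provided by the host's driver when the container runs.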
RUN export TGI_LIB_SEARCH_PATH=/usr/local/cuda/lib64:/usr/local/cuda/lib64/stubs && \
    ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
    cargo build \
        --profile release-opt \
        --package text-generation-router-llamacpp
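
# Final image: the slimmer CUDA runtime base plus only the artifacts needed to serve.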
FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04
WORKDIR /usr/bin
ENV DEBIAN_FRONTEND=noninteractive
ENV PATH="/venv/bin:$PATH"
RUN apt update && apt install -y \
    openssl \
    python3-venv \
    python3-pip
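# This stage starts from a fresh image, so the backend's requirements file must
# be copied in from the build context before it can be installed.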
COPY backends/llamacpp/requirements.txt requirements.txt
RUN python3 -m venv /venv && \
    pip3 install --no-cache-dir -r requirements.txt
COPY --from=llamacpp-builder /usr/llama/lib/ /usr/lib/
COPY --from=llamacpp-builder /usr/llama/include/ /usr/include/
COPY --from=llamacpp-builder /usr/llama/bin/ /usr/bin/
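# Install the router under the launcher's name, presumably to keep the standard TGI entrypoint.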
COPY --from=rust-builder /opt/src/target/release-opt/text-generation-router-llamacpp /usr/bin/text-generation-launcher
ENTRYPOINT ["text-generation-launcher"]