Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-06-19 15:52:08 +00:00
backend(llama): add CUDA Dockerfile_llamacpp for now
This commit is contained in: parent f38c34aeb7, commit 960c12bd6e
Dockerfile_llamacpp
@@ -1,7 +1,10 @@
-FROM ubuntu:24.04 AS base
+ARG llama_version=b4599
+ARG llama_hardware_target=cpu
+
+FROM nvidia/cuda:12.6.3-cudnn-devel-ubuntu24.04 AS base
 
 ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update && apt-get install -y \
+RUN apt update && apt install -y \
     python3-venv \
     python3-pip
 
@@ -10,68 +13,72 @@ ENV PATH="/venv/bin:$PATH"
 RUN pip3 install --no-cache-dir transformers
 
 FROM base AS deps
-WORKDIR /deps
+WORKDIR /opt/src
 
-RUN apt-get install -y \
-    clang cmake git
+RUN apt install -y \
+    clang \
+    cmake \
+    curl \
+    git \
+    libssl-dev \
+    pkg-config \
+    tar
 
-# nvidia-cuda-toolkit
-# -DGGML_CUDA=ON \
+FROM deps AS llamacpp-builder
+ARG llama_version
+ENV LLAMA_VERSION=${llama_version}
 
-ENV LLAMA_VERSION=b4599
-RUN git clone --depth 1 -b ${LLAMA_VERSION} https://github.com/ggerganov/llama.cpp \
-    && cd llama.cpp \
-    && cmake -B build \
-        -DCMAKE_INSTALL_PREFIX=/usr \
-        -DCMAKE_INSTALL_LIBDIR=/usr/lib \
+ADD https://github.com/ggerganov/llama.cpp/archive/refs/tags/${LLAMA_VERSION}.tar.gz /opt/src/
+RUN tar -xzf ${LLAMA_VERSION}.tar.gz && \
+    cd llama.cpp-${LLAMA_VERSION} && \
+    cmake \
+        -B build \
+        -DCMAKE_INSTALL_PREFIX=/usr/llama \
         -DCMAKE_C_COMPILER=clang \
         -DCMAKE_CXX_COMPILER=clang++ \
+        -DGGML_CUDA=1 \
         -DLLAMA_BUILD_COMMON=OFF \
         -DLLAMA_BUILD_TESTS=OFF \
         -DLLAMA_BUILD_EXAMPLES=OFF \
         -DLLAMA_BUILD_SERVER=OFF \
-    && cmake --build build --config Release -j \
+    && cmake --build build --parallel --config Release -j \
     && cmake --install build
 
-# ENV MIMALLOC_VERSION=v3.0.1
-# RUN git clone --depth 1 -b ${MIMALLOC_VERSION} https://github.com/microsoft/mimalloc \
-#     && cd mimalloc \
-#     && cmake -B build \
-#         -DCMAKE_INSTALL_PREFIX=/usr \
-#         -DCMAKE_INSTALL_LIBDIR=/usr/lib \
-#         -DCMAKE_C_COMPILER=clang \
-#         -DCMAKE_CXX_COMPILER=clang++ \
-#     && cmake --build build --config Release -j \
-#     && cmake --install build
-
-RUN apt-get install -y \
-    curl pkg-config libssl-dev
-
-WORKDIR /app
+FROM deps AS rust-builder
 
 COPY rust-toolchain.toml rust-toolchain.toml
 RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
 ENV PATH="/root/.cargo/bin:$PATH"
-RUN cargo install cargo-chef --locked
-
-FROM deps AS planner
 COPY . .
-RUN cargo chef prepare --recipe-path recipe.json
+COPY --from=llamacpp-builder /usr/llama/lib/ /usr/lib/
+COPY --from=llamacpp-builder /usr/llama/include/ /usr/include/
 
-FROM deps AS builder
-COPY --from=planner /app/recipe.json recipe.json
-RUN cargo chef cook \
-    --recipe-path recipe.json \
+ARG llama_hardware_target
+ENV TGI_LLAMA_HARDWARE_TARGET=${llama_hardware_target}
+RUN export TGI_LIB_SEARCH_PATH=/usr/local/cuda/lib64:/usr/local/cuda/lib64/stubs && \
+    ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
+    cargo build \
     --profile release-opt \
     --package text-generation-router-llamacpp
-COPY . .
-RUN cargo build \
-    --profile release-opt \
-    --package text-generation-router-llamacpp --frozen
 
-FROM base AS runtime
+FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04
+WORKDIR /usr/bin
 
-COPY --from=deps /usr/lib/libllama.so /usr/lib/
-COPY --from=deps /usr/lib/libggml*.so /usr/lib/
-COPY --from=builder /app/target/release-opt/text-generation-router-llamacpp /bin/text-generation-launcher
+ENV DEBIAN_FRONTEND=noninteractive
+ENV PATH="/venv/bin:$PATH"
+
+RUN apt update && apt install -y \
+    openssl \
+    python3-venv \
+    python3-pip
+
+RUN python3 -m venv /venv && \
+    pip3 install --no-cache-dir -r backends/llamacpp/requirements.txt
+
+COPY --from=llamacpp-builder /usr/llama/lib/ /usr/lib/
+COPY --from=llamacpp-builder /usr/llama/include/ /usr/include/
+COPY --from=llamacpp-builder /usr/llama/bin/ /usr/bin/
+COPY --from=rust-builder /opt/src/target/release-opt/text-generation-router-llamacpp /usr/bin/text-generation-launcher
 
 ENTRYPOINT ["text-generation-launcher"]
backends/llamacpp/build.rs
@@ -1,7 +1,29 @@
+use std::collections::HashMap;
 use std::env;
 use std::path::PathBuf;
 
+fn inject_transient_dependencies(lib_search_path: Option<&str>, lib_target_hardware: &str) {
+    let hardware_targets = HashMap::from([("cpu", None), ("cuda", Some(vec!["cuda"]))]);
+
+    if let Some(lib_search_path) = lib_search_path {
+        lib_search_path.split(":").for_each(|path| {
+            println!("cargo:rustc-link-search=dependency={path}");
+        });
+    }
+
+    if let Some(hardware_transient_deps) = hardware_targets.get(lib_target_hardware) {
+        if let Some(additional_transient_deps) = hardware_transient_deps {
+            additional_transient_deps.iter().for_each(|dep| {
+                println!("cargo:rustc-link-lib={dep}");
+            });
+        }
+    }
+}
+
 fn main() {
+    let lib_search_path = option_env!("TGI_LLAMA_LD_LIBRARY_PATH");
+    let lib_target_hardware = option_env!("TGI_LLAMA_HARDWARE_TARGET").unwrap_or("cpu");
+
     let bindings = bindgen::Builder::default()
         .header("src/wrapper.h")
         .prepend_enum_name(false)

@@ -16,5 +38,5 @@ fn main() {
 
     pkg_config::Config::new().probe("llama").unwrap();
 
-    println!("cargo::rerun-if-changed=build.rs");
+    inject_transient_dependencies(lib_search_path, lib_target_hardware);
 }
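For illustration only, a standalone Rust sketch of the link-directive logic introduced above: it mirrors inject_transient_dependencies and shows the cargo directives one would expect for a CUDA build, using the stub search path that appears in the Dockerfile's rust-builder stage. The example main() and its hard-coded arguments are assumptions for demonstration, not part of the commit.

// Standalone sketch; values below are illustrative, not taken verbatim from the build.
use std::collections::HashMap;

fn inject_transient_dependencies(lib_search_path: Option<&str>, lib_target_hardware: &str) {
    // "cpu" needs no extra libraries; "cuda" additionally links against libcuda.
    let hardware_targets: HashMap<&str, Option<Vec<&str>>> =
        HashMap::from([("cpu", None), ("cuda", Some(vec!["cuda"]))]);

    // Every colon-separated entry becomes a linker search path for cargo.
    if let Some(paths) = lib_search_path {
        for path in paths.split(':') {
            println!("cargo:rustc-link-search=dependency={path}");
        }
    }

    // Emit one link-lib directive per transient dependency of the chosen target.
    if let Some(Some(deps)) = hardware_targets.get(lib_target_hardware) {
        for dep in deps {
            println!("cargo:rustc-link-lib={dep}");
        }
    }
}

fn main() {
    // Hypothetical invocation; expected output:
    //   cargo:rustc-link-search=dependency=/usr/local/cuda/lib64
    //   cargo:rustc-link-search=dependency=/usr/local/cuda/lib64/stubs
    //   cargo:rustc-link-lib=cuda
    inject_transient_dependencies(
        Some("/usr/local/cuda/lib64:/usr/local/cuda/lib64/stubs"),
        "cuda",
    );
}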
backends/llamacpp/requirements.txt (new file)
@@ -0,0 +1,2 @@
+transformers==4.48.2
+huggingface-hub==0.28.1