Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-24).
On NixOS, the CUDA driver shim is mounted at /run/opengl-driver, which is where Nix packages expect to find it. On other distributions, however, the container toolkit mounts the driver libraries into standard FHS paths such as /usr/lib64. This is a small change that points the dynamic loader at those paths so it can find the shim.
{
  stdenv,
  dockerTools,
  cacert,
  text-generation-inference,
  runCommand,
  stream ? false,
}:

let
  build = if stream then dockerTools.streamLayeredImage else dockerTools.buildLayeredImage;
in
build {
  name = "tgi-docker";
  tag = "latest";
  config = {
    EntryPoint = [ "${text-generation-inference}/bin/text-generation-inference" ];
    Env = [
      "HF_HOME=/data"
      "PORT=80"
      # The CUDA container toolkit will mount the driver shim into the
      # container. We just have to ensure that the dynamic loader finds
      # the libraries.
      "LD_LIBRARY_PATH=/usr/lib64"
    ];
  };
  extraCommands = ''
    mkdir -p tmp
    chmod -R 1777 tmp
  '';
  contents = [
    cacert
    stdenv.cc
  ];
}
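For reference, a minimal sketch of how this expression might be consumed via callPackage. The file path ./nix/docker.nix and the wrapping attribute set are assumptions for illustration, not taken from the upstream repo:

# Hypothetical consumer (assumed file layout, not part of upstream):
# builds both image variants from the expression above.
{ pkgs, text-generation-inference }:
{
  # buildLayeredImage produces a tarball; load it with `docker load < result`.
  image = pkgs.callPackage ./nix/docker.nix {
    inherit text-generation-inference;
  };

  # streamLayeredImage instead produces a script that writes the image to
  # stdout; run it as `./result | docker load` to skip storing the tarball.
  streamedImage = pkgs.callPackage ./nix/docker.nix {
    inherit text-generation-inference;
    stream = true;
  };
}

Note that running the resulting image still requires the NVIDIA container toolkit on the host (e.g. `docker run --gpus all ...`), since it is the toolkit that mounts the driver shim into the container in the first place.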