Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-19 13:52:07 +00:00
Add devcontainers for TRTLLM backend. (Co-authored-by: Morgan Funtowicz <morgan@huggingface.co>)
20 lines · 579 B · JSON
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/cpp
{
    "name": "CUDA",
    "build": {
        "dockerfile": "Dockerfile_trtllm",
        "context": ".."
    },
    "remoteEnv": {
        "PATH": "${containerEnv:PATH}:/usr/local/cuda/bin",
        "LD_LIBRARY_PATH": "$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64",
        "XLA_FLAGS": "--xla_gpu_cuda_data_dir=/usr/local/cuda"
    },
    "customizations" : {
        "jetbrains" : {
            "backend" : "CLion"
        }
    }
}
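For reference, a minimal sketch of how a config like this could be extended for contributors who use VS Code rather than the CLion backend declared above. The `runArgs` GPU passthrough and the extension list are assumptions for illustration, not part of the file in the repository; the `build` block is reused unchanged.

// Hypothetical variant (not in the repository): same build block, plus GPU
// passthrough via Docker runArgs and a VS Code customizations section.
{
    "name": "CUDA",
    "build": {
        "dockerfile": "Dockerfile_trtllm",
        "context": ".."
    },
    // Assumption: expose host GPUs to the container when launched with Docker.
    "runArgs": ["--gpus", "all"],
    "customizations": {
        "vscode": {
            // Assumed extensions for a C++/Rust codebase; adjust as needed.
            "extensions": ["ms-vscode.cpptools", "rust-lang.rust-analyzer"]
        }
    }
}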