diff --git a/.github/workflows/build_trtllm.yaml b/.github/workflows/build_trtllm.yaml
index 431267fd..243084de 100644
--- a/.github/workflows/build_trtllm.yaml
+++ b/.github/workflows/build_trtllm.yaml
@@ -32,7 +32,7 @@ jobs:
       - name: Extract TensorRT-LLM version
         run: |
-          echo "TENSORRT_LLM_VERSION=$(grep -oP '([a-z,0-9]{40})' backends/trtllm/cmake/trtllm.cmake)" >> $GITHUB_ENV
+          echo "TENSORRT_LLM_VERSION=$(grep -oP '([a-z,0-9]{40})' $GITHUB_WORKSPACE/text-generation-inference/backends/trtllm/cmake/trtllm.cmake)" >> $GITHUB_ENV
           echo "TensorRT-LLM version: $TENSORRT_LLM_VERSION"

       - name: "Configure AWS Credentials"
@@ -115,8 +115,8 @@ jobs:
             sccache_bucket=${{ secrets.AWS_S3_BUCKET_GITHUB_TGI_TEST }}
             sccache_s3_key_prefix=trtllm-${{ env.TENSORRT_LLM_VERSION }}
             sccache_region=us-east-1
-          cache-from: type=s3,region=us-east-1,bucket=ci-docker-buildx-cache,name=text-generation-inference-cache-${{ env.LABEL }}-trtllm,mode=min,access_key_id=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_ACCESS_KEY_ID }},secret_access_key=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_SECRET_ACCESS_KEY }},mode=min
-          cache-to: type=s3,region=us-east-1,bucket=ci-docker-buildx-cache,name=text-generation-inference-cache-${{ env.LABEL }}-trtllm,mode=min,access_key_id=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_ACCESS_KEY_ID }},secret_access_key=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_SECRET_ACCESS_KEY }},mode=min
+          cache-from: type=s3,region=us-east-1,bucket=ci-docker-buildx-cache,name=text-generation-inference-cache-${{ env.LABEL }},mode=min,access_key_id=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_ACCESS_KEY_ID }},secret_access_key=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_SECRET_ACCESS_KEY }}
+          cache-to: type=s3,region=us-east-1,bucket=ci-docker-buildx-cache,name=text-generation-inference-cache-${{ env.LABEL }},mode=min,access_key_id=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_ACCESS_KEY_ID }},secret_access_key=${{ secrets.S3_CI_DOCKER_BUILDX_CACHE_SECRET_ACCESS_KEY }}
diff --git a/Dockerfile_trtllm b/Dockerfile_trtllm
index 818f0cb3..ce150b3a 100644
--- a/Dockerfile_trtllm
+++ b/Dockerfile_trtllm
@@ -89,7 +89,7 @@ COPY . .
 COPY --from=trt-builder /usr/local/tensorrt /usr/local/tensorrt
 COPY --from=mpi-builder /usr/local/mpi /usr/local/mpi
 RUN mkdir $TGI_INSTALL_PREFIX && mkdir "$TGI_INSTALL_PREFIX/include" && mkdir "$TGI_INSTALL_PREFIX/lib" && \
-# python3 scripts/setup_sccache.py --is-gha-build ${is_gha_build} -k ${aws_access_key_id} -s ${aws_secret_key_id} -t ${aws_session_token} -b ${sccache_bucket} -r ${sscache_region} -p ${sccache_s3_key_prefix } && \
+    python3 scripts/setup_sccache.py --is-gha-build ${is_gha_build} -k ${aws_access_key_id} -s ${aws_secret_key_id} -t ${aws_session_token} -b ${sccache_bucket} -r ${sccache_region} -p ${sccache_s3_key_prefix} && \
     RUSTC_WRAPPER=sccache CMAKE_INSTALL_PREFIX=$TGI_INSTALL_PREFIX cargo build --profile ${build_type} --package text-generation-backends-trtllm --bin text-generation-backends-trtllm

 FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04 AS runtime