Using CPU to build the images (caveat: waiting on all 3 builds before integration tests).
This commit is contained in:
Nicolas Patry 2024-06-06 14:23:05 +02:00
parent 5d16af6d35
commit ab7578b9c0

View File

@ -28,7 +28,7 @@ jobs:
concurrency:
group: ${{ github.workflow }}-build-and-push-image-${{ matrix.name }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci] runs-on: [self-hosted, cpu]
strategy:
matrix:
include:
@ -123,6 +123,13 @@ jobs:
labels: ${{ steps.meta.outputs.labels || steps.meta-pr.outputs.labels }}
cache-from: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min
cache-to: type=registry,ref=registry.internal.huggingface.tech/api-inference/community/text-generation-inference:cache${{ matrix.label }},mode=min
integration-tests:
concurrency:
group: ${{ github.workflow }}-build-and-push-image-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
runs-on: [self-hosted, nvidia-gpu , multi-gpu, 4-a10, ci]
needs: build-and-push-image
steps:
- name: Set up Python
if: matrix.name == 'cuda'
uses: actions/setup-python@v4