From 5fb8c275c3d140bd766f024ba29edc41d26fcfc2 Mon Sep 17 00:00:00 2001
From: Felix Marty <9808326+fxmarty@users.noreply.github.com>
Date: Thu, 20 Jun 2024 09:03:00 +0000
Subject: [PATCH] fix style & typo

---
 .github/workflows/build.yaml                      | 2 +-
 integration-tests/models/test_flash_gemma_gptq.py | 1 +
 integration-tests/models/testing_utils.py         | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index dcfde28a..ad6d9827 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -160,7 +160,7 @@ jobs:
           if [[ ${{ inputs.hardware }} == "rocm" ]]
           then
-            echo "docker_volume=/data/cache/.cache/huggingface/hub"
+            echo "docker_volume=/data/cache/.cache/huggingface/hub" >> "$GITHUB_OUTPUT"
           else
             echo "docker_volume=/mnt/cache" >> "$GITHUB_OUTPUT"
           fi
 
diff --git a/integration-tests/models/test_flash_gemma_gptq.py b/integration-tests/models/test_flash_gemma_gptq.py
index 14a8075d..8dc674b6 100644
--- a/integration-tests/models/test_flash_gemma_gptq.py
+++ b/integration-tests/models/test_flash_gemma_gptq.py
@@ -4,6 +4,7 @@ from testing_utils import require_backend_async, require_backend
 
 # These tests do not pass on ROCm, that does not support head_dim > 128 (2b model is 256).
 
+
 @pytest.fixture(scope="module")
 @require_backend("cuda", "xpu")
 def flash_gemma_gptq_handle(launcher):
diff --git a/integration-tests/models/testing_utils.py b/integration-tests/models/testing_utils.py
index 606a24c0..759e9de7 100644
--- a/integration-tests/models/testing_utils.py
+++ b/integration-tests/models/testing_utils.py
@@ -51,6 +51,7 @@ def is_flaky_async(
 
     return decorator
 
+
 def require_backend(*args):
     def decorator(func):
         @functools.wraps(func)
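
Note on the first hunk: in GitHub Actions, a step output only becomes visible to later steps when the key=value pair is appended to the file named by $GITHUB_OUTPUT; a bare echo just writes to the job log, so the ROCm branch was silently producing no docker_volume output. A minimal sketch of the pattern (the step id "vars" and the follow-up step are illustrative, not part of this workflow):

      - id: vars
        shell: bash
        run: |
          # Append key=value to the special output file; a plain echo would be lost.
          echo "docker_volume=/data/cache/.cache/huggingface/hub" >> "$GITHUB_OUTPUT"
      - name: Use the output
        run: echo "Mounting ${{ steps.vars.outputs.docker_volume }}"

The two Python hunks are the style half of the fix: each adds the second blank line that PEP 8 (and black) expect before a top-level definition.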