remove debug comments

baptiste 2025-04-04 07:45:13 +00:00
parent 32b00e039b
commit 2c837c671d
4 changed files with 2 additions and 42 deletions

@@ -129,9 +129,9 @@ jobs:
 export label_extension="-gaudi"
 export docker_volume="/mnt/cache"
 export docker_devices=""
-export runs_on="aws-dl1-24xlarge"
+export runs_on="ubuntu-latest"
 export platform=""
-export extra_pytest="--gaudi"
+export extra_pytest=""
 export target=""
 esac
 echo $dockerfile

@@ -54,25 +54,6 @@ logger.add(
     level="INFO",
 )
-# def cleanup_handler(signum, frame):
-#     logger.info("\nCleaning up containers due to shutdown, please wait...")
-#     try:
-#         client = docker.from_env()
-#         containers = client.containers.list(filters={"name": "tgi-tests-"})
-#         for container in containers:
-#             try:
-#                 container.stop()
-#                 container.remove()
-#                 logger.info(f"Successfully cleaned up container {container.name}")
-#             except Exception as e:
-#                 logger.error(f"Error cleaning up container {container.name}: {str(e)}")
-#     except Exception as e:
-#         logger.error(f"Error during cleanup: {str(e)}")
-#     sys.exit(1)
-# signal.signal(signal.SIGINT, cleanup_handler)
-# signal.signal(signal.SIGTERM, cleanup_handler)
 def stream_container_logs(container, test_name):
     """Stream container logs in a separate thread."""

@@ -1,16 +1,3 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
 pytest >= 8.3.5
 pytest-asyncio >= 0.26.0
 docker >= 7.1.0

@@ -4,14 +4,6 @@ from text_generation import AsyncClient
 import pytest
 from Levenshtein import distance as levenshtein_distance
-# Model that are not working but should... :(
-# - google/gemma-2-2b-it
-# - google/flan-t5-large
-# - codellama/CodeLlama-13b-hf
-# - ibm-granite/granite-3.0-8b-instruct
-# - microsoft/Phi-3.5-MoE-instruct
-# - microsoft/Phi-3-mini-4k-instruct
 # The config in args is not optimized for speed but only check that inference is working for the different models architectures
 TEST_CONFIGS = {
     "meta-llama/Llama-3.1-8B-Instruct-shared": {