Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-09-12 04:44:52 +00:00
Intel CI ?

commit 15e178e3ad (parent 43f39f6894)
.github/workflows/build.yaml (vendored): 6 changed lines
@@ -75,10 +75,10 @@ jobs:
         export label_extension="-intel-cpu"
         export docker_devices="none"
         export docker_volume="/mnt/cache"
-        export runs_on="ubuntu-latest"
-        # export runs_on="aws-highmemory-32-plus-priv"
+        # export runs_on="ubuntu-latest"
+        export runs_on="aws-highmemory-32-plus-priv"
         export platform="cpu"
-        export extra_pytest="-k test_flash_llama_load"
+        export extra_pytest="-k test_flash_llama_simple"
         ;;
       esac
       echo $dockerfile
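These exports sit in a shell case block that picks per-hardware CI settings; this commit points the intel-cpu arm at the private high-memory AWS runner and narrows the integration tests to a single test. A minimal sketch of the resulting arm, assuming a hypothetical case subject "$hardware" and an illustrative $GITHUB_OUTPUT hand-off (neither is shown in this diff):

    # Sketch of the intel-cpu arm after this commit.
    case "$hardware" in           # "$hardware" is a hypothetical subject
      intel-cpu)
        export label_extension="-intel-cpu"
        export docker_devices="none"
        export docker_volume="/mnt/cache"
        # export runs_on="ubuntu-latest"
        export runs_on="aws-highmemory-32-plus-priv"
        export platform="cpu"
        export extra_pytest="-k test_flash_llama_simple"
        ;;
    esac
    echo $dockerfile
    # Illustrative hand-off: later steps would read these as step outputs.
    echo "runs_on=${runs_on}" >> "${GITHUB_OUTPUT}"
    echo "extra_pytest=${extra_pytest}" >> "${GITHUB_OUTPUT}"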
The second changed file (its path is not shown in this view) renames the integration test:

@@ -15,7 +15,7 @@ async def flash_llama(flash_llama_handle):
 
 @pytest.mark.asyncio
 @pytest.mark.private
-async def test_flash_llama(flash_llama, response_snapshot):
+async def test_flash_llama_simple(flash_llama, response_snapshot):
     response = await flash_llama.generate(
         "Test request", max_new_tokens=10, decoder_input_details=True
     )
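The rename lines up with the new workflow filter: pytest's -k option selects tests by keyword substring, so "-k test_flash_llama_simple" only matches once a test carries that name, and the more specific name avoids also matching test_flash_llama_load the way a bare "test_flash_llama" keyword would. A sketch of reproducing the CI selection locally; the integration-tests path is an assumption, not shown in this diff:

    # Run exactly the test the CI's extra_pytest filter targets
    # (integration-tests directory assumed from the repo layout).
    pytest integration-tests -k test_flash_llama_simple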