text-generation-inference/integration-tests/models/test_flash_llama.py
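
# Integration tests for the flash-attention Llama model in text-generation-inference.
# The `launcher`, `generate_load`, and `response_snapshot` fixtures are assumed to come
# from the shared integration-test conftest; they are not defined in this file.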
import pytest


@pytest.fixture(scope="module")
def flash_llama_handle(launcher):
    # Start a text-generation-inference server for the model with two shards;
    # the server is torn down when the module's tests finish.
    with launcher("huggyllama/llama-7b", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_llama(flash_llama_handle):
    # Wait for the launched server to report healthy, then expose its client to the tests.
    await flash_llama_handle.health(300)
    return flash_llama_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_simple(flash_llama, response_snapshot):
    response = await flash_llama.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_all_params(flash_llama, response_snapshot):
    response = await flash_llama.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    # Generation ends before max_new_tokens here (stop sequence or EOS), so only 5 tokens are produced.
    assert response.details.generated_tokens == 5
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_load(flash_llama, generate_load, response_snapshot):
    responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4)

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses == response_snapshot
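

# Note: `generate_load` issues several identical requests against the server. A minimal
# sketch of such a helper (an illustration under assumed behavior, not the actual
# conftest implementation) could look like:
#
#     import asyncio
#
#     async def generate_load(client, prompt, max_new_tokens, n):
#         # Fire `n` identical generate calls concurrently and collect the responses.
#         requests = [
#             client.generate(prompt, max_new_tokens=max_new_tokens)
#             for _ in range(n)
#         ]
#         return await asyncio.gather(*requests)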