Revert "Update the flaky mllama test."

This reverts commit 8a870b31b9.
commit b86c3947ab
parent 8a870b31b9
Author: Nicolas Patry
Date: 2025-02-11 17:13:06 +01:00
3 changed files with 18 additions and 19 deletions

Changed file 1 of 3: response snapshot (JSON, two chat completions)

@@ -6,7 +6,7 @@
         "index": 0,
         "logprobs": null,
         "message": {
-          "content": "A chicken sits on a pile of money, looking",
+          "content": "In a small town, a chicken named Cluck",
           "name": null,
           "role": "assistant",
           "tool_calls": null
@@ -14,15 +14,15 @@
         "usage": null
       }
     ],
-    "created": 1739290197,
+    "created": 1738753835,
     "id": "",
     "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
     "object": "chat.completion",
     "system_fingerprint": "3.1.1-dev0-native",
     "usage": {
       "completion_tokens": 10,
-      "prompt_tokens": 45,
-      "total_tokens": 55
+      "prompt_tokens": 50,
+      "total_tokens": 60
     }
   },
   {
@@ -32,7 +32,7 @@
         "index": 0,
         "logprobs": null,
         "message": {
-          "content": "A chicken sits on a pile of money, looking",
+          "content": "In a small town, a chicken named Cluck",
           "name": null,
           "role": "assistant",
           "tool_calls": null
@@ -40,15 +40,15 @@
         "usage": null
       }
     ],
-    "created": 1739290197,
+    "created": 1738753835,
     "id": "",
     "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
     "object": "chat.completion",
     "system_fingerprint": "3.1.1-dev0-native",
     "usage": {
       "completion_tokens": 10,
-      "prompt_tokens": 45,
-      "total_tokens": 55
+      "prompt_tokens": 50,
+      "total_tokens": 60
     }
   }
 ]

Changed file 2 of 3: response snapshot (JSON, single chat completion)

@@ -5,7 +5,7 @@
       "index": 0,
       "logprobs": null,
       "message": {
-        "content": "A chicken sits on a pile of money, looking",
+        "content": "In a small town, a chicken named Cluck",
         "name": null,
         "role": "assistant",
         "tool_calls": null
@@ -13,14 +13,14 @@
       "usage": null
     }
   ],
-  "created": 1739290152,
+  "created": 1738753833,
   "id": "",
   "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
   "object": "chat.completion",
   "system_fingerprint": "3.1.1-dev0-native",
   "usage": {
     "completion_tokens": 10,
-    "prompt_tokens": 45,
-    "total_tokens": 55
+    "prompt_tokens": 50,
+    "total_tokens": 60
   }
 }

Changed file 3 of 3: mllama integration test (Python)

@@ -28,7 +28,7 @@ async def test_mllama_simpl(mllama, response_snapshot):
                 "content": [
                     {
                         "type": "text",
-                        "text": "Describe the image in 10 words.",
+                        "text": "Can you tell me a very short story based on the image?",
                     },
                     {
                         "type": "image_url",
@@ -43,12 +43,11 @@ async def test_mllama_simpl(mllama, response_snapshot):
     assert response.usage == {
         "completion_tokens": 10,
-        "prompt_tokens": 45,
-        "total_tokens": 55,
+        "prompt_tokens": 50,
+        "total_tokens": 60,
     }
     assert (
-        response.choices[0].message.content
-        == "A chicken sits on a pile of money, looking"
+        response.choices[0].message.content == "In a small town, a chicken named Cluck"
     )
     assert response == response_snapshot
@@ -66,7 +65,7 @@ async def test_mllama_load(mllama, generate_load, response_snapshot):
                 "content": [
                     {
                         "type": "text",
-                        "text": "Describe the image in 10 words.",
+                        "text": "Can you tell me a very short story based on the image?",
                     },
                     {
                         "type": "image_url",
@@ -87,7 +86,7 @@ async def test_mllama_load(mllama, generate_load, response_snapshot):
     generated_texts = [response.choices[0].message.content for response in responses]
     # XXX: TODO: Fix this test.
-    assert generated_texts[0] == "A chicken sits on a pile of money, looking"
+    assert generated_texts[0] == "In a small town, a chicken named Cluck"
     assert len(generated_texts) == 2
     assert generated_texts, all(
         [text == generated_texts[0] for text in generated_texts]
     )