diff --git a/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_stream_usage.json b/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_stream_usage.json
index fbb3669f..898fd7fb 100644
--- a/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_stream_usage.json
+++ b/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_stream_usage.json
@@ -12,7 +12,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338471,
+    "created": 1741373593,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -32,7 +32,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338471,
+    "created": 1741373593,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -52,7 +52,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338471,
+    "created": 1741373593,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -72,7 +72,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338471,
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -92,7 +92,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338472,
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -112,7 +112,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338472,
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -132,7 +132,7 @@
         "logprobs": null
      }
     ],
-    "created": 1741338472,
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -152,7 +152,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338472,
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -172,7 +172,7 @@
         "logprobs": null
       }
     ],
-    "created": 1741338472,
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
@@ -192,7 +192,16 @@
         "logprobs": null
       }
     ],
-    "created": 1741338472,
+    "created": 1741373594,
+    "id": "",
+    "model": "meta-llama/Llama-3.1-8B-Instruct",
+    "object": "chat.completion.chunk",
+    "system_fingerprint": "3.1.2-dev0-native",
+    "usage": null
+  },
+  {
+    "choices": [],
+    "created": 1741373594,
     "id": "",
     "model": "meta-llama/Llama-3.1-8B-Instruct",
     "object": "chat.completion.chunk",
diff --git a/integration-tests/models/test_completion_prompts.py b/integration-tests/models/test_completion_prompts.py
index b5a7920a..a2c037c5 100644
--- a/integration-tests/models/test_completion_prompts.py
+++ b/integration-tests/models/test_completion_prompts.py
@@ -70,7 +70,6 @@ async def test_flash_llama_completion_stream_usage(
     for chunk in stream:
         # remove "data:"
         chunks.append(chunk)
-        print(f"Chunk {chunk}")
         if len(chunk.choices) == 1:
             index = chunk.choices[0].index
             assert index == 0