From e31eb6a6b2fce670217c5a89e8f4759e321e87cb Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 23 Apr 2024 19:26:19 -0700 Subject: [PATCH] test(test_openai_endpoints.py): add unit testing for streaming + logprobs on docker image --- tests/test_openai_endpoints.py | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py index c77eeba5b..c6df43050 100644 --- a/tests/test_openai_endpoints.py +++ b/tests/test_openai_endpoints.py @@ -3,7 +3,7 @@ import pytest import asyncio import aiohttp, openai -from openai import OpenAI +from openai import OpenAI, AsyncOpenAI def response_header_check(response): @@ -296,6 +296,29 @@ async def test_chat_completion_different_deployments(): pass +@pytest.mark.asyncio
+async def test_chat_completion_streaming():
+    """
+    [PROD Test] Ensures a streamed chat completion with logprobs requested completes and yields content
+    """
+    client = AsyncOpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
+
+    response = await client.chat.completions.create(
+        model="gpt-3.5-turbo-large",
+        messages=[{"role": "user", "content": "Hello!"}],
+        logprobs=True,
+        top_logprobs=2,
+        stream=True,
+    )
+
+    response_str = ""
+
+    async for chunk in response:
+        response_str += chunk.choices[0].delta.content or ""
+
+    print(f"response_str: {response_str}")
+
+
 @pytest.mark.asyncio
 async def test_chat_completion_old_key():
     """