test(test_openai_endpoints.py): add unit testing for streaming + logprobs on docker image
parent df60e475e8
commit e31eb6a6b2

1 changed file with 24 additions and 1 deletion
test_openai_endpoints.py
@@ -3,7 +3,7 @@
 import pytest
 import asyncio
 import aiohttp, openai
-from openai import OpenAI
+from openai import OpenAI, AsyncOpenAI


 def response_header_check(response):
@@ -296,6 +296,29 @@ async def test_chat_completion_different_deployments():
     pass


+@pytest.mark.asyncio
+async def test_chat_completion_streaming():
+    """
+    [PROD Test] Ensures logprobs are returned correctly
+    """
+    client = AsyncOpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")
+
+    response = await client.chat.completions.create(
+        model="gpt-3.5-turbo-large",
+        messages=[{"role": "user", "content": "Hello!"}],
+        logprobs=True,
+        top_logprobs=2,
+        stream=True,
+    )
+
+    response_str = ""
+
+    async for chunk in response:
+        response_str += chunk.choices[0].delta.content or ""
+
+    print(f"response_str: {response_str}")
+
+
 @pytest.mark.asyncio
 async def test_chat_completion_old_key():
     """
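
The test added above only accumulates delta.content and never inspects the logprobs it requests. A minimal sketch of a stricter variant, assuming the proxy mirrors the OpenAI Python SDK's streaming shape (chunk.choices[0].logprobs.content is a list of token-logprob entries when logprobs=True); the test name and assertions here are illustrative and not part of the commit:

import pytest
from openai import AsyncOpenAI


@pytest.mark.asyncio
async def test_chat_completion_streaming_asserts_logprobs():
    # Hypothetical stricter check: fail if no streamed chunk carries logprobs.
    client = AsyncOpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

    response = await client.chat.completions.create(
        model="gpt-3.5-turbo-large",
        messages=[{"role": "user", "content": "Hello!"}],
        logprobs=True,
        top_logprobs=2,
        stream=True,
    )

    saw_logprobs = False
    async for chunk in response:
        if not chunk.choices:  # e.g. a trailing usage-only chunk
            continue
        choice = chunk.choices[0]
        # Assumption: the proxy, like the upstream OpenAI API, attaches
        # logprobs to streamed choices when logprobs=True is requested.
        if choice.logprobs is not None and choice.logprobs.content:
            saw_logprobs = True
            for entry in choice.logprobs.content:
                # top_logprobs=2 was requested, so expect at most 2 alternatives
                assert len(entry.top_logprobs) <= 2

    assert saw_logprobs, "no logprobs returned on any streamed chunk"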