mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
refactor: move async text completion testing to test_text_completion.py
This commit is contained in:
parent 4425368065
commit 9cf43cd5dc
2 changed files with 29 additions and 28 deletions
@@ -215,30 +215,3 @@ def test_get_response_non_openai_streaming():
 
 
 # test_get_response_non_openai_streaming()
-
-
-async def test_get_response():
-    try:
-        response = await litellm.atext_completion(
-            model="gpt-3.5-turbo",
-            prompt="good morning",
-            stream=True,
-            max_tokens=10,
-        )
-        print(f"response: {response}")
-
-        num_finish_reason = 0
-        async for chunk in response:
-            print(chunk)
-            if chunk["choices"][0].get("finish_reason") is not None:
-                num_finish_reason += 1
-                print("finish_reason", chunk["choices"][0].get("finish_reason"))
-
-        assert (
-            num_finish_reason == 1
-        ), f"expected only one finish reason. Got {num_finish_reason}"
-    except Exception as e:
-        pytest.fail(f"GOT exception for gpt-3.5 instruct In streaming{e}")
-
-
-# asyncio.run(test_get_response())
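For context on what was removed: the deleted test exercised litellm's async streaming text completion and asserted that exactly one chunk carries a finish_reason. Below is a minimal standalone sketch of that pattern, not part of the commit, assuming litellm is installed and an OpenAI API key is configured in the environment; stream_once is a hypothetical helper name.

import asyncio

import litellm


async def stream_once() -> int:
    # Hypothetical helper (not in the commit): request a short streamed
    # completion exactly as the deleted test did.
    response = await litellm.atext_completion(
        model="gpt-3.5-turbo",
        prompt="good morning",
        stream=True,
        max_tokens=10,
    )
    # Count chunks carrying a finish_reason; the test expects exactly one.
    num_finish_reason = 0
    async for chunk in response:
        if chunk["choices"][0].get("finish_reason") is not None:
            num_finish_reason += 1
    return num_finish_reason


if __name__ == "__main__":
    assert asyncio.run(stream_once()) == 1, "expected exactly one finish reason"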
@@ -2924,4 +2924,32 @@ def test_async_text_completion_stream():
     asyncio.run(test_get_response())
 
 
-test_async_text_completion_stream()
+# test_async_text_completion_stream()
+
+
+@pytest.mark.asyncio
+async def test_async_text_completion_chat_model_stream():
+    try:
+        response = await litellm.atext_completion(
+            model="gpt-3.5-turbo",
+            prompt="good morning",
+            stream=True,
+            max_tokens=10,
+        )
+        print(f"response: {response}")
+
+        num_finish_reason = 0
+        async for chunk in response:
+            print(chunk)
+            if chunk["choices"][0].get("finish_reason") is not None:
+                num_finish_reason += 1
+                print("finish_reason", chunk["choices"][0].get("finish_reason"))
+
+        assert (
+            num_finish_reason == 1
+        ), f"expected only one finish reason. Got {num_finish_reason}"
+    except Exception as e:
+        pytest.fail(f"GOT exception for gpt-3.5 In streaming{e}")
+
+
+# asyncio.run(test_async_text_completion_chat_model_stream())
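Besides the relocation, the functional change is the new @pytest.mark.asyncio decorator: the deleted test_get_response had no marker, and pytest on its own does not await an async def test, so pytest-asyncio must supply the event loop. A minimal sketch of that mechanism, assuming pytest and pytest-asyncio are installed:

import asyncio

import pytest


@pytest.mark.asyncio
async def test_runs_on_event_loop():
    # pytest-asyncio executes this coroutine on an event loop it manages;
    # without the marker (or asyncio_mode = "auto"), pytest would warn and
    # skip the coroutine instead of awaiting it.
    await asyncio.sleep(0)
    assert asyncio.get_running_loop() is not None

The relocated test can then be selected with pytest -k test_async_text_completion_chat_model_stream; per the commit message, the target file is test_text_completion.py.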