fix: mock completion request

This commit is contained in:
Ishaan Jaff 2024-06-18 17:08:54 -07:00
parent 5ad095ad9d
commit 2d15a3c586
2 changed files with 25 additions and 4 deletions

View file

@ -1,9 +1,12 @@
#### What this tests ####
# This tests mock request calls to litellm
import sys, os
import os
import sys
import traceback
import pytest
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
@ -29,11 +32,29 @@ def test_streaming_mock_request():
response = litellm.mock_completion(model=model, messages=messages, stream=True)
complete_response = ""
for chunk in response:
complete_response += chunk["choices"][0]["delta"]["content"]
complete_response += chunk["choices"][0]["delta"]["content"] or ""
if complete_response == "":
raise Exception("Empty response received")
except:
traceback.print_exc()
test_streaming_mock_request()
# test_streaming_mock_request()
@pytest.mark.asyncio()
async def test_async_mock_streaming_request():
    """Stream a mocked async completion and check the chunks reassemble the mock text.

    Uses ``litellm.acompletion`` with ``mock_response`` so no real provider call
    is made; concatenates every delta chunk and asserts the full mock string
    comes back intact.
    """
    stream = await litellm.acompletion(
        messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
        mock_response="LiteLLM is awesome",
        stream=True,
        model="gpt-3.5-turbo",
    )
    received = ""
    async for part in stream:
        print(part)
        # The final chunk's delta content may be None — treat it as empty.
        delta_text = part["choices"][0]["delta"]["content"]
        received += delta_text if delta_text is not None else ""
    assert (
        received == "LiteLLM is awesome"
    ), f"Unexpected response got {received}"