forked from phoenix/litellm-mirror
fix - mock completion request
This commit is contained in:
parent
5ad095ad9d
commit
2d15a3c586
2 changed files with 25 additions and 4 deletions
|
@@ -1,9 +1,12 @@
|
||||||
#### What this tests ####
|
#### What this tests ####
|
||||||
# This tests mock request calls to litellm
|
# This tests mock request calls to litellm
|
||||||
|
|
||||||
import sys, os
|
import os
|
||||||
|
import sys
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
sys.path.insert(
|
sys.path.insert(
|
||||||
0, os.path.abspath("../..")
|
0, os.path.abspath("../..")
|
||||||
) # Adds the parent directory to the system path
|
) # Adds the parent directory to the system path
|
||||||
|
@@ -29,11 +32,29 @@ def test_streaming_mock_request():
|
||||||
response = litellm.mock_completion(model=model, messages=messages, stream=True)
|
response = litellm.mock_completion(model=model, messages=messages, stream=True)
|
||||||
complete_response = ""
|
complete_response = ""
|
||||||
for chunk in response:
|
for chunk in response:
|
||||||
complete_response += chunk["choices"][0]["delta"]["content"]
|
complete_response += chunk["choices"][0]["delta"]["content"] or ""
|
||||||
if complete_response == "":
|
if complete_response == "":
|
||||||
raise Exception("Empty response received")
|
raise Exception("Empty response received")
|
||||||
except:
|
except:
|
||||||
traceback.print_exc()
|
traceback.print_exc()
|
||||||
|
|
||||||
|
|
||||||
test_streaming_mock_request()
|
# test_streaming_mock_request()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.asyncio()
|
||||||
|
async def test_async_mock_streaming_request():
|
||||||
|
generator = await litellm.acompletion(
|
||||||
|
messages=[{"role": "user", "content": "Why is LiteLLM amazing?"}],
|
||||||
|
mock_response="LiteLLM is awesome",
|
||||||
|
stream=True,
|
||||||
|
model="gpt-3.5-turbo",
|
||||||
|
)
|
||||||
|
complete_response = ""
|
||||||
|
async for chunk in generator:
|
||||||
|
print(chunk)
|
||||||
|
complete_response += chunk["choices"][0]["delta"]["content"] or ""
|
||||||
|
|
||||||
|
assert (
|
||||||
|
complete_response == "LiteLLM is awesome"
|
||||||
|
), f"Unexpected response got {complete_response}"
|
||||||
|
|
|
@@ -9435,7 +9435,7 @@ def mock_completion_streaming_obj(model_response, mock_response, model):
|
||||||
|
|
||||||
async def async_mock_completion_streaming_obj(model_response, mock_response, model):
|
async def async_mock_completion_streaming_obj(model_response, mock_response, model):
|
||||||
for i in range(0, len(mock_response), 3):
|
for i in range(0, len(mock_response), 3):
|
||||||
completion_obj = Delta(role="assistant", content=mock_response)
|
completion_obj = Delta(role="assistant", content=mock_response[i : i + 3])
|
||||||
model_response.choices[0].delta = completion_obj
|
model_response.choices[0].delta = completion_obj
|
||||||
model_response.choices[0].finish_reason = "stop"
|
model_response.choices[0].finish_reason = "stop"
|
||||||
yield model_response
|
yield model_response
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue