(test) callback - strict test - async callback resp
parent 34428a8e67
commit b50faed5ab
1 changed file with 23 additions and 13 deletions
@@ -8,6 +8,7 @@ import litellm
 from litellm.integrations.custom_logger import CustomLogger
 
 async_success = False
+complete_streaming_response_in_callback = ""
 class MyCustomHandler(CustomLogger):
     def __init__(self):
         self.success: bool = False # type: ignore
@@ -64,28 +65,37 @@ class MyCustomHandler(CustomLogger):
         self.async_completion_kwargs_fail = kwargs
 
 async def async_test_logging_fn(kwargs, completion_obj, start_time, end_time):
-    global async_success
+    global async_success, complete_streaming_response_in_callback
     print(f"ON ASYNC LOGGING")
     async_success = True
+    print("\nKWARGS", kwargs)
+    complete_streaming_response_in_callback = kwargs.get("complete_streaming_response")
 
-@pytest.mark.asyncio
-async def test_chat_openai():
+def test_async_chat_openai_stream():
     try:
+        global complete_streaming_response_in_callback
         # litellm.set_verbose = True
         litellm.success_callback = [async_test_logging_fn]
-        response = await litellm.acompletion(model="gpt-3.5-turbo",
-                              messages=[{
-                                  "role": "user",
-                                  "content": "Hi 👋 - i'm openai"
-                              }],
-                              stream=True)
-        async for chunk in response:
-            continue
+        complete_streaming_response = ""
+        async def call_gpt():
+            nonlocal complete_streaming_response
+            response = await litellm.acompletion(model="gpt-3.5-turbo",
+                                  messages=[{
+                                      "role": "user",
+                                      "content": "Hi 👋 - i'm openai"
+                                  }],
+                                  stream=True)
+            async for chunk in response:
+                complete_streaming_response += chunk["choices"][0]["delta"]["content"] or ""
+            print(complete_streaming_response)
+        asyncio.run(call_gpt())
+
+        assert complete_streaming_response_in_callback["choices"][0]["message"]["content"] == complete_streaming_response
         assert async_success == True
     except Exception as e:
        print(e)
        pytest.fail(f"An error occurred - {str(e)}")
-# test_chat_openai()
+# test_async_chat_openai_stream()
 
 def test_completion_azure_stream_moderation_failure():
     try:
@@ -192,4 +202,4 @@ def test_async_custom_handler():
 
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_async_custom_handler()
+# test_async_custom_handler()
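
Below is a minimal, self-contained sketch of the pattern this test now exercises: register an async function in litellm.success_callback, stream a completion with acompletion(..., stream=True), and compare the client-side text against the "complete_streaming_response" that litellm passes to the callback's kwargs. The callback registration, the signature (kwargs, completion_obj, start_time, end_time), the kwargs key, and the chunk/field access are taken from the diff above; the names log_success, captured, and main, and the prompt are illustrative, and an OPENAI_API_KEY is assumed to be configured in the environment.

import asyncio
import litellm

captured = {}

async def log_success(kwargs, completion_obj, start_time, end_time):
    # For stream=True calls, the callback kwargs include a re-assembled
    # "complete_streaming_response" alongside the raw call arguments.
    captured["full_response"] = kwargs.get("complete_streaming_response")

# litellm accepts async functions here, as the diff above demonstrates.
litellm.success_callback = [log_success]

async def main():
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
        stream=True,
    )
    text = ""
    async for chunk in response:
        # delta content can be None on the final chunk, hence the `or ""`
        text += chunk["choices"][0]["delta"]["content"] or ""
    return text

client_side = asyncio.run(main())
# Mirrors the strict assertion added in this commit: the callback must see
# exactly the text the client stitched together from the stream.
assert captured["full_response"]["choices"][0]["message"]["content"] == client_side

The point of the "strict" check is that the callback is no longer just confirmed to have fired (async_success == True); it must also receive the fully stitched-together response, so any drift between the streamed content and the callback payload fails the test.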