forked from phoenix/litellm-mirror
test(test_custom_logger.py): reset cache test correctly
parent 6821d74588
commit 1f18093b63
3 changed files with 15 additions and 15 deletions
.gitignore (vendored): 1 addition
@@ -19,3 +19,4 @@ litellm/proxy/_secret_config.yaml
litellm/tests/aiologs.log
litellm/tests/exception_data.txt
litellm/tests/config_*.yaml
litellm/tests/langfuse.log
@@ -258,10 +258,10 @@ class CompletionCustomHandler(CustomLogger): # https://docs.litellm.ai/docs/obse
@pytest.mark.asyncio
async def test_async_chat_azure():
    try:
        customHandler = CompletionCustomHandler()
        customHandler_streaming = CompletionCustomHandler()
        customHandler_completion_azure_router = CompletionCustomHandler()
        customHandler_streaming_azure_router = CompletionCustomHandler()
        customHandler_failure = CompletionCustomHandler()
        litellm.callbacks = [customHandler]
        litellm.callbacks = [customHandler_completion_azure_router]
        model_list = [
            {
                "model_name": "gpt-3.5-turbo",  # openai model name
@@ -282,10 +282,10 @@ async def test_async_chat_azure():
                                                 "content": "Hi 👋 - i'm openai"
                                             }])
        await asyncio.sleep(2)
        assert len(customHandler.errors) == 0
        assert len(customHandler.states) == 3  # pre, post, success
        assert len(customHandler_completion_azure_router.errors) == 0
        assert len(customHandler_completion_azure_router.states) == 3  # pre, post, success
        # streaming
        litellm.callbacks = [customHandler_streaming]
        litellm.callbacks = [customHandler_streaming_azure_router]
        router2 = Router(model_list=model_list)  # type: ignore
        response = await router2.acompletion(model="gpt-3.5-turbo",
                                             messages=[{
@@ -294,11 +294,12 @@ async def test_async_chat_azure():
                                             }],
                                             stream=True)
        async for chunk in response:
            print(f"async azure router chunk: {chunk}")
            continue
        await asyncio.sleep(1)
        print(f"customHandler.states: {customHandler_streaming.states}")
        assert len(customHandler_streaming.errors) == 0
        assert len(customHandler_streaming.states) >= 4  # pre, post, stream (multiple times), success
        print(f"customHandler.states: {customHandler_streaming_azure_router.states}")
        assert len(customHandler_streaming_azure_router.errors) == 0
        assert len(customHandler_streaming_azure_router.states) >= 4  # pre, post, stream (multiple times), success
        # failure
        model_list = [
            {
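Note: the assertions above count lifecycle events recorded by CompletionCustomHandler, which is defined earlier in this file. A minimal sketch of that pattern, built on litellm's documented CustomLogger hooks; the class below is a hypothetical stand-in, not the real handler:

    from litellm.integrations.custom_logger import CustomLogger

    class StateRecordingHandler(CustomLogger):  # hypothetical stand-in for CompletionCustomHandler
        def __init__(self):
            super().__init__()
            self.errors = []  # messages collected on failure events
            self.states = []  # one entry per lifecycle event

        def log_pre_api_call(self, model, messages, kwargs):
            self.states.append("pre_api_call")

        def log_post_api_call(self, kwargs, response_obj, start_time, end_time):
            self.states.append("post_api_call")

        async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
            self.states.append("async_stream")

        async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
            self.states.append("async_success")

        async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
            self.states.append("async_failure")
            self.errors.append(str(kwargs.get("exception")))

With such a handler, a plain completion records 3 states (pre, post, success), while a streamed one records at least 4, since the stream hook fires once per chunk batch before the final success event.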
@@ -396,6 +397,7 @@ async def test_async_embedding_azure():
async def test_async_chat_azure_with_fallbacks():
    try:
        customHandler_fallbacks = CompletionCustomHandler()
        litellm.callbacks = [customHandler_fallbacks]
        # with fallbacks
        model_list = [
            {
@@ -428,6 +430,7 @@ async def test_async_chat_azure_with_fallbacks():
        print(f"customHandler_fallbacks.states: {customHandler_fallbacks.states}")
        assert len(customHandler_fallbacks.errors) == 0
        assert len(customHandler_fallbacks.states) == 6  # pre, post, failure, pre, post, success
        litellm.callbacks = []
    except Exception as e:
        print(f"Assertion Error: {traceback.format_exc()}")
        pytest.fail(f"An exception occurred - {str(e)}")
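Note: the assertion of 6 states encodes the expected fallback flow: the primary deployment fails (pre, post, failure), then the fallback succeeds (pre, post, success). A minimal sketch of the wiring under test, with placeholder credentials and endpoints, since the real model_list is cut off by this hunk:

    from litellm import Router

    model_list = [
        {   # primary deployment, intentionally broken so the failure leg fires
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "azure/chatgpt-v-2",
                "api_key": "bad-key",  # placeholder
                "api_base": "https://my-endpoint.openai.azure.com/",  # placeholder
                "api_version": "2023-07-01-preview",
            },
        },
        {   # working fallback deployment
            "model_name": "gpt-3.5-turbo-16k",
            "litellm_params": {"model": "gpt-3.5-turbo-16k"},
        },
    ]

    router = Router(
        model_list=model_list,
        fallbacks=[{"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}],
    )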
@@ -205,7 +205,6 @@ def test_azure_completion_stream():
        assert response_in_success_handler == complete_streaming_response
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
test_azure_completion_stream()

def test_async_custom_handler():
    try:
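Note: this test compares what the success handler saw against a response rebuilt from the streamed chunks. For stream=True calls, litellm passes the stitched result to success callbacks through kwargs; a minimal sketch, assuming the documented complete_streaming_response key (the callback name is made up):

    import litellm

    def record_complete_stream(kwargs, completion_response, start_time, end_time):
        # litellm adds the reassembled response under this key once the
        # stream is exhausted (assumed from the documented callback kwargs)
        complete = kwargs.get("complete_streaming_response")
        if complete is not None:
            print("stitched streaming response:", complete)

    litellm.success_callback = [record_complete_stream]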
@@ -316,13 +315,10 @@ def test_redis_cache_completion_stream():
        print("\nresponse 2", response_2_content)
        assert response_1_content == response_2_content, f"Response 1 != Response 2. Same params, Response 1{response_1_content} != Response 2{response_2_content}"
        litellm.success_callback = []
        litellm._async_success_callback = []
        litellm.cache = None
    except Exception as e:
        print(e)
        litellm.success_callback = []
        raise e
"""
1 & 2 should be exactly the same
"""
# test_redis_cache_completion_stream()
        pytest.fail(f"Error occurred: {e}")
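Note: this hunk is the fix named in the commit message: the redis cache streaming test now also resets litellm's module-level cache and callback lists on the success path, so cached state cannot leak into whichever test runs next. The same cleanup could be centralized in an autouse fixture; a sketch (the fixture name is made up):

    import pytest
    import litellm

    @pytest.fixture(autouse=True)
    def reset_litellm_state():
        yield  # run the test body first
        # mirror the inline cleanup from this commit after every test
        litellm.cache = None
        litellm.success_callback = []
        litellm._async_success_callback = []
        litellm.callbacks = []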