forked from phoenix/litellm-mirror
fix(utils.py): support cache logging for async router calls
parent cffd190887
commit 73e5b96d8e
2 changed files with 52 additions and 3 deletions
@@ -5,7 +5,7 @@ from datetime import datetime
 import pytest
 sys.path.insert(0, os.path.abspath('../..'))
 from typing import Optional, Literal, List
-from litellm import Router
+from litellm import Router, Cache
 import litellm
 from litellm.integrations.custom_logger import CustomLogger
 
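The new test relies on a CompletionCustomHandler that records which callback hooks fire. Its definition lives elsewhere in the test suite and is not part of this diff; below is a minimal sketch of such a handler, assuming the standard CustomLogger hook names (the real handler in the test suite validates much more about each event):

import traceback
from litellm.integrations.custom_logger import CustomLogger

class CompletionCustomHandler(CustomLogger):
    # Records every hook that fires so tests can assert on the event sequence.
    def __init__(self):
        self.states = []  # ordered record of fired hooks
        self.errors = []  # populated when a failure hook fires

    def log_pre_api_call(self, model, messages, kwargs):
        self.states.append("sync_pre_api_call")

    def log_post_api_call(self, kwargs, response_obj, start_time, end_time):
        self.states.append("post_api_call")

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        self.states.append("async_success")

    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
        self.errors.append(traceback.format_exc())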
@@ -436,4 +436,53 @@ async def test_async_chat_azure_with_fallbacks():
     except Exception as e:
         print(f"Assertion Error: {traceback.format_exc()}")
         pytest.fail(f"An exception occurred - {str(e)}")
 # asyncio.run(test_async_chat_azure_with_fallbacks())
+
+# CACHING
+## Test Azure - completion, embedding
+@pytest.mark.asyncio
+async def test_async_completion_azure_caching():
+    customHandler_caching = CompletionCustomHandler()
+    litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
+    litellm.callbacks = [customHandler_caching]
+    unique_time = time.time()
+    model_list = [
+        {
+            "model_name": "gpt-3.5-turbo", # openai model name
+            "litellm_params": { # params for litellm completion/embedding call
+                "model": "azure/chatgpt-v-2",
+                "api_key": os.getenv("AZURE_API_KEY"),
+                "api_version": os.getenv("AZURE_API_VERSION"),
+                "api_base": os.getenv("AZURE_API_BASE")
+            },
+            "tpm": 240000,
+            "rpm": 1800
+        },
+        {
+            "model_name": "gpt-3.5-turbo-16k",
+            "litellm_params": {
+                "model": "gpt-3.5-turbo-16k",
+            },
+            "tpm": 240000,
+            "rpm": 1800
+        }
+    ]
+    router = Router(model_list=model_list) # type: ignore
+    response1 = await router.acompletion(model="gpt-3.5-turbo",
+                                         messages=[{
+                                             "role": "user",
+                                             "content": f"Hi 👋 - i'm async azure {unique_time}"
+                                         }],
+                                         caching=True)
+    await asyncio.sleep(1)
+    print(f"customHandler_caching.states pre-cache hit: {customHandler_caching.states}")
+    response2 = await router.acompletion(model="gpt-3.5-turbo",
+                                         messages=[{
+                                             "role": "user",
+                                             "content": f"Hi 👋 - i'm async azure {unique_time}"
+                                         }],
+                                         caching=True)
+    await asyncio.sleep(1) # success callbacks are done in parallel
+    print(f"customHandler_caching.states post-cache hit: {customHandler_caching.states}")
+    assert len(customHandler_caching.errors) == 0
+    assert len(customHandler_caching.states) == 4 # pre, post, success, success
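The final assertion encodes the fix: the first acompletion call fires pre-API, post-API, and async-success hooks, while the second call is served from the Redis cache (same prompt, thanks to unique_time) and must still fire its success callback, for 4 states in total. Firing success callbacks on async router cache hits is what the utils.py change in this commit enables. A handler can also detect the cache hit itself; the sketch below assumes litellm passes a "cache_hit" flag in the callback kwargs (verify against your litellm version):

class CacheAwareHandler(CustomLogger):
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # "cache_hit" is an assumption about the kwargs litellm supplies here
        if kwargs.get("cache_hit"):
            print(f"served from cache in {end_time - start_time}")

To run the test outside pytest, mirror the commented-out pattern above, asyncio.run(test_async_completion_azure_caching()), with the REDIS_* and AZURE_* environment variables set.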