mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
fix(main.py): fix caching for router
This commit is contained in:
parent
5efa3860da
commit
e7d1840d5f
3 changed files with 5 additions and 3 deletions
|
@@ -147,7 +147,7 @@ async def acompletion(*args, **kwargs):
         else:
             # Await normally
             init_response = completion(*args, **kwargs)
-            if isinstance(init_response, dict):
+            if isinstance(init_response, dict) or isinstance(init_response, ModelResponse):
                 response = init_response
             else:
                 response = await init_response
|
|
Loading…
Add table
Add a link
Reference in a new issue