Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
fix(caching.py): dump model response object as json
parent b8c3896323
commit 1665b872c3
4 changed files with 3 additions and 2 deletions (only the caching.py hunks are shown below)
caching.py
@@ -11,7 +11,6 @@ import litellm
 import time, logging
 import json, traceback
 
-
 def get_prompt(*args, **kwargs):
     # make this safe checks, it should not throw any exceptions
     if len(args) > 1:
@@ -222,6 +221,8 @@ class Cache:
             else:
                 cache_key = self.get_cache_key(*args, **kwargs)
             if cache_key is not None:
+                if isinstance(result, litellm.ModelResponse):
+                    result = result.model_dump_json()
                 self.cache.set_cache(cache_key, result, **kwargs)
         except:
             pass
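For context, the fix serializes litellm.ModelResponse (a Pydantic-style model, as the model_dump_json() call implies) to a JSON string before it is written to the cache, so the backend only ever stores a plain string rather than a live Python object. Below is a minimal sketch of that round trip; the in-memory _store dict and the add_cache/get_cache helpers are hypothetical stand-ins for litellm's real cache backends, not the library's API.

import json
import litellm

_store = {}  # hypothetical in-memory stand-in for a real cache backend

def add_cache(key, result):
    # Mirror the commit's change: dump ModelResponse objects to JSON
    # so the cache holds a serializable string, not a live object.
    if isinstance(result, litellm.ModelResponse):
        result = result.model_dump_json()
    _store[key] = result

def get_cache(key):
    # On read, the JSON string parses back into a plain dict;
    # reconstructing a ModelResponse from it is left to the caller.
    cached = _store.get(key)
    return json.loads(cached) if isinstance(cached, str) else cached

Storing the JSON string rather than the object also keeps the cached value backend-agnostic: an in-process dict, Redis, or any other key-value store can hold it without pickling.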