Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
feat(proxy_server.py): enable cache controls per key + no-store cache flag
This commit is contained in:
parent 37de964da4
commit f9acad87dc
8 changed files with 108 additions and 42 deletions (only the two hunks below are shown in this excerpt)
@@ -2217,7 +2217,7 @@ def client(original_function):
                 litellm.cache is not None
                 and str(original_function.__name__)
                 in litellm.cache.supported_call_types
-            ):
+            ) and (kwargs.get("cache", {}).get("no-store", False) != True):
                 litellm.cache.add_cache(result, *args, **kwargs)
 
             # LOG SUCCESS - handle streaming success logging in the _next_ object, remove `handle_success` once it's deprecated

@@ -2430,9 +2430,12 @@ def client(original_function):
 
             # [OPTIONAL] ADD TO CACHE
             if (
-                litellm.cache is not None
-                and str(original_function.__name__)
-                in litellm.cache.supported_call_types
+                (litellm.cache is not None)
+                and (
+                    str(original_function.__name__)
+                    in litellm.cache.supported_call_types
+                )
+                and (kwargs.get("cache", {}).get("no-store", False) != True)
             ):
                 if isinstance(result, litellm.ModelResponse) or isinstance(
                     result, litellm.EmbeddingResponse
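For illustration, here is a minimal caller-side sketch of the flag this commit introduces. Assumptions: caching is enabled through `litellm.caching.Cache`, and `litellm.completion` forwards its `cache` kwarg into the wrapped `client` function shown above; the model name and messages are placeholders.

# Sketch: opting a single request out of cache writes via "no-store".
# Assumes litellm.completion forwards the `cache` kwarg into the client
# wrapper shown in the diff above; model and messages are placeholders.
import litellm
from litellm.caching import Cache

litellm.cache = Cache()  # enable caching globally (in-memory by default)

# This call may still read a previously cached result, but the guard
# added in this commit skips litellm.cache.add_cache(...), so the
# response is never written back to the cache.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    cache={"no-store": True},
)

Note that the guard checks `!= True` rather than truthiness, so only an explicit `no-store: True` suppresses the cache write; omitting the key or passing a falsy value keeps the default caching behavior.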