fix(caching.py): support default ttl for caching

This commit is contained in:
Krrish Dholakia 2024-03-25 13:40:17 -07:00
parent e1c75c009f
commit 591a0a376e
2 changed files with 23 additions and 0 deletions

View file

@@ -874,6 +874,7 @@ class Cache:
port: Optional[str] = None,
password: Optional[str] = None,
namespace: Optional[str] = None,
ttl: Optional[float] = None,
similarity_threshold: Optional[float] = None,
supported_call_types: Optional[
List[
@@ -967,6 +968,7 @@ class Cache:
self.supported_call_types = supported_call_types # default to ["completion", "acompletion", "embedding", "aembedding"]
self.type = type
self.namespace = namespace
self.ttl = ttl
def get_cache_key(self, *args, **kwargs):
"""
@@ -1206,6 +1208,9 @@ class Cache:
if isinstance(result, OpenAIObject):
result = result.model_dump_json()
## DEFAULT TTL ##
if self.ttl is not None:
kwargs["ttl"] = self.ttl
## Get Cache-Controls ##
if kwargs.get("cache", None) is not None and isinstance(
kwargs.get("cache"), dict
@@ -1213,6 +1218,7 @@ class Cache:
for k, v in kwargs.get("cache").items():
if k == "ttl":
kwargs["ttl"] = v
cached_data = {"timestamp": time.time(), "response": result}
return cache_key, cached_data, kwargs
else: