diff --git a/litellm/__init__.py b/litellm/__init__.py
index 9e7c26186..52ef19b57 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -76,6 +76,8 @@ caching_with_models: bool = (
 cache: Optional[Cache] = (
     None  # cache object <- use this - https://docs.litellm.ai/docs/caching
 )
+default_in_memory_ttl: Optional[float] = None
+default_redis_ttl: Optional[float] = None
 model_alias_map: Dict[str, str] = {}
 model_group_alias_map: Dict[str, str] = {}
 max_budget: float = 0.0  # set the max budget across all providers
diff --git a/litellm/caching.py b/litellm/caching.py
index 301acd593..197dc0cc5 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -845,8 +845,10 @@ class DualCache(BaseCache):
         # If redis_cache is not provided, use the default RedisCache
         self.redis_cache = redis_cache

-        self.default_in_memory_ttl = default_in_memory_ttl
-        self.default_redis_ttl = default_redis_ttl
+        self.default_in_memory_ttl = (
+            default_in_memory_ttl or litellm.default_in_memory_ttl
+        )
+        self.default_redis_ttl = default_redis_ttl or litellm.default_redis_ttl

     def set_cache(self, key, value, local_only: bool = False, **kwargs):
         # Update both Redis and in-memory cache
@@ -956,6 +958,8 @@ class Cache:
         password: Optional[str] = None,
         namespace: Optional[str] = None,
         ttl: Optional[float] = None,
+        default_in_memory_ttl: Optional[float] = None,
+        default_in_redis_ttl: Optional[float] = None,
         similarity_threshold: Optional[float] = None,
         supported_call_types: Optional[
             List[
@@ -1055,6 +1059,14 @@ class Cache:
         self.redis_flush_size = redis_flush_size
         self.ttl = ttl

+        if self.type == "local" and default_in_memory_ttl is not None:
+            self.ttl = default_in_memory_ttl
+
+        if (
+            self.type == "redis" or self.type == "redis-semantic"
+        ) and default_in_redis_ttl is not None:
+            self.ttl = default_in_redis_ttl
+
         if self.namespace is not None and isinstance(self.cache, RedisCache):
             self.cache.namespace = self.namespace

diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 51e425edc..7822bc61d 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -1908,6 +1908,12 @@ class ProxyConfig:
         global redis_usage_cache
         from litellm import Cache

+        if "default_in_memory_ttl" in cache_params:
+            litellm.default_in_memory_ttl = cache_params["default_in_memory_ttl"]
+
+        if "default_redis_ttl" in cache_params:
+            litellm.default_redis_ttl = cache_params["default_redis_ttl"]
+
         litellm.cache = Cache(**cache_params)

         if litellm.cache is not None and isinstance(litellm.cache.cache, RedisCache):
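
Usage sketch (illustrative, not part of the patch): assuming the changes above land as-is, the new module-level defaults and the new Cache/DualCache parameters could be exercised roughly as follows; the TTL values are hypothetical examples.

    import litellm
    from litellm.caching import Cache, DualCache

    # Module-level defaults added in litellm/__init__.py; DualCache now falls
    # back to these when no per-instance TTLs are passed to its constructor.
    litellm.default_in_memory_ttl = 60.0   # seconds
    litellm.default_redis_ttl = 600.0      # seconds

    dual_cache = DualCache()  # picks up the litellm.* defaults above

    # The Cache constructor gains explicit defaults: for type="local",
    # default_in_memory_ttl overrides self.ttl; for "redis"/"redis-semantic",
    # default_in_redis_ttl does.
    litellm.cache = Cache(type="local", default_in_memory_ttl=120.0)

The proxy path mirrors this: if cache_params from the proxy config contains a default_in_memory_ttl or default_redis_ttl key, its value is copied onto the corresponding litellm module-level attribute before Cache(**cache_params) is constructed.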