diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 17658df903..fe8d73d26a 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -4,3 +4,12 @@ model_list:
       model: openai/fake
       api_key: fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
+
+general_settings:
+  use_redis_transaction_buffer: true
+
+litellm_settings:
+  cache: True
+  cache_params:
+    type: redis
+    supported_call_types: []
\ No newline at end of file
diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py
index 0b87444628..eb733e7370 100644
--- a/litellm/proxy/utils.py
+++ b/litellm/proxy/utils.py
@@ -349,6 +349,7 @@ class ProxyLogging:
         if redis_cache is not None:
             self.internal_usage_cache.dual_cache.redis_cache = redis_cache
             self.db_spend_update_writer.redis_update_buffer.redis_cache = redis_cache
+            self.db_spend_update_writer.pod_lock_manager.redis_cache = redis_cache

     def _init_litellm_callbacks(self, llm_router: Optional[Router] = None):
         litellm.logging_callback_manager.add_litellm_callback(self.max_parallel_request_limiter)  # type: ignore