fix pod lock manager

Ishaan Jaff 2025-04-02 14:52:55 -07:00
parent a64631edfb
commit 8b12a2e5dc
2 changed files with 10 additions and 0 deletions

@@ -4,3 +4,12 @@ model_list:
       model: openai/fake
       api_key: fake-key
       api_base: https://exampleopenaiendpoint-production.up.railway.app/
+
+general_settings:
+  use_redis_transaction_buffer: true
+
+litellm_settings:
+  cache: True
+  cache_params:
+    type: redis
+    supported_call_types: []
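
In this config, use_redis_transaction_buffer: true tells the proxy to buffer DB spend updates in Redis rather than having every pod write to the database directly, while the cache block wires up a Redis connection with an empty supported_call_types list, meaning no LLM call types are cached; Redis presumably serves only as the coordination backend. Below is a minimal sketch of the transaction-buffer pattern, assuming redis-py; the class, method, and key names are illustrative, not LiteLLM's actual internals.

# Minimal sketch of a Redis transaction buffer (illustrative; class, method,
# and key names are assumptions, not LiteLLM's actual implementation).
import json

import redis


class RedisUpdateBuffer:
    """Accumulate per-request spend updates in a Redis list so many proxy
    pods can enqueue cheaply while a single pod flushes them to the DB."""

    def __init__(self, redis_client: redis.Redis, key: str = "spend_update_buffer"):
        self.redis = redis_client
        self.key = key

    def enqueue(self, update: dict) -> None:
        # RPUSH is atomic, so concurrent pods can append without locking.
        self.redis.rpush(self.key, json.dumps(update))

    def drain(self) -> list:
        # Read and delete inside one MULTI/EXEC transaction so no update is
        # lost or double-processed between the two commands.
        pipe = self.redis.pipeline(transaction=True)
        pipe.lrange(self.key, 0, -1)
        pipe.delete(self.key)
        items, _ = pipe.execute()
        return [json.loads(item) for item in items]

Buffering in a shared Redis list means database write volume scales with flush frequency, not with pod count.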

@@ -349,6 +349,7 @@ class ProxyLogging:
         if redis_cache is not None:
             self.internal_usage_cache.dual_cache.redis_cache = redis_cache
             self.db_spend_update_writer.redis_update_buffer.redis_cache = redis_cache
+            self.db_spend_update_writer.pod_lock_manager.redis_cache = redis_cache
 
     def _init_litellm_callbacks(self, llm_router: Optional[Router] = None):
         litellm.logging_callback_manager.add_litellm_callback(self.max_parallel_request_limiter)  # type: ignore
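
The actual fix is the single added line: redis_cache was already being handed to the internal usage cache and the spend-update buffer, but never to db_spend_update_writer.pod_lock_manager, so the pod lock manager had no Redis client to coordinate through. Below is a minimal sketch of the cross-pod locking pattern such a manager typically implements, assuming redis-py; the PodLock class and its key format are illustrative assumptions, not LiteLLM's actual PodLockManager interface.

# Minimal sketch of cross-pod locking over Redis (illustrative; PodLock and
# its key format are assumptions, not LiteLLM's PodLockManager interface).
import uuid

import redis


class PodLock:
    def __init__(self, redis_client: redis.Redis, name: str, ttl_seconds: int = 60):
        self.redis = redis_client
        self.key = f"pod_lock:{name}"
        self.ttl = ttl_seconds
        self.token = str(uuid.uuid4())  # identifies this pod's claim

    def acquire(self) -> bool:
        # nx=True: succeed only if no other pod currently holds the lock.
        # ex=ttl: the lock expires on its own if this pod dies mid-task.
        return bool(self.redis.set(self.key, self.token, nx=True, ex=self.ttl))

    def release(self) -> None:
        # Compare-and-delete atomically in Lua so we never delete a lock
        # that expired and was re-acquired by another pod in the meantime.
        script = (
            "if redis.call('get', KEYS[1]) == ARGV[1] then "
            "return redis.call('del', KEYS[1]) end return 0"
        )
        self.redis.eval(script, 1, self.key, self.token)

Only the pod that wins acquire() would flush the buffered spend updates; the others skip that cycle.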