Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00
refactor(redis_cache.py): use a default cache value when writing to r… (#6358)
* refactor(redis_cache.py): use a default cache value when writing to redis

  Prevents redis from blowing up in high traffic.

* refactor(redis_cache.py): refactor all cache writes to use self.get_ttl

  Ensures the default ttl is always used when writing to redis. Prevents the redis db from blowing up in prod.
parent 199896f912
commit 7338b24a74
3 changed files with 199 additions and 29 deletions
@@ -8,8 +8,18 @@ Has 4 methods:
     - async_get_cache
 """
 
+from typing import Optional
+
 
 class BaseCache:
+    def __init__(self, default_ttl: int = 60):
+        self.default_ttl = default_ttl
+
+    def get_ttl(self, **kwargs) -> Optional[int]:
+        if kwargs.get("ttl") is not None:
+            return kwargs.get("ttl")
+        return self.default_ttl
+
     def set_cache(self, key, value, **kwargs):
         raise NotImplementedError
 
@@ -22,7 +32,7 @@ class BaseCache:
     async def async_get_cache(self, key, **kwargs):
         raise NotImplementedError
 
-    async def batch_cache_write(self, result, *args, **kwargs):
+    async def batch_cache_write(self, key, value, **kwargs):
         raise NotImplementedError
 
     async def disconnect(self):
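The diff above only touches BaseCache; per the commit message, redis_cache.py's write paths are refactored to resolve their TTL through self.get_ttl before every write. A minimal sketch of that pattern, assuming redis-py's asyncio client (illustrative only, not litellm's actual RedisCache; the SketchRedisCache name, its constructor arguments, and the import path are hypothetical):

import json

import redis.asyncio as redis  # assumes redis-py >= 4.2 is installed

from base_cache import BaseCache  # import path assumed for this sketch


class SketchRedisCache(BaseCache):
    """Illustrative subclass: every write resolves its TTL via get_ttl()."""

    def __init__(self, host: str = "localhost", port: int = 6379, default_ttl: int = 60):
        super().__init__(default_ttl=default_ttl)
        self.client = redis.Redis(host=host, port=port)

    async def async_set_cache(self, key, value, **kwargs):
        # A per-call ttl kwarg wins; otherwise default_ttl applies, so no key
        # is written without an expiry and the db cannot grow unbounded.
        ttl = self.get_ttl(**kwargs)
        await self.client.set(key, json.dumps(value), ex=ttl)

Because the fallback lives in BaseCache.get_ttl, every write path (sync, async, and batch writes) gets the same ttl-or-default behavior without repeating the check in each method.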