mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
* refactor(redis_cache.py): use a default cache value when writing to redis — prevents redis from blowing up in high traffic. * refactor(redis_cache.py): refactor all cache writes to use self.get_ttl — ensures the default ttl is always used when writing to redis, preventing the redis db from blowing up in prod.
39 lines
982 B
Python
39 lines
982 B
Python
"""
|
|
Base Cache implementation. All cache implementations should inherit from this class.
|
|
|
|
Has 4 methods:
|
|
- set_cache
|
|
- get_cache
|
|
- async_set_cache
|
|
- async_get_cache
|
|
"""
|
|
|
|
from typing import Optional
|
|
|
|
|
|
class BaseCache:
    """Base cache interface; concrete cache backends subclass this.

    Subclasses override the sync/async get/set methods below; the base class
    itself only provides default-TTL bookkeeping via ``get_ttl`` so that every
    write can fall back to a sane expiry instead of persisting forever.
    """

    def __init__(self, default_ttl: int = 60):
        # Fallback TTL applied when a cache write does not pass its own ttl.
        # Presumably seconds — TODO confirm against the redis backend usage.
        self.default_ttl = default_ttl

    def get_ttl(self, **kwargs) -> Optional[int]:
        """Return ``kwargs["ttl"]`` if explicitly provided, else the default.

        An explicit ``ttl=0`` is honored (the check is ``is not None``, not
        truthiness). Single dict lookup instead of the original double
        ``kwargs.get("ttl")`` call.
        """
        ttl = kwargs.get("ttl")
        return ttl if ttl is not None else self.default_ttl

    def set_cache(self, key, value, **kwargs):
        """Synchronously store ``value`` under ``key``. Must be overridden."""
        raise NotImplementedError

    async def async_set_cache(self, key, value, **kwargs):
        """Asynchronously store ``value`` under ``key``. Must be overridden."""
        raise NotImplementedError

    def get_cache(self, key, **kwargs):
        """Synchronously fetch the value for ``key``. Must be overridden."""
        raise NotImplementedError

    async def async_get_cache(self, key, **kwargs):
        """Asynchronously fetch the value for ``key``. Must be overridden."""
        raise NotImplementedError

    async def batch_cache_write(self, key, value, **kwargs):
        """Asynchronously write as part of a batch. Must be overridden."""
        raise NotImplementedError

    async def disconnect(self):
        """Release any backend connections. Must be overridden."""
        raise NotImplementedError