Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
fix: setting cache responses on proxy
commit 30204497e0
parent 665939bc48
2 changed files with 7 additions and 3 deletions
@@ -53,7 +53,7 @@ class Router:
     ```
     """
     model_names: List = []
-    cache_responses: Optional[bool] = None
+    cache_responses: Optional[bool] = False
     default_cache_time_seconds: int = 1 * 60 * 60  # 1 hour
     num_retries: int = 0
     tenacity = None
@@ -65,7 +65,7 @@ class Router:
                 redis_host: Optional[str] = None,
                 redis_port: Optional[int] = None,
                 redis_password: Optional[str] = None,
-                cache_responses: Optional[bool] = None,
+                cache_responses: Optional[bool] = False,
                 cache_kwargs: dict = {},  # additional kwargs to pass to RedisCache (see caching.py)
                 ## RELIABILITY ##
                 num_retries: int = 0,
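For context, a minimal usage sketch of the changed constructor. Only the redis_host, redis_port, redis_password, and cache_responses parameters are confirmed by the diff above; the model_list entry format, the Redis values, and the router.completion call are illustrative assumptions based on the litellm Router API.

```python
# Minimal sketch (assumed litellm Router API; only the redis_* and
# cache_responses parameters are confirmed by the diff above).
from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",  # public alias (assumed entry format)
            "litellm_params": {"model": "gpt-3.5-turbo"},
        }
    ],
    redis_host="localhost",  # hypothetical Redis instance backing the cache
    redis_port=6379,
    redis_password="",       # supply real credentials in practice
    cache_responses=True,    # opt in explicitly; the default is now False
)

# Repeated identical calls can then be served from the Redis-backed cache.
response = router.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
```

Presumably, defaulting cache_responses to False rather than None keeps the flag a plain boolean end to end, so the proxy's config handling does not have to distinguish "unset" (None) from "disabled" (False) when forwarding it to the Router.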