Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)

(feat) redis-semantic cache on proxy

commit 755f44613d
parent c1e9041506

2 changed files with 5 additions and 1 deletion
litellm/proxy/proxy_server.py

@@ -1178,7 +1178,7 @@ class ProxyConfig:
                 verbose_proxy_logger.debug(f"passed cache type={cache_type}")
 
-                if cache_type == "redis":
+                if cache_type == "redis" or cache_type == "redis-semantic":
                     cache_host = litellm.get_secret("REDIS_HOST", None)
                     cache_port = litellm.get_secret("REDIS_PORT", None)
                     cache_password = litellm.get_secret("REDIS_PASSWORD", None)
 
@@ -1205,6 +1205,9 @@ class ProxyConfig:
                         f"{blue_color_code}Cache Password:{reset_color_code} {cache_password}"
                     )
                     print()  # noqa
+                if cache_type == "redis-semantic":
+                    # by default this should always be async
+                    cache_params.update({"redis_semantic_cache_use_async": True})
 
                 # users can pass os.environ/ variables on the proxy - we should read them from the env
                 for key, value in cache_params.items():
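The trailing context lines above show where the proxy resolves "os.environ/" placeholders in cache_params. As a rough sketch only (the loop body is not part of this commit, and the exact replacement logic is an assumption), the resolution step behaves roughly like this:

import os

# Illustrative sketch, not code from this commit: users can write config values
# as "os.environ/SOME_VAR"; the loop over cache_params.items() shown above is
# where each such placeholder gets swapped for the real environment value.
cache_params = {
    "type": "redis-semantic",
    "password": "os.environ/REDIS_PASSWORD",  # placeholder form a user might write
}

for key, value in cache_params.items():
    if isinstance(value, str) and value.startswith("os.environ/"):
        env_var = value.replace("os.environ/", "", 1)
        cache_params[key] = os.getenv(env_var)  # assumption: unset vars become None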
requirements.txt

@@ -10,6 +10,7 @@ gunicorn==21.2.0 # server dep
 boto3==1.28.58 # aws bedrock/sagemaker calls
 redis==4.6.0 # caching
+redisvl==0.0.7 # semantic caching
 numpy==1.24.3 # semantic caching
 prisma==0.11.0 # for db
 mangum==0.17.0 # for aws lambda functions
 google-generativeai==0.3.2 # for vertex ai calls
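With redisvl pinned in requirements, the new cache type can also be exercised from Python directly. A minimal sketch, assuming litellm.caching.Cache accepts a "redis-semantic" type wired to the same Redis secrets the proxy reads; similarity_threshold is an illustrative guess, while redis_semantic_cache_use_async mirrors the default this commit sets on the proxy:

import os

import litellm
from litellm.caching import Cache

# Minimal sketch, assuming Cache supports type="redis-semantic" as added on the
# proxy side in this commit; parameter names beyond host/port/password and
# redis_semantic_cache_use_async are assumptions, not confirmed API.
litellm.cache = Cache(
    type="redis-semantic",
    host=os.getenv("REDIS_HOST"),
    port=os.getenv("REDIS_PORT"),
    password=os.getenv("REDIS_PASSWORD"),
    similarity_threshold=0.8,  # assumed knob: how similar a prompt must be to hit
    redis_semantic_cache_use_async=True,  # default the proxy now sets
)

# Later completion calls with a sufficiently similar prompt could then be
# answered from the semantic cache instead of the upstream model.
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "what is litellm?"}],
)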