Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix(nvidia_nim/embed.py): add 'dimensions' support (#8302)
* fix(nvidia_nim/embed.py): add 'dimensions' support
  Fixes https://github.com/BerriAI/litellm/issues/8238
* fix(proxy_server.py): initialize router redis cache if set up on proxy
  Fixes https://github.com/BerriAI/litellm/issues/6602
* test: add unit testing for new helper function
parent: 942446d826
commit: 024237077b
5 changed files with 36 additions and 2 deletions
litellm/proxy/proxy_server.py

```diff
@@ -1631,7 +1631,7 @@ class ProxyConfig:
         self,
         cache_params: dict,
     ):
-        global redis_usage_cache
+        global redis_usage_cache, llm_router
         from litellm import Cache
 
         if "default_in_memory_ttl" in cache_params:
@@ -1646,6 +1646,10 @@ class ProxyConfig:
         ## INIT PROXY REDIS USAGE CLIENT ##
         redis_usage_cache = litellm.cache.cache
 
+        ## INIT ROUTER REDIS CACHE ##
+        if llm_router is not None:
+            llm_router._update_redis_cache(cache=redis_usage_cache)
+
     async def get_config(self, config_file_path: Optional[str] = None) -> dict:
         """
         Load config file
```