mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
fix(nvidia_nim/embed.py): add 'dimensions' support (#8302)
* fix(nvidia_nim/embed.py): add 'dimensions' support Fixes https://github.com/BerriAI/litellm/issues/8238 * fix(proxy_server.py): initialize router redis cache if setup on proxy Fixes https://github.com/BerriAI/litellm/issues/6602 * test: add unit testing for new helper function
This commit is contained in:
parent
942446d826
commit
024237077b
5 changed files with 36 additions and 2 deletions
|
@ -573,6 +573,20 @@ class Router:
|
|||
litellm.amoderation, call_type="moderation"
|
||||
)
|
||||
|
||||
def _update_redis_cache(self, cache: RedisCache):
    """
    Update the redis cache for the router, if none set.

    Allows proxy user to just do
    ```yaml
    litellm_settings:
        cache: true
    ```
    and caching to just work.
    """
    # Respect an already-configured redis cache; only fill in the gap.
    if self.cache.redis_cache is not None:
        return
    self.cache.redis_cache = cache
|
||||
|
||||
def initialize_assistants_endpoint(self):
|
||||
## INITIALIZE PASS THROUGH ASSISTANTS ENDPOINT ##
|
||||
self.acreate_assistants = self.factory_function(litellm.acreate_assistants)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue