Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 03:04:13 +00:00
fix(nvidia_nim/embed.py): add 'dimensions' support (#8302)
* fix(nvidia_nim/embed.py): add 'dimensions' support. Fixes https://github.com/BerriAI/litellm/issues/8238
* fix(proxy_server.py): initialize the router's Redis cache if one is set up on the proxy. Fixes https://github.com/BerriAI/litellm/issues/6602
* test: add unit testing for the new helper function
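As a rough illustration of the 'dimensions' change (the model name and dimension value below are placeholders, not taken from this commit), an embedding request through LiteLLM can now forward the optional OpenAI-style parameter to the NVIDIA NIM endpoint:

import litellm

# Hypothetical usage sketch: the model and dimensions values are placeholders.
# The fix forwards the optional 'dimensions' parameter to NVIDIA NIM embeddings.
response = litellm.embedding(
    model="nvidia_nim/nvidia/nv-embedqa-e5-v5",
    input=["hello world"],
    dimensions=512,
)
print(len(response.data[0]["embedding"]))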
Parent: 16be203283
Commit: 5d170162d3
5 changed files with 36 additions and 2 deletions
@@ -384,3 +384,15 @@ def test_router_get_model_access_groups(potential_access_group, expected_result)
         model_access_group=potential_access_group
     )
     assert access_groups == expected_result
+
+
+def test_router_redis_cache():
+    router = Router(
+        model_list=[{"model_name": "gemini/*", "litellm_params": {"model": "gemini/*"}}]
+    )
+
+    redis_cache = MagicMock()
+
+    router._update_redis_cache(cache=redis_cache)
+
+    assert router.cache.redis_cache == redis_cache
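For context on the proxy fix, here is a minimal sketch of how the same helper could be wired up with a real Redis cache instead of a mock. The RedisCache import path and constructor arguments are assumptions for illustration and are not taken from this commit:

from litellm import Router
from litellm.caching import RedisCache  # assumed import path, not shown in this diff

# Sketch only: host, port, and password are placeholder values.
router = Router(
    model_list=[{"model_name": "gemini/*", "litellm_params": {"model": "gemini/*"}}]
)
redis_cache = RedisCache(host="localhost", port=6379, password="my-password")

# Attach the proxy-level Redis cache to the router, as the proxy fix does.
router._update_redis_cache(cache=redis_cache)

assert router.cache.redis_cache is redis_cache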