fix(router.py): support multiple orgs in 1 model definition

Closes https://github.com/BerriAI/litellm/issues/3949
This commit is contained in:
Krrish Dholakia 2024-06-18 19:36:58 -07:00
parent 83b97d9763
commit 14b66c3daa
3 changed files with 86 additions and 27 deletions

View file

@@ -1,24 +1,54 @@
#### What this tests ####
# This tests litellm router
import sys, os, time, openai
import traceback, asyncio
import asyncio
import os
import sys
import time
import traceback
import openai
import pytest
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import os
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
import httpx
from dotenv import load_dotenv
import litellm
from litellm import Router
from litellm.router import Deployment, LiteLLM_Params, ModelInfo
from concurrent.futures import ThreadPoolExecutor
from collections import defaultdict
from dotenv import load_dotenv
import os, httpx
load_dotenv()
def test_router_multi_org_list():
    """
    Pass a list of organizations in one model definition and expect the
    Router to expand it into one unique deployment per organization.

    Regression test for https://github.com/BerriAI/litellm/issues/3949
    """
    orgs = ["org-1", "org-2", "org-3"]
    # Use the directly-imported Router for consistency with the other
    # tests in this module (e.g. test_router_sensitive_keys).
    router = Router(
        model_list=[
            {
                "model_name": "*",
                "litellm_params": {
                    "model": "openai/*",
                    "api_key": "my-key",
                    "api_base": "https://api.openai.com/v1",
                    # a list here should fan out into one deployment per org
                    "organization": orgs,
                },
            }
        ]
    )

    # tie the expectation to the input list instead of a magic number,
    # so the test stays correct if the org list changes
    assert len(router.get_model_list()) == len(orgs)
def test_router_sensitive_keys():
try:
router = Router(
@@ -527,9 +557,10 @@ def test_router_context_window_fallback():
- Send a 5k prompt
- Assert it works
"""
from large_text import text
import os
from large_text import text
litellm.set_verbose = False
print(f"len(text): {len(text)}")
@@ -577,9 +608,10 @@ async def test_async_router_context_window_fallback():
- Send a 5k prompt
- Assert it works
"""
from large_text import text
import os
from large_text import text
litellm.set_verbose = False
print(f"len(text): {len(text)}")
@@ -660,9 +692,10 @@ def test_router_context_window_check_pre_call_check_in_group():
- Send a 5k prompt
- Assert it works
"""
from large_text import text
import os
from large_text import text
litellm.set_verbose = False
print(f"len(text): {len(text)}")
@@ -708,9 +741,10 @@ def test_router_context_window_check_pre_call_check_out_group():
- Send a 5k prompt
- Assert it works
"""
from large_text import text
import os
from large_text import text
litellm.set_verbose = False
print(f"len(text): {len(text)}")
@@ -1536,9 +1570,10 @@ def test_router_anthropic_key_dynamic():
def test_router_timeout():
litellm.set_verbose = True
from litellm._logging import verbose_logger
import logging
from litellm._logging import verbose_logger
verbose_logger.setLevel(logging.DEBUG)
model_list = [
{