mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
(test) init router clients
This commit is contained in:
parent
cba98cf530
commit
886b52d448
1 changed files with 68 additions and 29 deletions
|
@ -1,4 +1,4 @@
|
|||
# this tests if the router is initialized correctly
|
||||
# this tests if the router is initialized correctly
|
||||
import sys, os, time
|
||||
import traceback, asyncio
|
||||
import pytest
|
||||
|
@ -12,40 +12,79 @@ from collections import defaultdict
|
|||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
|
||||
|
||||
# every time we load the router we should have 4 clients:
|
||||
# every time we load the router we should have 4 clients:
|
||||
# Async
|
||||
# Sync
|
||||
# Async + Stream
|
||||
# Sync + Stream
|
||||
|
||||
|
||||
def test_init_clients():
|
||||
litellm.set_verbose = True
|
||||
try:
|
||||
model_list = [
|
||||
{
|
||||
"model_name": "gpt-3.5-turbo",
|
||||
"litellm_params": {
|
||||
"model": "azure/chatgpt-v-2",
|
||||
"api_key": os.getenv("AZURE_API_KEY"),
|
||||
"api_version": os.getenv("AZURE_API_VERSION"),
|
||||
"api_base": os.getenv("AZURE_API_BASE")
|
||||
},
|
||||
},
|
||||
]
|
||||
litellm.set_verbose = True
|
||||
try:
|
||||
print("testing init 4 clients with diff timeouts")
|
||||
model_list = [
|
||||
{
|
||||
"model_name": "gpt-3.5-turbo",
|
||||
"litellm_params": {
|
||||
"model": "azure/chatgpt-v-2",
|
||||
"api_key": os.getenv("AZURE_API_KEY"),
|
||||
"api_version": os.getenv("AZURE_API_VERSION"),
|
||||
"api_base": os.getenv("AZURE_API_BASE"),
|
||||
"timeout": 0.01,
|
||||
"stream_timeout": 0.000_001,
|
||||
"max_retries": 7
|
||||
},
|
||||
},
|
||||
]
|
||||
router = Router(model_list=model_list)
|
||||
for elem in router.model_list:
|
||||
assert elem["client"] is not None
|
||||
assert elem["async_client"] is not None
|
||||
assert elem["stream_client"] is not None
|
||||
assert elem["stream_async_client"] is not None
|
||||
|
||||
# check if timeout for stream/non stream clients is set correctly
|
||||
async_client = elem["async_client"]
|
||||
stream_async_client = elem["stream_async_client"]
|
||||
|
||||
assert async_client.timeout == 0.01
|
||||
assert stream_async_client.timeout == 0.000_001
|
||||
print("PASSED !")
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
test_init_clients()
|
||||
|
||||
|
||||
router = Router(model_list=model_list)
|
||||
print(router.model_list)
|
||||
for elem in router.model_list:
|
||||
print(elem)
|
||||
assert elem["client"] is not None
|
||||
assert elem["async_client"] is not None
|
||||
assert elem["stream_client"] is not None
|
||||
assert elem["stream_async_client"] is not None
|
||||
def test_init_clients_basic():
|
||||
litellm.set_verbose = True
|
||||
try:
|
||||
print("Test basic client init")
|
||||
model_list = [
|
||||
{
|
||||
"model_name": "gpt-3.5-turbo",
|
||||
"litellm_params": {
|
||||
"model": "azure/chatgpt-v-2",
|
||||
"api_key": os.getenv("AZURE_API_KEY"),
|
||||
"api_version": os.getenv("AZURE_API_VERSION"),
|
||||
"api_base": os.getenv("AZURE_API_BASE"),
|
||||
},
|
||||
},
|
||||
]
|
||||
router = Router(model_list=model_list)
|
||||
for elem in router.model_list:
|
||||
assert elem["client"] is not None
|
||||
assert elem["async_client"] is not None
|
||||
assert elem["stream_client"] is not None
|
||||
assert elem["stream_async_client"] is not None
|
||||
print("PASSED !")
|
||||
|
||||
# see if we can init clients without timeout or max retries set
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
test_init_clients_basic()
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
# test_init_clients()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue