From 886b52d4480495514be7aa9054af01b46b6aa7a6 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Mon, 4 Dec 2023 16:59:48 -0800
Subject: [PATCH] (test) init router clients

---
 litellm/tests/test_router_init.py | 97 ++++++++++++++++++++++---------
 1 file changed, 68 insertions(+), 29 deletions(-)

diff --git a/litellm/tests/test_router_init.py b/litellm/tests/test_router_init.py
index 8068940817..5422429761 100644
--- a/litellm/tests/test_router_init.py
+++ b/litellm/tests/test_router_init.py
@@ -1,4 +1,4 @@
-# this tests if the router is intiaized correctly
+# this tests if the router is initialized correctly
 import sys, os, time
 import traceback, asyncio
 import pytest
@@ -12,40 +12,79 @@ from collections import defaultdict
 from dotenv import load_dotenv
 
 load_dotenv()
-
-# everytime we load the router we should have 4 clients:
+# every time we load the router we should have 4 clients:
 # Async
 # Sync
 # Async + Stream
 # Sync + Stream
-
 def test_init_clients():
-    litellm.set_verbose = True
-    try:
-        model_list = [
-            {
-                "model_name": "gpt-3.5-turbo",
-                "litellm_params": {
-                    "model": "azure/chatgpt-v-2",
-                    "api_key": os.getenv("AZURE_API_KEY"),
-                    "api_version": os.getenv("AZURE_API_VERSION"),
-                    "api_base": os.getenv("AZURE_API_BASE")
-                },
-            },
-        ]
+    litellm.set_verbose = True
+    try:
+        print("testing init 4 clients with diff timeouts")
+        model_list = [
+            {
+                "model_name": "gpt-3.5-turbo",
+                "litellm_params": {
+                    "model": "azure/chatgpt-v-2",
+                    "api_key": os.getenv("AZURE_API_KEY"),
+                    "api_version": os.getenv("AZURE_API_VERSION"),
+                    "api_base": os.getenv("AZURE_API_BASE"),
+                    "timeout": 0.01,
+                    "stream_timeout": 0.000_001,
+                    "max_retries": 7
+                },
+            },
+        ]
+        router = Router(model_list=model_list)
+        for elem in router.model_list:
+            assert elem["client"] is not None
+            assert elem["async_client"] is not None
+            assert elem["stream_client"] is not None
+            assert elem["stream_async_client"] is not None
+
+            # check if timeout for stream/non stream clients is set correctly
+            async_client = elem["async_client"]
+            stream_async_client = elem["stream_async_client"]
+
+            assert async_client.timeout == 0.01
+            assert stream_async_client.timeout == 0.000_001
+        print("PASSED !")
+
+    except Exception as e:
+        traceback.print_exc()
+        pytest.fail(f"Error occurred: {e}")
+
+test_init_clients()
 
-        router = Router(model_list=model_list)
-        print(router.model_list)
-        for elem in router.model_list:
-            print(elem)
-            assert elem["client"] is not None
-            assert elem["async_client"] is not None
-            assert elem["stream_client"] is not None
-            assert elem["stream_async_client"] is not None
+def test_init_clients_basic():
+    litellm.set_verbose = True
+    try:
+        print("Test basic client init")
+        model_list = [
+            {
+                "model_name": "gpt-3.5-turbo",
+                "litellm_params": {
+                    "model": "azure/chatgpt-v-2",
+                    "api_key": os.getenv("AZURE_API_KEY"),
+                    "api_version": os.getenv("AZURE_API_VERSION"),
+                    "api_base": os.getenv("AZURE_API_BASE"),
+                },
+            },
+        ]
+        router = Router(model_list=model_list)
+        for elem in router.model_list:
+            assert elem["client"] is not None
+            assert elem["async_client"] is not None
+            assert elem["stream_client"] is not None
+            assert elem["stream_async_client"] is not None
+        print("PASSED !")
+
+        # see if we can init clients without timeout or max retries set
+    except Exception as e:
+        traceback.print_exc()
+        pytest.fail(f"Error occurred: {e}")
+
+test_init_clients_basic()
 
-    except Exception as e:
-        traceback.print_exc()
-        pytest.fail(f"Error occurred: {e}")
-# test_init_clients()