(refactor) litellm.Router client initialization utils (#6394)

* refactor InitalizeOpenAISDKClient

* use helper func for _should_create_openai_sdk_client_for_model

* use static methods for set client on litellm router

* reduce LOC in _get_client_initialization_params

* fix _should_create_openai_sdk_client_for_model

* code quality fix

* test test_should_create_openai_sdk_client_for_model

* test test_get_client_initialization_params_openai

* fix mypy linting errors

* fix OpenAISDKClientInitializationParams

* test_get_client_initialization_params_all_env_vars

* test_get_client_initialization_params_azure_ai_studio_mistral

* test_get_client_initialization_params_default_values

* fix _get_client_initialization_params
Ishaan Jaff 2024-10-23 17:33:19 +05:30 committed by GitHub
parent 3991d75511
commit b70147f63b
3 changed files with 882 additions and 476 deletions


@@ -63,10 +63,7 @@ from litellm.router_utils.batch_utils import (
_get_router_metadata_variable_name,
replace_model_in_jsonl,
)
from litellm.router_utils.client_initalization_utils import (
set_client,
should_initialize_sync_client,
)
from litellm.router_utils.client_initalization_utils import InitalizeOpenAISDKClient
from litellm.router_utils.cooldown_cache import CooldownCache
from litellm.router_utils.cooldown_callbacks import router_cooldown_event_callback
from litellm.router_utils.cooldown_handlers import (
@@ -3951,7 +3948,7 @@ class Router:
raise Exception(f"Unsupported provider - {custom_llm_provider}")
# init OpenAI, Azure clients
set_client(
InitalizeOpenAISDKClient.set_client(
litellm_router_instance=self, model=deployment.to_json(exclude_none=True)
)
@@ -4661,7 +4658,9 @@ class Router:
"""
Re-initialize the client
"""
set_client(litellm_router_instance=self, model=deployment)
InitalizeOpenAISDKClient.set_client(
litellm_router_instance=self, model=deployment
)
client = self.cache.get_cache(key=cache_key, local_only=True)
return client
else:
@@ -4671,7 +4670,9 @@ class Router:
"""
Re-initialize the client
"""
set_client(litellm_router_instance=self, model=deployment)
InitalizeOpenAISDKClient.set_client(
litellm_router_instance=self, model=deployment
)
client = self.cache.get_cache(key=cache_key, local_only=True)
return client
else:
@@ -4682,7 +4683,9 @@ class Router:
"""
Re-initialize the client
"""
set_client(litellm_router_instance=self, model=deployment)
InitalizeOpenAISDKClient.set_client(
litellm_router_instance=self, model=deployment
)
client = self.cache.get_cache(key=cache_key)
return client
else:
@@ -4692,7 +4695,9 @@ class Router:
"""
Re-initialize the client
"""
set_client(litellm_router_instance=self, model=deployment)
InitalizeOpenAISDKClient.set_client(
litellm_router_instance=self, model=deployment
)
client = self.cache.get_cache(key=cache_key)
return client

File diff suppressed because it is too large
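The suppressed diff is presumably litellm/router_utils/client_initalization_utils.py, the bulk of this refactor: the module-level helpers set_client and should_initialize_sync_client become static methods on an InitalizeOpenAISDKClient class, and the client settings are gathered into an OpenAISDKClientInitializationParams object. A rough sketch of the resulting interface, reconstructed from the call sites above and the tests below (the pydantic base class, field types, and the stub bodies are assumptions, not the actual implementation):

from typing import Optional

from pydantic import BaseModel


class OpenAISDKClientInitializationParams(BaseModel):
    # Field set inferred from the assertions in the tests below.
    api_key: Optional[str] = None
    api_base: Optional[str] = None
    api_version: Optional[str] = None
    timeout: Optional[float] = None
    stream_timeout: Optional[float] = None
    max_retries: int = 0
    organization: Optional[str] = None
    model_name: Optional[str] = None
    custom_llm_provider: Optional[str] = None


class InitalizeOpenAISDKClient:
    @staticmethod
    def should_initialize_sync_client(litellm_router_instance) -> bool:
        # Behavior derived from test_should_initialize_sync_client: no router,
        # or async_only_mode=True, means no sync SDK client is created.
        if litellm_router_instance is None:
            return False
        settings = getattr(litellm_router_instance, "router_general_settings", None)
        if settings is not None and getattr(settings, "async_only_mode", False) is True:
            return False
        return True

    @staticmethod
    def _should_create_openai_sdk_client_for_model(
        model_name: str, custom_llm_provider: Optional[str]
    ) -> bool:
        # True only for OpenAI / Azure / OpenAI-compatible deployments (stub).
        ...

    @staticmethod
    def _get_client_initialization_params(
        model: dict,
        model_name: str,
        custom_llm_provider: Optional[str],
        litellm_params: dict,
        default_api_key: Optional[str],
        default_api_base: Optional[str],
    ) -> OpenAISDKClientInitializationParams:
        # Resolves api_key / api_base / timeouts, including "os.environ/..."
        # references, into a single params object (stub).
        ...

    @staticmethod
    def set_client(litellm_router_instance, model: dict) -> None:
        # Builds and caches the OpenAI / Azure SDK clients for one deployment (stub).
        ...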


@@ -17,6 +17,10 @@ from dotenv import load_dotenv
import litellm
from litellm import Router
from litellm.router_utils.client_initalization_utils import (
    InitalizeOpenAISDKClient,
    OpenAISDKClientInitializationParams,
)
load_dotenv()
@@ -696,3 +700,283 @@ def test_init_router_with_supported_environments(environment, expected_models):
    assert set(_model_list) == set(expected_models)
    os.environ.pop("LITELLM_ENVIRONMENT")
def test_should_initialize_sync_client():
    from litellm.types.router import RouterGeneralSettings

    # Test case 1: Router instance is None
    assert InitalizeOpenAISDKClient.should_initialize_sync_client(None) is False

    # Test case 2: Router instance without router_general_settings
    router = Router(model_list=[])
    assert InitalizeOpenAISDKClient.should_initialize_sync_client(router) is True

    # Test case 3: Router instance with async_only_mode = False
    router = Router(
        model_list=[],
        router_general_settings=RouterGeneralSettings(async_only_mode=False),
    )
    assert InitalizeOpenAISDKClient.should_initialize_sync_client(router) is True

    # Test case 4: Router instance with async_only_mode = True
    router = Router(
        model_list=[],
        router_general_settings=RouterGeneralSettings(async_only_mode=True),
    )
    assert InitalizeOpenAISDKClient.should_initialize_sync_client(router) is False

    # Test case 5: Router instance with router_general_settings but without async_only_mode
    router = Router(model_list=[], router_general_settings=RouterGeneralSettings())
    assert InitalizeOpenAISDKClient.should_initialize_sync_client(router) is True

    print("All test cases passed!")
@pytest.mark.parametrize(
    "model_name, custom_llm_provider, expected_result",
    [
        ("gpt-3.5-turbo", None, True),  # OpenAI chat completion model
        ("text-embedding-ada-002", None, True),  # OpenAI embedding model
        ("claude-2", None, False),  # Non-OpenAI model
        ("gpt-3.5-turbo", "azure", True),  # Azure OpenAI
        ("text-davinci-003", "azure_text", True),  # Azure OpenAI
        ("gpt-3.5-turbo", "openai", True),  # OpenAI
        ("custom-model", "custom_openai", True),  # Custom OpenAI compatible
        ("text-davinci-003", "text-completion-openai", True),  # OpenAI text completion
        (
            "ft:gpt-3.5-turbo-0613:my-org:custom-model:7p4lURel",
            None,
            True,
        ),  # Fine-tuned GPT model
        ("mistral-7b", "huggingface", False),  # Non-OpenAI provider
        ("custom-model", "anthropic", False),  # Non-OpenAI compatible provider
    ],
)
def test_should_create_openai_sdk_client_for_model(
    model_name, custom_llm_provider, expected_result
):
    result = InitalizeOpenAISDKClient._should_create_openai_sdk_client_for_model(
        model_name, custom_llm_provider
    )

    assert (
        result == expected_result
    ), f"Failed for model: {model_name}, provider: {custom_llm_provider}"
def test_should_create_openai_sdk_client_for_model_openai_compatible_providers():
    # Test with a known OpenAI compatible provider
    assert InitalizeOpenAISDKClient._should_create_openai_sdk_client_for_model(
        "custom-model", "groq"
    ), "Should return True for OpenAI compatible provider"

    # Add a new compatible provider and test
    litellm.openai_compatible_providers.append("new_provider")
    assert InitalizeOpenAISDKClient._should_create_openai_sdk_client_for_model(
        "custom-model", "new_provider"
    ), "Should return True for newly added OpenAI compatible provider"

    # Clean up
    litellm.openai_compatible_providers.remove("new_provider")
def test_get_client_initialization_params_openai():
    """Test basic OpenAI configuration with direct parameter passing."""
    model = {}
    model_name = "gpt-3.5-turbo"
    custom_llm_provider = None
    litellm_params = {"api_key": "sk-openai-key", "timeout": 30, "max_retries": 3}
    default_api_key = None
    default_api_base = None

    result = InitalizeOpenAISDKClient._get_client_initialization_params(
        model=model,
        model_name=model_name,
        custom_llm_provider=custom_llm_provider,
        litellm_params=litellm_params,
        default_api_key=default_api_key,
        default_api_base=default_api_base,
    )

    assert isinstance(result, OpenAISDKClientInitializationParams)
    assert result.api_key == "sk-openai-key"
    assert result.timeout == 30
    assert result.max_retries == 3
    assert result.model_name == "gpt-3.5-turbo"
def test_get_client_initialization_params_azure():
    """Test Azure OpenAI configuration with specific Azure parameters."""
    model = {}
    model_name = "azure/gpt-4"
    custom_llm_provider = "azure"
    litellm_params = {
        "api_key": "azure-key",
        "api_base": "https://example.azure.openai.com",
        "api_version": "2023-05-15",
    }
    default_api_key = None
    default_api_base = None

    result = InitalizeOpenAISDKClient._get_client_initialization_params(
        model=model,
        model_name=model_name,
        custom_llm_provider=custom_llm_provider,
        litellm_params=litellm_params,
        default_api_key=default_api_key,
        default_api_base=default_api_base,
    )

    assert result.api_key == "azure-key"
    assert result.api_base == "https://example.azure.openai.com"
    assert result.api_version == "2023-05-15"
    assert result.custom_llm_provider == "azure"
def test_get_client_initialization_params_environment_variable_parsing():
    """Test parsing of environment variables for configuration."""
    os.environ["UNIQUE_OPENAI_API_KEY"] = "env-openai-key"
    os.environ["UNIQUE_TIMEOUT"] = "45"

    model = {}
    model_name = "gpt-4"
    custom_llm_provider = None
    litellm_params = {
        "api_key": "os.environ/UNIQUE_OPENAI_API_KEY",
        "timeout": "os.environ/UNIQUE_TIMEOUT",
        "organization": "os.environ/UNIQUE_ORG_ID",
    }
    default_api_key = None
    default_api_base = None

    result = InitalizeOpenAISDKClient._get_client_initialization_params(
        model=model,
        model_name=model_name,
        custom_llm_provider=custom_llm_provider,
        litellm_params=litellm_params,
        default_api_key=default_api_key,
        default_api_base=default_api_base,
    )

    assert result.api_key == "env-openai-key"
    assert result.timeout == 45.0
    assert result.organization is None  # Since ORG_ID is not set in the environment
def test_get_client_initialization_params_azure_ai_studio_mistral():
    """
    Test configuration for Azure AI Studio Mistral model.
    - /v1/ is added to the api_base if it is not present
    - custom_llm_provider is set to openai (Azure AI Studio Mistral models need to use OpenAI route)
    """
    model = {}
    model_name = "azure/mistral-large-latest"
    custom_llm_provider = "azure"
    litellm_params = {
        "api_key": "azure-key",
        "api_base": "https://example.azure.openai.com",
    }
    default_api_key = None
    default_api_base = None

    result = InitalizeOpenAISDKClient._get_client_initialization_params(
        model,
        model_name,
        custom_llm_provider,
        litellm_params,
        default_api_key,
        default_api_base,
    )

    assert result.custom_llm_provider == "openai"
    assert result.model_name == "mistral-large-latest"
    assert result.api_base == "https://example.azure.openai.com/v1/"
def test_get_client_initialization_params_default_values():
    """
    Test use of default values when specific parameters are not provided.
    This is used typically for OpenAI compatible providers - example Together AI
    """
    model = {}
    model_name = "together/meta-llama-3.1-8b-instruct"
    custom_llm_provider = None
    litellm_params = {}
    default_api_key = "together-api-key"
    default_api_base = "https://together.xyz/api.openai.com"

    result = InitalizeOpenAISDKClient._get_client_initialization_params(
        model=model,
        model_name=model_name,
        custom_llm_provider=custom_llm_provider,
        litellm_params=litellm_params,
        default_api_key=default_api_key,
        default_api_base=default_api_base,
    )

    assert result.api_key == "together-api-key"
    assert result.api_base == "https://together.xyz/api.openai.com"
    assert result.timeout == litellm.request_timeout
    assert result.max_retries == 0
def test_get_client_initialization_params_all_env_vars():
    # Set up environment variables
    os.environ["TEST_API_KEY"] = "test-api-key"
    os.environ["TEST_API_BASE"] = "https://test.openai.com"
    os.environ["TEST_API_VERSION"] = "2023-05-15"
    os.environ["TEST_TIMEOUT"] = "30"
    os.environ["TEST_STREAM_TIMEOUT"] = "60"
    os.environ["TEST_MAX_RETRIES"] = "3"
    os.environ["TEST_ORGANIZATION"] = "test-org"

    model = {}
    model_name = "gpt-4"
    custom_llm_provider = None
    litellm_params = {
        "api_key": "os.environ/TEST_API_KEY",
        "api_base": "os.environ/TEST_API_BASE",
        "api_version": "os.environ/TEST_API_VERSION",
        "timeout": "os.environ/TEST_TIMEOUT",
        "stream_timeout": "os.environ/TEST_STREAM_TIMEOUT",
        "max_retries": "os.environ/TEST_MAX_RETRIES",
        "organization": "os.environ/TEST_ORGANIZATION",
    }
    default_api_key = None
    default_api_base = None

    result = InitalizeOpenAISDKClient._get_client_initialization_params(
        model=model,
        model_name=model_name,
        custom_llm_provider=custom_llm_provider,
        litellm_params=litellm_params,
        default_api_key=default_api_key,
        default_api_base=default_api_base,
    )

    assert isinstance(result, OpenAISDKClientInitializationParams)
    assert result.api_key == "test-api-key"
    assert result.api_base == "https://test.openai.com"
    assert result.api_version == "2023-05-15"
    assert result.timeout == 30.0
    assert result.stream_timeout == 60.0
    assert result.max_retries == 3
    assert result.organization == "test-org"
    assert result.model_name == "gpt-4"
    assert result.custom_llm_provider is None

    # Clean up environment variables
    for key in [
        "TEST_API_KEY",
        "TEST_API_BASE",
        "TEST_API_VERSION",
        "TEST_TIMEOUT",
        "TEST_STREAM_TIMEOUT",
        "TEST_MAX_RETRIES",
        "TEST_ORGANIZATION",
    ]:
        os.environ.pop(key)
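For context, the "os.environ/..." values exercised above follow litellm's standard convention for referencing secrets from a router's model_list; a minimal usage sketch (the environment variable name and values are illustrative):

import os

from litellm import Router

# Illustrative only; in practice the key is set outside the process.
os.environ["MY_OPENAI_API_KEY"] = "sk-..."

router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                # Resolved from the environment when the SDK client is initialized
                "api_key": "os.environ/MY_OPENAI_API_KEY",
                "timeout": 30,
            },
        }
    ]
)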