fix(router.py): comment out azure/openai client init - not necessary

Krrish Dholakia 2025-03-10 16:47:43 -07:00
parent 1146bc0b1a
commit 1e89fdec99
3 changed files with 19 additions and 10 deletions

@@ -1994,8 +1994,8 @@
     "max_tokens": 8191,
     "max_input_tokens": 32000,
     "max_output_tokens": 8191,
-    "input_cost_per_token": 0.000001,
-    "output_cost_per_token": 0.000003,
+    "input_cost_per_token": 0.0000001,
+    "output_cost_per_token": 0.0000003,
     "litellm_provider": "mistral",
     "supports_function_calling": true,
     "mode": "chat",
@@ -2006,8 +2006,8 @@
     "max_tokens": 8191,
     "max_input_tokens": 32000,
     "max_output_tokens": 8191,
-    "input_cost_per_token": 0.000001,
-    "output_cost_per_token": 0.000003,
+    "input_cost_per_token": 0.0000001,
+    "output_cost_per_token": 0.0000003,
     "litellm_provider": "mistral",
     "supports_function_calling": true,
     "mode": "chat",

@ -1,4 +1,13 @@
model_list: model_list:
- model_name: llama3.2-vision - model_name: gpt-4o
litellm_params: litellm_params:
model: ollama/llama3.2-vision model: azure/gpt-4o
credential_name: default_azure_credential
credential_list:
- credential_name: default_azure_credential
credentials:
api_key: os.environ/AZURE_API_KEY
api_base: os.environ/AZURE_API_BASE
credential_info:
description: "Default Azure credential"

@@ -4373,10 +4373,10 @@ class Router:
         if custom_llm_provider not in litellm.provider_list:
             raise Exception(f"Unsupported provider - {custom_llm_provider}")
-        # init OpenAI, Azure clients
-        InitalizeOpenAISDKClient.set_client(
-            litellm_router_instance=self, model=deployment.to_json(exclude_none=True)
-        )
+        # # init OpenAI, Azure clients
+        # InitalizeOpenAISDKClient.set_client(
+        #     litellm_router_instance=self, model=deployment.to_json(exclude_none=True)
+        # )
         self._initialize_deployment_for_pass_through(
             deployment=deployment,
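
With the eager `InitalizeOpenAISDKClient.set_client` call commented out, clients are presumably constructed on demand at request time rather than at deployment registration, per the commit message. A minimal sketch of that lazy-initialization pattern, assuming a generic client factory; none of these names come from litellm:

```python
# Hypothetical lazy client cache (illustrative only, not litellm's code).
from typing import Any, Callable, Dict


class LazyClientCache:
    """Builds an SDK client the first time a deployment is used, then reuses it."""

    def __init__(self, factory: Callable[..., Any]):
        self._factory = factory
        self._clients: Dict[str, Any] = {}

    def get(self, deployment_id: str, **client_params: Any) -> Any:
        # Nothing is constructed at registration time; only here, on first use.
        if deployment_id not in self._clients:
            self._clients[deployment_id] = self._factory(**client_params)
        return self._clients[deployment_id]
```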