_update_kwargs_with_default_litellm_params

Ishaan Jaff 2025-04-19 13:52:54 -07:00
parent 99db1b7690
commit c26b915ab1
2 changed files with 30 additions and 55 deletions


@@ -4979,35 +4979,6 @@
         "supports_tool_choice": true
     },
     "gemini-2.5-pro-exp-03-25": {
-        "max_tokens": 65536,
-        "max_input_tokens": 1048576,
-        "max_output_tokens": 65536,
-        "max_images_per_prompt": 3000,
-        "max_videos_per_prompt": 10,
-        "max_video_length": 1,
-        "max_audio_length_hours": 8.4,
-        "max_audio_per_prompt": 1,
-        "max_pdf_size_mb": 30,
-        "input_cost_per_token": 0,
-        "input_cost_per_token_above_200k_tokens": 0,
-        "output_cost_per_token": 0,
-        "output_cost_per_token_above_200k_tokens": 0,
-        "litellm_provider": "vertex_ai-language-models",
-        "mode": "chat",
-        "supports_system_messages": true,
-        "supports_function_calling": true,
-        "supports_vision": true,
-        "supports_audio_input": true,
-        "supports_video_input": true,
-        "supports_pdf_input": true,
-        "supports_response_schema": true,
-        "supports_tool_choice": true,
-        "supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
-        "supported_modalities": ["text", "image", "audio", "video"],
-        "supported_output_modalities": ["text"],
-        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
-    },
-    "gemini-2.5-pro-preview-03-25": {
         "max_tokens": 65536,
         "max_input_tokens": 1048576,
         "max_output_tokens": 65536,


@@ -339,9 +339,9 @@ class Router:
         )  # names of models under litellm_params. ex. azure/chatgpt-v-2
         self.deployment_latency_map = {}
         ### CACHING ###
-        cache_type: Literal[
-            "local", "redis", "redis-semantic", "s3", "disk"
-        ] = "local"  # default to an in-memory cache
+        cache_type: Literal["local", "redis", "redis-semantic", "s3", "disk"] = (
+            "local"  # default to an in-memory cache
+        )
         redis_cache = None
         cache_config: Dict[str, Any] = {}
@@ -562,9 +562,9 @@ class Router:
                 )
             )
-        self.model_group_retry_policy: Optional[
-            Dict[str, RetryPolicy]
-        ] = model_group_retry_policy
+        self.model_group_retry_policy: Optional[Dict[str, RetryPolicy]] = (
+            model_group_retry_policy
+        )
         self.allowed_fails_policy: Optional[AllowedFailsPolicy] = None
         if allowed_fails_policy is not None:
@@ -1104,17 +1104,21 @@ class Router:
     ) -> None:
         """
         Adds default litellm params to kwargs, if set.
+
+        Handles inserting this as either "metadata" or "litellm_metadata" depending on the metadata_variable_name
         """
-        self.default_litellm_params[
-            metadata_variable_name
-        ] = self.default_litellm_params.pop("metadata", {})
-        for k, v in self.default_litellm_params.items():
-            if (
-                k not in kwargs and v is not None
-            ):  # prioritize model-specific params > default router params
-                kwargs[k] = v
-            elif k == metadata_variable_name:
-                kwargs[metadata_variable_name].update(v)
+        # 1) copy your defaults and pull out metadata
+        defaults = self.default_litellm_params.copy()
+        metadata_defaults = defaults.pop("metadata", {}) or {}
+
+        # 2) add any non-metadata defaults that aren't already in kwargs
+        for key, value in defaults.items():
+            if value is None:
+                continue
+            kwargs.setdefault(key, value)
+
+        # 3) merge in metadata, this handles inserting this as either "metadata" or "litellm_metadata"
+        kwargs.setdefault(metadata_variable_name, {}).update(metadata_defaults)

     def _handle_clientside_credential(
         self, deployment: dict, kwargs: dict
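
The net behavior of the rewritten helper is easier to see in isolation. A standalone sketch of the same merge rules (function name and example values hypothetical): caller-supplied kwargs always win, None-valued defaults are dropped, and metadata defaults are merged under whichever key the caller uses. It also avoids the old code's side effect of pop()-mutating self.default_litellm_params on every call.

from typing import Any, Dict

def merge_defaults(
    kwargs: Dict[str, Any],
    default_litellm_params: Dict[str, Any],
    metadata_variable_name: str = "metadata",
) -> None:
    # Copy first so the router's stored defaults are never mutated.
    defaults = default_litellm_params.copy()
    metadata_defaults = defaults.pop("metadata", {}) or {}

    # setdefault only fills keys the caller didn't pass, so
    # model-specific params keep priority over router defaults.
    for key, value in defaults.items():
        if value is None:
            continue
        kwargs.setdefault(key, value)

    # Merge metadata under "metadata" or "litellm_metadata", as requested.
    kwargs.setdefault(metadata_variable_name, {}).update(metadata_defaults)

kwargs = {"timeout": 10, "metadata": {"run": 1}}
merge_defaults(kwargs, {"timeout": 600, "num_retries": 2, "metadata": {"team": "a"}})
assert kwargs == {"timeout": 10, "num_retries": 2, "metadata": {"run": 1, "team": "a"}}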
@@ -3243,11 +3247,11 @@ class Router:
             if isinstance(e, litellm.ContextWindowExceededError):
                 if context_window_fallbacks is not None:
-                    fallback_model_group: Optional[
-                        List[str]
-                    ] = self._get_fallback_model_group_from_fallbacks(
-                        fallbacks=context_window_fallbacks,
-                        model_group=model_group,
-                    )
+                    fallback_model_group: Optional[List[str]] = (
+                        self._get_fallback_model_group_from_fallbacks(
+                            fallbacks=context_window_fallbacks,
+                            model_group=model_group,
+                        )
+                    )
                     if fallback_model_group is None:
                         raise original_exception
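
Router fallbacks are configured as a list of single-key dicts mapping a model group to the groups to try next; a sketch of the lookup that the _get_fallback_model_group_from_fallbacks call performs (helper name from the diff, body assumed, not the actual implementation):

from typing import Dict, List, Optional

def get_fallback_model_group(
    fallbacks: List[Dict[str, List[str]]], model_group: str
) -> Optional[List[str]]:
    # Each entry maps one model group to its fallback groups,
    # e.g. [{"gpt-4": ["gpt-4o", "claude-3-5-sonnet"]}].
    for mapping in fallbacks:
        if model_group in mapping:
            return mapping[model_group]
    return None  # the caller re-raises the original exception in this case

assert get_fallback_model_group([{"gpt-4": ["gpt-4o"]}], "gpt-4") == ["gpt-4o"]
assert get_fallback_model_group([{"gpt-4": ["gpt-4o"]}], "mistral") is None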
@@ -3279,11 +3283,11 @@ class Router:
                     e.message += "\n{}".format(error_message)
             elif isinstance(e, litellm.ContentPolicyViolationError):
                 if content_policy_fallbacks is not None:
-                    fallback_model_group: Optional[
-                        List[str]
-                    ] = self._get_fallback_model_group_from_fallbacks(
-                        fallbacks=content_policy_fallbacks,
-                        model_group=model_group,
-                    )
+                    fallback_model_group: Optional[List[str]] = (
+                        self._get_fallback_model_group_from_fallbacks(
+                            fallbacks=content_policy_fallbacks,
+                            model_group=model_group,
+                        )
+                    )
                     if fallback_model_group is None:
                         raise original_exception