mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
build: merge branch
This commit is contained in:
parent
f63cf00306
commit
4418e6dd14
9 changed files with 301 additions and 56 deletions
|
@ -629,37 +629,6 @@ class Router:
|
|||
self.aget_messages = self.factory_function(litellm.aget_messages)
|
||||
self.arun_thread = self.factory_function(litellm.arun_thread)
|
||||
|
||||
def validate_fallbacks(self, fallback_param: Optional[List]):
    """Validate the fallbacks parameter.

    Every item must be a single-key dictionary (one model mapped to its
    fallback targets). A ``None`` parameter is accepted and skipped.

    Raises:
        ValueError: if an item is not a dict, or has a key count != 1.
    """
    if fallback_param is None:
        return
    for entry in fallback_param:
        # Each fallback entry must be a mapping, not a bare value.
        if not isinstance(entry, dict):
            raise ValueError(f"Item '{entry}' is not a dictionary.")
        key_count = len(entry)
        # Exactly one key: one primary model per fallback rule.
        if key_count != 1:
            raise ValueError(
                f"Dictionary '{entry}' must have exactly one key, but has {key_count} keys."
            )
|
||||
|
||||
def add_optional_pre_call_checks(
    self, optional_pre_call_checks: Optional[OptionalPreCallChecks]
):
    """Register optional router pre-call check callbacks.

    For each recognized check name, constructs the matching
    CustomLogger and registers it with litellm's callback manager.
    Unrecognized names are silently ignored; ``None`` is a no-op.
    """
    if optional_pre_call_checks is None:
        return
    for check_name in optional_pre_call_checks:
        callback: Optional[CustomLogger] = None
        if check_name == "prompt_caching":
            # Routes follow-up requests to deployments with warm prompt caches.
            callback = PromptCachingDeploymentCheck(cache=self.cache)
        elif check_name == "router_budget_limiting":
            # Enforces per-provider spend budgets before dispatching.
            callback = RouterBudgetLimiting(
                dual_cache=self.cache,
                provider_budget_config=self.provider_budget_config,
                model_list=self.model_list,
            )
        if callback is not None:
            litellm.logging_callback_manager.add_litellm_callback(callback)
|
||||
|
||||
def routing_strategy_init(
|
||||
self, routing_strategy: Union[RoutingStrategy, str], routing_strategy_args: dict
|
||||
):
|
||||
|
@ -725,6 +694,37 @@ class Router:
|
|||
else:
|
||||
pass
|
||||
|
||||
def validate_fallbacks(self, fallback_param: Optional[List]):
    """Validate the fallbacks parameter.

    Every item must be a single-key dictionary (one model mapped to its
    fallback targets). A ``None`` parameter is accepted and skipped.

    Raises:
        ValueError: if an item is not a dict, or has a key count != 1.
    """
    if fallback_param is None:
        return
    for entry in fallback_param:
        # Each fallback entry must be a mapping, not a bare value.
        if not isinstance(entry, dict):
            raise ValueError(f"Item '{entry}' is not a dictionary.")
        key_count = len(entry)
        # Exactly one key: one primary model per fallback rule.
        if key_count != 1:
            raise ValueError(
                f"Dictionary '{entry}' must have exactly one key, but has {key_count} keys."
            )
|
||||
|
||||
def add_optional_pre_call_checks(
    self, optional_pre_call_checks: Optional[OptionalPreCallChecks]
):
    """Register optional router pre-call check callbacks.

    For each recognized check name, constructs the matching
    CustomLogger and registers it with litellm's callback manager.
    Unrecognized names are silently ignored; ``None`` is a no-op.
    """
    if optional_pre_call_checks is None:
        return
    for check_name in optional_pre_call_checks:
        callback: Optional[CustomLogger] = None
        if check_name == "prompt_caching":
            # Routes follow-up requests to deployments with warm prompt caches.
            callback = PromptCachingDeploymentCheck(cache=self.cache)
        elif check_name == "router_budget_limiting":
            # Enforces per-provider spend budgets before dispatching.
            callback = RouterBudgetLimiting(
                dual_cache=self.cache,
                provider_budget_config=self.provider_budget_config,
                model_list=self.model_list,
            )
        if callback is not None:
            litellm.logging_callback_manager.add_litellm_callback(callback)
|
||||
|
||||
def print_deployment(self, deployment: dict):
|
||||
"""
|
||||
returns a copy of the deployment with the api key masked
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue