mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00

add supports_reasoning

commit 8a40fa0f56 (parent 02c265181c)
1 changed file with 25 additions and 20 deletions
@@ -339,9 +339,9 @@ class Router:
         )  # names of models under litellm_params. ex. azure/chatgpt-v-2
         self.deployment_latency_map = {}
         ### CACHING ###
-        cache_type: Literal[
-            "local", "redis", "redis-semantic", "s3", "disk"
-        ] = "local"  # default to an in-memory cache
+        cache_type: Literal["local", "redis", "redis-semantic", "s3", "disk"] = (
+            "local"  # default to an in-memory cache
+        )
         redis_cache = None
         cache_config: Dict[str, Any] = {}
 
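This hunk, like most below, is a formatting-only rewrap (black-style parenthesized assignment) with no behavioral change. For context, `cache_type` is a local variable in `Router.__init__` that is promoted from "local" to "redis" when Redis connection details are supplied. A minimal sketch of that wiring, with placeholder model name and connection values:

```python
from litellm import Router

# Placeholder values; any reachable Redis instance works.
router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "gpt-3.5-turbo"},
        }
    ],
    cache_responses=True,  # turn response caching on
    redis_host="localhost",  # presence of redis_* args selects the redis cache
    redis_port=6379,
)
```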
@@ -562,9 +562,9 @@ class Router:
             )
         )
 
-        self.model_group_retry_policy: Optional[
-            Dict[str, RetryPolicy]
-        ] = model_group_retry_policy
+        self.model_group_retry_policy: Optional[Dict[str, RetryPolicy]] = (
+            model_group_retry_policy
+        )
 
         self.allowed_fails_policy: Optional[AllowedFailsPolicy] = None
         if allowed_fails_policy is not None:
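Another cosmetic rewrap. The `model_group_retry_policy` it touches maps a model group name to a `RetryPolicy`. A hedged sketch of how it is typically populated; the retry counts are placeholders and the per-exception field names follow the pattern in litellm's retry-policy docs:

```python
from litellm import Router
from litellm.router import RetryPolicy

# Placeholder retry counts for one model group.
router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "gpt-3.5-turbo"},
        }
    ],
    model_group_retry_policy={
        "gpt-3.5-turbo": RetryPolicy(TimeoutErrorRetries=3, RateLimitErrorRetries=3),
    },
)
```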
@@ -619,7 +619,7 @@ class Router:
 
     @staticmethod
     def _create_redis_cache(
-        cache_config: Dict[str, Any]
+        cache_config: Dict[str, Any],
     ) -> Union[RedisCache, RedisClusterCache]:
         """
         Initializes either a RedisCache or RedisClusterCache based on the cache_config.
@@ -1099,9 +1099,9 @@ class Router:
         """
         Adds default litellm params to kwargs, if set.
         """
-        self.default_litellm_params[
-            metadata_variable_name
-        ] = self.default_litellm_params.pop("metadata", {})
+        self.default_litellm_params[metadata_variable_name] = (
+            self.default_litellm_params.pop("metadata", {})
+        )
         for k, v in self.default_litellm_params.items():
             if (
                 k not in kwargs and v is not None
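The hunk above is where the router re-keys a `metadata` entry in `default_litellm_params` under its metadata variable name before merging those defaults into per-request kwargs. A small sketch of the behavior being preserved; the metadata values are invented:

```python
from litellm import Router

# Invented metadata; defaults set here are merged into every request's
# kwargs unless the caller already supplied that key.
router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "gpt-3.5-turbo"},
        }
    ],
    default_litellm_params={"metadata": {"team": "platform"}},
)
```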
@@ -3217,12 +3217,12 @@ class Router:
 
             if isinstance(e, litellm.ContextWindowExceededError):
                 if context_window_fallbacks is not None:
-                    fallback_model_group: Optional[
-                        List[str]
-                    ] = self._get_fallback_model_group_from_fallbacks(
+                    fallback_model_group: Optional[List[str]] = (
+                        self._get_fallback_model_group_from_fallbacks(
                             fallbacks=context_window_fallbacks,
                             model_group=model_group,
                         )
+                    )
                     if fallback_model_group is None:
                         raise original_exception
 
@@ -3253,12 +3253,12 @@ class Router:
                     e.message += "\n{}".format(error_message)
             elif isinstance(e, litellm.ContentPolicyViolationError):
                 if content_policy_fallbacks is not None:
-                    fallback_model_group: Optional[
-                        List[str]
-                    ] = self._get_fallback_model_group_from_fallbacks(
+                    fallback_model_group: Optional[List[str]] = (
+                        self._get_fallback_model_group_from_fallbacks(
                             fallbacks=content_policy_fallbacks,
                             model_group=model_group,
                         )
+                    )
                     if fallback_model_group is None:
                         raise original_exception
 
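These two fallback hunks are the same cosmetic rewrap around `_get_fallback_model_group_from_fallbacks`, once for context-window errors and once for content-policy errors. The fallback lists they consult are configured on the router; a hedged sketch of the documented shape, with placeholder model names:

```python
from litellm import Router

# Placeholder model names; each dict maps a model group to the groups
# tried when that error type is raised for it.
router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {"model": "gpt-3.5-turbo"},
        },
        {
            "model_name": "claude-3-haiku",
            "litellm_params": {"model": "anthropic/claude-3-haiku-20240307"},
        },
    ],
    context_window_fallbacks=[{"gpt-3.5-turbo": ["claude-3-haiku"]}],
    content_policy_fallbacks=[{"gpt-3.5-turbo": ["claude-3-haiku"]}],
)
```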
@@ -5020,6 +5020,11 @@ class Router:
                 and model_info["supports_web_search"] is True  # type: ignore
             ):
                 model_group_info.supports_web_search = True
+            if (
+                model_info.get("supports_reasoning", None) is not None
+                and model_info["supports_reasoning"] is True  # type: ignore
+            ):
+                model_group_info.supports_reasoning = True
             if (
                 model_info.get("supported_openai_params", None) is not None
                 and model_info["supported_openai_params"] is not None