Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
[Feat] Add litellm.supports_reasoning() util to track if an llm supports reasoning (#9923)
* add supports_reasoning for xai models
* add "supports_reasoning": true for o1 series models
* add supports_reasoning util
* add litellm.supports_reasoning
* add supports reasoning for claude 3-7 models
* add deepseek as supports reasoning
* test_supports_reasoning
* add supports reasoning to model group info
* add supports_reasoning
* docs supports reasoning
* fix supports_reasoning test
* "supports_reasoning": false,
* fix test
* supports_reasoning
parent 3afc4fe770
commit f7dfa264bb
13 changed files with 301 additions and 73 deletions
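The new helper sits alongside the existing `supports_vision` / `supports_web_search` checks in litellm's utils module (see the hunks below). A minimal usage sketch, based on the signature added in this commit; the model names are illustrative and must be present in litellm's model cost map:

```python
import litellm

# True for models whose cost-map entry sets "supports_reasoning": true,
# e.g. the o1 series, claude 3-7 models, and deepseek per this commit.
print(litellm.supports_reasoning(model="openai/o1"))

# Models without the flag fall back to False.
print(litellm.supports_reasoning(model="openai/gpt-4o-mini"))
```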
```diff
@@ -484,7 +484,7 @@ def load_credentials_from_list(kwargs: dict):
 
 
 def get_dynamic_callbacks(
-    dynamic_callbacks: Optional[List[Union[str, Callable, CustomLogger]]]
+    dynamic_callbacks: Optional[List[Union[str, Callable, CustomLogger]]],
 ) -> List:
     returned_callbacks = litellm.callbacks.copy()
     if dynamic_callbacks:
```
```diff
@@ -516,9 +516,9 @@ def function_setup(  # noqa: PLR0915
     function_id: Optional[str] = kwargs["id"] if "id" in kwargs else None
 
     ## DYNAMIC CALLBACKS ##
-    dynamic_callbacks: Optional[
-        List[Union[str, Callable, CustomLogger]]
-    ] = kwargs.pop("callbacks", None)
+    dynamic_callbacks: Optional[List[Union[str, Callable, CustomLogger]]] = (
+        kwargs.pop("callbacks", None)
+    )
    all_callbacks = get_dynamic_callbacks(dynamic_callbacks=dynamic_callbacks)
 
    if len(all_callbacks) > 0:
```
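The reformatted block above is where per-request callbacks get popped off kwargs and merged with the global `litellm.callbacks` list. A sketch of supplying one, assuming the `CustomLogger` base class referenced in the type hints (from `litellm.integrations.custom_logger`):

```python
import litellm
from litellm.integrations.custom_logger import CustomLogger

class PrintLogger(CustomLogger):
    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        print("success:", kwargs.get("model"))

# Passed via kwargs["callbacks"], popped in function_setup and merged
# with litellm.callbacks by get_dynamic_callbacks.
response = litellm.completion(
    model="openai/gpt-4o-mini",
    messages=[{"role": "user", "content": "hello"}],
    callbacks=[PrintLogger()],
)
```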
```diff
@@ -1202,9 +1202,9 @@ def client(original_function):  # noqa: PLR0915
                     exception=e,
                     retry_policy=kwargs.get("retry_policy"),
                 )
-                kwargs[
-                    "retry_policy"
-                ] = reset_retry_policy()  # prevent infinite loops
+                kwargs["retry_policy"] = (
+                    reset_retry_policy()
+                )  # prevent infinite loops
                 litellm.num_retries = (
                     None  # set retries to None to prevent infinite loops
                 )
```
```diff
@@ -2229,6 +2229,15 @@ def supports_vision(model: str, custom_llm_provider: Optional[str] = None) -> bool:
     )
 
 
+def supports_reasoning(model: str, custom_llm_provider: Optional[str] = None) -> bool:
+    """
+    Check if the given model supports reasoning and return a boolean value.
+    """
+    return _supports_factory(
+        model=model, custom_llm_provider=custom_llm_provider, key="supports_reasoning"
+    )
+
+
 def supports_embedding_image_input(
     model: str, custom_llm_provider: Optional[str] = None
 ) -> bool:
```
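`_supports_factory` itself is not shown in this diff. A rough sketch of the lookup it presumably performs, written as a hypothetical stand-in rather than the library's actual code: fetch the model's info and read the boolean flag, treating unmapped models as False.

```python
from typing import Optional

import litellm

def _supports_flag_sketch(
    model: str,
    custom_llm_provider: Optional[str] = None,
    key: str = "supports_reasoning",
) -> bool:
    """Hypothetical stand-in for _supports_factory: read a capability
    flag from the model info map, defaulting to False."""
    try:
        info = litellm.get_model_info(
            model=model, custom_llm_provider=custom_llm_provider
        )
        return bool(info.get(key, False))
    except Exception:
        # get_model_info raises for unmapped models; treat as unsupported.
        return False
```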
```diff
@@ -3004,16 +3013,16 @@ def get_optional_params(  # noqa: PLR0915
                 True  # so that main.py adds the function call to the prompt
             )
             if "tools" in non_default_params:
-                optional_params[
-                    "functions_unsupported_model"
-                ] = non_default_params.pop("tools")
+                optional_params["functions_unsupported_model"] = (
+                    non_default_params.pop("tools")
+                )
                 non_default_params.pop(
                     "tool_choice", None
                 )  # causes ollama requests to hang
             elif "functions" in non_default_params:
-                optional_params[
-                    "functions_unsupported_model"
-                ] = non_default_params.pop("functions")
+                optional_params["functions_unsupported_model"] = (
+                    non_default_params.pop("functions")
+                )
         elif (
             litellm.add_function_to_prompt
         ):  # if user opts to add it to prompt instead
```
```diff
@@ -3036,10 +3045,10 @@ def get_optional_params(  # noqa: PLR0915
 
     if "response_format" in non_default_params:
         if provider_config is not None:
-            non_default_params[
-                "response_format"
-            ] = provider_config.get_json_schema_from_pydantic_object(
-                response_format=non_default_params["response_format"]
-            )
+            non_default_params["response_format"] = (
+                provider_config.get_json_schema_from_pydantic_object(
+                    response_format=non_default_params["response_format"]
+                )
+            )
         else:
             non_default_params["response_format"] = type_to_response_format_param(
```
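For context, the call being reformatted above turns a Pydantic `response_format` into a plain JSON schema before it reaches the provider. `get_json_schema_from_pydantic_object` is provider-specific; under Pydantic v2 the core conversion looks roughly like this sketch:

```python
from pydantic import BaseModel

class Answer(BaseModel):
    reasoning: str
    final_answer: str

# Pydantic v2's model_json_schema() yields the dict that providers
# expect inside a json_schema-style response_format.
response_format = {
    "type": "json_schema",
    "json_schema": {"name": "Answer", "schema": Answer.model_json_schema()},
}
```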
```diff
@@ -4055,9 +4064,9 @@ def _count_characters(text: str) -> int:
 
 
 def get_response_string(response_obj: Union[ModelResponse, ModelResponseStream]) -> str:
-    _choices: Union[
-        List[Union[Choices, StreamingChoices]], List[StreamingChoices]
-    ] = response_obj.choices
+    _choices: Union[List[Union[Choices, StreamingChoices]], List[StreamingChoices]] = (
+        response_obj.choices
+    )
 
     response_str = ""
     for choice in _choices:
```
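The loop body of `get_response_string` is truncated in this hunk. Presumably it concatenates whatever text each choice carries; a sketch of that behavior for both streaming and non-streaming choices (an assumption about the internals, not the function's verbatim body):

```python
def response_text_sketch(response_obj) -> str:
    """Sketch of what get_response_string presumably does: concatenate
    text from streaming deltas or full messages across choices."""
    response_str = ""
    for choice in response_obj.choices:
        delta = getattr(choice, "delta", None)      # StreamingChoices chunk
        message = getattr(choice, "message", None)  # full Choices object
        if delta is not None and getattr(delta, "content", None):
            response_str += delta.content
        elif message is not None and getattr(message, "content", None):
            response_str += message.content
    return response_str
```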
```diff
@@ -4597,6 +4606,7 @@ def _get_model_info_helper(  # noqa: PLR0915
                 "supports_native_streaming", None
             ),
             supports_web_search=_model_info.get("supports_web_search", False),
+            supports_reasoning=_model_info.get("supports_reasoning", False),
             search_context_cost_per_query=_model_info.get(
                 "search_context_cost_per_query", None
             ),
```
```diff
@@ -4669,6 +4679,7 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> ModelInfo:
             supports_audio_output: Optional[bool]
             supports_pdf_input: Optional[bool]
             supports_web_search: Optional[bool]
+            supports_reasoning: Optional[bool]
 
     Raises:
         Exception: If the model is not mapped yet.
```
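With `supports_reasoning` threaded through `_get_model_info_helper` and documented in `get_model_info` above, the flag is also readable straight from model info. A quick check; the model name is illustrative and must be in the cost map:

```python
import litellm

info = litellm.get_model_info(model="anthropic/claude-3-7-sonnet-20250219")
# Defaults to False when the cost-map entry omits the key,
# per the _get_model_info_helper change above.
print(info.get("supports_reasoning"))
```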
```diff
@@ -6188,7 +6199,7 @@ def validate_chat_completion_user_messages(messages: List[AllMessageValues]):
 
 
 def validate_chat_completion_tool_choice(
-    tool_choice: Optional[Union[dict, str]]
+    tool_choice: Optional[Union[dict, str]],
 ) -> Optional[Union[dict, str]]:
     """
     Confirm the tool choice is passed in the OpenAI format.
```
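For reference, the OpenAI-format values that `validate_chat_completion_tool_choice` checks for look like the following (a sketch of the format itself, not of the validator):

```python
# String form: let the model decide, forbid, or require tool use.
tool_choice = "auto"  # also "none" or "required"

# Dict form: force a call to one specific function.
tool_choice = {
    "type": "function",
    "function": {"name": "get_current_weather"},
}
```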