fix(o_series_transformation.py): fix optional param check for o-series models (#8787)

* fix(o_series_transformation.py): fix optional param check for o-series models

o3-mini and o1 do not support parallel tool calling (see the usage sketch after this list)

* fix(utils.py): support 'drop_params' for 'thinking' param across models

allows switching to older Claude versions (or non-Anthropic models) with the param safely dropped

* fix: fix passing thinking param in optional params

allows dropping the thinking param where it is not applicable (see the usage sketch after this list)

* test: update old model

* fix(utils.py): fix linting errors

* fix(main.py): add 'thinking' param to acompletion
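
A minimal usage sketch of the changes above (model names illustrative, assuming standard litellm completion usage):

import litellm

# With drop_params=True, the Anthropic-style `thinking` param (typed in this
# commit as AnthropicThinkingParam) is dropped for models that do not accept
# it, instead of raising an error.
response = litellm.completion(
    model="claude-3-5-sonnet-20240620",  # illustrative older Claude model
    messages=[{"role": "user", "content": "hi"}],
    thinking={"type": "enabled", "budget_tokens": 1024},  # dropped where not applicable
    drop_params=True,
)

# o3-mini and o1 do not support parallel tool calling, so the
# parallel_tool_calls argument is likewise dropped rather than forwarded.
response = litellm.completion(
    model="o3-mini",
    messages=[{"role": "user", "content": "hi"}],
    parallel_tool_calls=True,
    drop_params=True,
)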
Krish Dholakia · 2025-02-26 12:26:55 -08:00 · committed by GitHub · commit 017c482d7b (parent aabb5c0df4)
11 changed files with 87 additions and 31 deletions


@@ -119,7 +119,10 @@ from litellm.router_utils.get_retry_from_policy import (
     reset_retry_policy,
 )
 from litellm.secret_managers.main import get_secret
-from litellm.types.llms.anthropic import ANTHROPIC_API_ONLY_HEADERS
+from litellm.types.llms.anthropic import (
+    ANTHROPIC_API_ONLY_HEADERS,
+    AnthropicThinkingParam,
+)
 from litellm.types.llms.openai import (
     AllMessageValues,
     AllPromptValues,
@@ -1969,6 +1972,19 @@ def supports_response_schema(
     )


+def supports_parallel_function_calling(
+    model: str, custom_llm_provider: Optional[str] = None
+) -> bool:
+    """
+    Check if the given model supports parallel tool calls and return a boolean value.
+    """
+    return _supports_factory(
+        model=model,
+        custom_llm_provider=custom_llm_provider,
+        key="supports_parallel_function_calling",
+    )
+
+
 def supports_function_calling(
     model: str, custom_llm_provider: Optional[str] = None
 ) -> bool:
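
The rewritten helper delegates to the shared _supports_factory lookup, so callers get a plain boolean driven by model metadata; a usage sketch (model names illustrative):

import litellm

# Returns True/False based on the model's metadata entry.
print(litellm.supports_parallel_function_calling("gpt-4o"))
print(
    litellm.supports_parallel_function_calling(
        model="o3-mini", custom_llm_provider="openai"
    )
)  # False after this fix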
@@ -2118,30 +2134,6 @@ def supports_embedding_image_input(
     )


-def supports_parallel_function_calling(model: str):
-    """
-    Check if the given model supports parallel function calling and return True if it does, False otherwise.
-
-    Parameters:
-        model (str): The model to check for support of parallel function calling.
-
-    Returns:
-        bool: True if the model supports parallel function calling, False otherwise.
-
-    Raises:
-        Exception: If the model is not found in the model_cost dictionary.
-    """
-    if model in litellm.model_cost:
-        model_info = litellm.model_cost[model]
-        if model_info.get("supports_parallel_function_calling", False) is True:
-            return True
-        return False
-    else:
-        raise Exception(
-            f"Model not supports parallel function calling. You passed model={model}."
-        )
-
-
 ####### HELPER FUNCTIONS ################
 def _update_dictionary(existing_dict: Dict, new_dict: dict) -> dict:
     for k, v in new_dict.items():
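
Note the behavior change in the removal above: the old implementation raised an Exception for a model missing from litellm.model_cost, while the factory-based replacement is expected to fail closed and return False for unknown models (a sketch, assuming _supports_factory's usual handling):

import litellm

# Previously this raised; with the factory-based check it should
# simply report no support.
print(litellm.supports_parallel_function_calling("not-a-real-model"))  # False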
@@ -2752,6 +2744,7 @@ def get_optional_params(  # noqa: PLR0915
     reasoning_effort=None,
     additional_drop_params=None,
     messages: Optional[List[AllMessageValues]] = None,
+    thinking: Optional[AnthropicThinkingParam] = None,
     **kwargs,
 ):
     # retrieve all parameters passed to the function
@@ -2836,9 +2829,11 @@
         "additional_drop_params": None,
         "messages": None,
         "reasoning_effort": None,
+        "thinking": None,
     }

     # filter out those parameters that were passed with non-default values
     non_default_params = {
         k: v
         for k, v in passed_params.items()
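
The comprehension above compares each passed parameter against its default so that only caller-supplied values (now including thinking) flow into provider-specific handling; a minimal standalone sketch of the idea (function name hypothetical):

def filter_non_default(passed_params: dict, default_params: dict) -> dict:
    # Keep only parameters the caller set to a non-default value;
    # e.g. thinking=None is filtered out unless explicitly provided.
    return {
        k: v
        for k, v in passed_params.items()
        if k in default_params and v != default_params[k]
    }

print(filter_non_default(
    {"temperature": 0.2, "thinking": None},
    {"temperature": None, "thinking": None},
))  # -> {'temperature': 0.2}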