Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)

Merge f517e5953a into b82af5b826
This commit is contained in: 6053a53b64

1 changed file with 34 additions and 67 deletions

litellm/utils.py (101 changed lines)
@@ -2979,84 +2979,51 @@ def get_optional_params(  # noqa: PLR0915
         "thinking": None,
     }

-    # filter out those parameters that were passed with non-default values
+    # Parameters that can be supplied by the user that we don't want to include in non-default-params.
+    excluded_non_default_params = {
+        "additional_drop_params",
+        "allowed_openai_params",
+        "api_version",
+        "custom_llm_provider",
+        "drop_params",
+        "messages",
+        "model",
+    }
+
+    # From the parameters passed into this function, filter for parameters with non-default values.
     non_default_params = {
         k: v
         for k, v in passed_params.items()
         if (
-            k != "model"
-            and k != "custom_llm_provider"
-            and k != "api_version"
-            and k != "drop_params"
-            and k != "allowed_openai_params"
-            and k != "additional_drop_params"
-            and k != "messages"
+            k not in excluded_non_default_params
             and k in default_params
             and v != default_params[k]
-            and _should_drop_param(k=k, additional_drop_params=additional_drop_params)
-            is False
+            and not _should_drop_param(k=k, additional_drop_params=additional_drop_params)
         )
     }

     ## raise exception if function calling passed in for a provider that doesn't support it
-    if (
-        "functions" in non_default_params
-        or "function_call" in non_default_params
-        or "tools" in non_default_params
-    ):
-        if (
-            custom_llm_provider == "ollama"
-            and custom_llm_provider != "text-completion-openai"
-            and custom_llm_provider != "azure"
-            and custom_llm_provider != "vertex_ai"
-            and custom_llm_provider != "anyscale"
-            and custom_llm_provider != "together_ai"
-            and custom_llm_provider != "groq"
-            and custom_llm_provider != "nvidia_nim"
-            and custom_llm_provider != "cerebras"
-            and custom_llm_provider != "xai"
-            and custom_llm_provider != "ai21_chat"
-            and custom_llm_provider != "volcengine"
-            and custom_llm_provider != "deepseek"
-            and custom_llm_provider != "codestral"
-            and custom_llm_provider != "mistral"
-            and custom_llm_provider != "anthropic"
-            and custom_llm_provider != "cohere_chat"
-            and custom_llm_provider != "cohere"
-            and custom_llm_provider != "bedrock"
-            and custom_llm_provider != "ollama_chat"
-            and custom_llm_provider != "openrouter"
-            and custom_llm_provider not in litellm.openai_compatible_providers
-        ):
-            if custom_llm_provider == "ollama":
-                # ollama actually supports json output
-                optional_params["format"] = "json"
-                litellm.add_function_to_prompt = (
-                    True  # so that main.py adds the function call to the prompt
-                )
-                if "tools" in non_default_params:
-                    optional_params["functions_unsupported_model"] = (
-                        non_default_params.pop("tools")
-                    )
-                    non_default_params.pop(
-                        "tool_choice", None
-                    )  # causes ollama requests to hang
-                elif "functions" in non_default_params:
-                    optional_params["functions_unsupported_model"] = (
-                        non_default_params.pop("functions")
-                    )
-            elif (
-                litellm.add_function_to_prompt
-            ):  # if user opts to add it to prompt instead
-                optional_params["functions_unsupported_model"] = non_default_params.pop(
-                    "tools", non_default_params.pop("functions", None)
-                )
-            else:
-                raise UnsupportedParamsError(
-                    status_code=500,
-                    message=f"Function calling is not supported by {custom_llm_provider}.",
-                )
+    if any(key in non_default_params for key in ("functions", "function_call", "tools")):
+        functions_unsupported_model_key = "functions_unsupported_model"
+
+        # Handle Ollama as a special case (ollama actually supports JSON output)
+        if custom_llm_provider == "ollama":
+            optional_params["format"] = "json"
+            litellm.add_function_to_prompt = True  # so that main.py adds the function call to the prompt
+            non_default_params.pop("tool_choice", None)  # causes ollama requests to hang
+
+        # Handle all other providers that are not OpenAI-compatible
+        if litellm.add_function_to_prompt and (custom_llm_provider not in litellm.openai_compatible_providers):
+            # Attempt to add the supplied function call to the prompt, preferring tools > functions > function_call
+            function_call_value = non_default_params.pop("tools",
+                non_default_params.pop("functions",
+                    non_default_params.pop("function_call", None)))
+            optional_params[functions_unsupported_model_key] = function_call_value
+        else:
+            raise UnsupportedParamsError(
+                status_code=500,
+                message=f"Function calling is not supported by {custom_llm_provider}.",
+            )

     provider_config: Optional[BaseConfig] = None
     if custom_llm_provider is not None and custom_llm_provider in [
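A note on the filtering change above: the rewritten comprehension swaps seven chained `k != ...` comparisons for a single set-membership test, and replaces the `... is False` comparison with `not`. Below is a minimal, self-contained sketch of that pattern; `filter_non_default_params` and the stubbed `_should_drop_param` are illustrative stand-ins, not litellm's actual helpers.

# Sketch only: stand-in names and toy values, not litellm's module-level objects.
EXCLUDED_NON_DEFAULT_PARAMS = {
    "additional_drop_params",
    "allowed_openai_params",
    "api_version",
    "custom_llm_provider",
    "drop_params",
    "messages",
    "model",
}


def _should_drop_param(k, additional_drop_params):
    # Stub: drop a parameter only if the caller explicitly listed it.
    return additional_drop_params is not None and k in additional_drop_params


def filter_non_default_params(passed_params, default_params, additional_drop_params=None):
    # Keep only user-supplied parameters that differ from their defaults.
    return {
        k: v
        for k, v in passed_params.items()
        if (
            k not in EXCLUDED_NON_DEFAULT_PARAMS
            and k in default_params
            and v != default_params[k]
            and not _should_drop_param(k=k, additional_drop_params=additional_drop_params)
        )
    }


defaults = {"temperature": None, "max_tokens": None, "thinking": None}
passed = {"model": "gpt-4o", "temperature": 0.2, "max_tokens": None}
print(filter_non_default_params(passed, defaults))  # {'temperature': 0.2}

Assuming `_should_drop_param` returns a bool, the new form is equivalent to the old one; the set just gives the exclusion list a single obvious place to grow.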
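The guard around the function-calling block also changes shape, from three chained `or` membership tests to `any(...)` over a tuple of keys. A quick equivalence check, using only the key names that appear in the hunk:

# Equivalence check for the rewritten guard; sample dicts are made up.
KEYS = ("functions", "function_call", "tools")


def old_guard(non_default_params):
    return (
        "functions" in non_default_params
        or "function_call" in non_default_params
        or "tools" in non_default_params
    )


def new_guard(non_default_params):
    return any(key in non_default_params for key in KEYS)


for sample in ({}, {"tools": []}, {"function_call": "auto", "stream": True}):
    assert old_guard(sample) == new_guard(sample)
print("guards agree on these samples")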
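Finally, the new code folds the old tools/functions special cases into one nested `dict.pop` chain with the precedence named in its comment (tools > functions > function_call). A small sketch of that idiom with a made-up parameter dict:

# Illustrative only: a toy non_default_params dict, not real request parameters.
non_default_params = {"functions": [{"name": "get_weather"}], "tool_choice": "auto"}

# Prefer "tools", then "functions", then "function_call".
function_call_value = non_default_params.pop(
    "tools", non_default_params.pop("functions", non_default_params.pop("function_call", None))
)

print(function_call_value)  # [{'name': 'get_weather'}]
print(non_default_params)   # {'tool_choice': 'auto'}

One subtlety worth flagging in review: the inner `pop` calls run before the outer one, so `functions` and `function_call` are removed from the dict even when `tools` is present. If that side effect ever matters, looping over the three keys and breaking on the first hit avoids it.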