Mirror of https://github.com/BerriAI/litellm.git

Commit 05dcf96e6d: add allowed_openai_params
Parent: 1b40f3d1db
2 changed files with 18 additions and 3 deletions
@@ -1115,6 +1115,7 @@ def completion( # type: ignore # noqa: PLR0915
         messages=messages,
         reasoning_effort=reasoning_effort,
         thinking=thinking,
+        allowed_openai_params=kwargs.get("allowed_openai_params"),
         **non_default_params,
     )

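The completion() hunk above is the only public entry point for the new option: it is read straight out of **kwargs, so callers pass it per request. A minimal usage sketch, where the model name and the choice of "logprobs" as the overridden param are illustrative assumptions, not taken from this commit:

    import litellm

    # Illustrative: "logprobs" is an OpenAI-style param that a provider's
    # supported-params list may not include. The override tells LiteLLM's
    # client-side validation to let it through; the provider itself may
    # still reject it server-side.
    response = litellm.completion(
        model="anthropic/claude-3-sonnet-20240229",
        messages=[{"role": "user", "content": "Hello"}],
        logprobs=True,
        allowed_openai_params=["logprobs"],
    )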
@@ -2839,6 +2839,7 @@ def get_optional_params( # noqa: PLR0915
     api_version=None,
     parallel_tool_calls=None,
     drop_params=None,
+    allowed_openai_params: Optional[List[str]] = None,
     reasoning_effort=None,
     additional_drop_params=None,
     messages: Optional[List[AllMessageValues]] = None,
@@ -2924,6 +2925,7 @@ def get_optional_params( # noqa: PLR0915
         "api_version": None,
         "parallel_tool_calls": None,
         "drop_params": None,
+        "allowed_openai_params": None,
         "additional_drop_params": None,
         "messages": None,
         "reasoning_effort": None,
@@ -2940,6 +2942,7 @@ def get_optional_params( # noqa: PLR0915
         and k != "custom_llm_provider"
         and k != "api_version"
         and k != "drop_params"
+        and k != "allowed_openai_params"
         and k != "additional_drop_params"
         and k != "messages"
         and k in default_params
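The new `and k != "allowed_openai_params"` guard keeps the override out of the params that get forwarded to the provider; it only steers validation. A standalone sketch of that filter, with simplified names and a much shorter control-key list than the real function uses:

    # Simplified sketch of the non_default_params filter in get_optional_params().
    # Only keys that differ from their defaults and are not control-flow kwargs
    # (like allowed_openai_params) survive to be sent to the provider.
    default_params = {"temperature": None, "top_p": None, "logprobs": None}
    passed_params = {
        "temperature": 0.2,
        "logprobs": None,                       # unchanged from default -> dropped
        "allowed_openai_params": ["logprobs"],  # control kwarg -> dropped
    }
    control_keys = {
        "custom_llm_provider", "api_version", "drop_params",
        "allowed_openai_params", "additional_drop_params", "messages",
    }
    non_default_params = {
        k: v
        for k, v in passed_params.items()
        if k not in control_keys and k in default_params and v != default_params[k]
    }
    print(non_default_params)  # {'temperature': 0.2}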
@@ -3048,7 +3051,14 @@ def get_optional_params( # noqa: PLR0915
                 new_parameters.pop("additionalProperties", None)
                 tool_function["parameters"] = new_parameters

-    def _check_valid_arg(supported_params: List[str]):
+    def _check_valid_arg(supported_params: List[str], allowed_openai_params: List[str]):
+        """
+        Check if the params passed to completion() are supported by the provider.
+
+        Args:
+            supported_params: List[str] - supported params from the litellm config
+            allowed_openai_params: List[str] - the user can override the allowed params for a model by passing `allowed_openai_params`
+        """
         verbose_logger.info(
             f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}"
         )
@@ -3058,6 +3068,7 @@ def get_optional_params( # noqa: PLR0915
         verbose_logger.debug(
             f"\nLiteLLM: Non-Default params passed to completion() {non_default_params}"
         )
+        supported_params = supported_params + allowed_openai_params
         unsupported_params = {}
         for k in non_default_params.keys():
             if k not in supported_params:
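The `supported_params = supported_params + allowed_openai_params` line is the whole mechanism: user-listed params are appended to the provider's supported list before the unsupported-params check runs. A self-contained sketch of that flow (simplified; the real function also handles drop_params branches and logs through verbose_logger):

    from typing import Dict, List


    def check_valid_arg(
        supported_params: List[str],
        allowed_openai_params: List[str],
        non_default_params: Dict,
    ) -> None:
        """Simplified model of _check_valid_arg as changed in this commit."""
        # User-supplied overrides are treated as supported for this call.
        supported_params = supported_params + allowed_openai_params
        unsupported_params = {
            k: v for k, v in non_default_params.items() if k not in supported_params
        }
        if unsupported_params:
            raise ValueError(
                f"unsupported parameters: {list(unsupported_params.keys())}"
            )


    # Without the override this raises; with it, the call passes validation.
    check_valid_arg(
        supported_params=["temperature", "max_tokens"],
        allowed_openai_params=["logprobs"],
        non_default_params={"logprobs": True},
    )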
@@ -3082,7 +3093,7 @@ def get_optional_params( # noqa: PLR0915
                 else:
                     raise UnsupportedParamsError(
                         status_code=500,
-                        message=f"{custom_llm_provider} does not support parameters: {unsupported_params}, for model={model}. To drop these, set `litellm.drop_params=True` or for proxy:\n\n`litellm_settings:\n drop_params: true`\n",
+                        message=f"{custom_llm_provider} does not support parameters: {list(unsupported_params.keys())}, for model={model}. To drop these, set `litellm.drop_params=True` or for proxy:\n\n`litellm_settings:\n drop_params: true`\n. \n If you want to use these params dynamically send allowed_openai_params={list(unsupported_params.keys())} in your request.",
                     )

     supported_params = get_supported_openai_params(
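The reworked message also tells the caller how to recover: resend with allowed_openai_params set to the rejected keys. A hedged sketch of that retry pattern, assuming UnsupportedParamsError is importable from litellm.exceptions (the exception is raised in this codebase, but the export location should be checked against your installed version):

    import litellm
    from litellm.exceptions import UnsupportedParamsError  # assumed export location

    messages = [{"role": "user", "content": "Hello"}]
    try:
        response = litellm.completion(
            model="anthropic/claude-3-sonnet-20240229",  # illustrative model
            messages=messages,
            logprobs=True,
        )
    except UnsupportedParamsError:
        # The new error message names the rejected keys; retry with the override.
        response = litellm.completion(
            model="anthropic/claude-3-sonnet-20240229",
            messages=messages,
            logprobs=True,
            allowed_openai_params=["logprobs"],
        )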
@@ -3092,7 +3103,10 @@ def get_optional_params( # noqa: PLR0915
         supported_params = get_supported_openai_params(
             model=model, custom_llm_provider="openai"
         )
-        _check_valid_arg(supported_params=supported_params or [])
+        _check_valid_arg(
+            supported_params=supported_params or [],
+            allowed_openai_params=allowed_openai_params or [],
+        )
     ## raise exception if provider doesn't support passed in param
     if custom_llm_provider == "anthropic":
         ## check if unsupported param passed in