feat(utils.py): support dynamically setting 'drop_params'

Allows user to turn this on/off for individual calls by passing in as a completion arg
Krrish Dholakia 2024-06-05 08:37:09 -07:00
parent 31e8b34123
commit 162f9400d2
3 changed files with 71 additions and 24 deletions
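In short, the per-call toggle means an unsupported OpenAI param can be dropped for a single request without flipping the module-level flag. A minimal sketch of the intended usage (the model, message, and `response_format` value are illustrative, and it assumes `response_format` is not supported by the chosen provider, as in the tests below):

```python
import litellm

# Existing behavior: drop unsupported params globally, for every call.
# litellm.drop_params = True

# New in this commit: opt in for just this request by passing drop_params
# as a completion arg, so the unsupported response_format is dropped
# instead of raising an UnsupportedParamsError.
response = litellm.completion(
    model="command-r",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    response_format={"type": "json_object"},
    drop_params=True,
)
```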


@@ -39,38 +39,34 @@ This is a list of openai params we translate across providers.
Use `litellm.get_supported_openai_params()` for an updated list of params for each model + provider (see the sketch after the table below).
| Provider | temperature | max_tokens | top_p | stream | stream_options | stop | n | presence_penalty | frequency_penalty | functions | function_call | logit_bias | user | response_format | seed | tools | tool_choice | logprobs | top_logprobs | extra_headers |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|Anthropic| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | |✅ | ✅ | ✅ | ✅ | ✅ | | | ✅ |
|OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ | ✅ |
|Azure OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | | | ✅ |
|Replicate | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
|Anyscale | ✅ | ✅ | ✅ | ✅ | ✅ |
|Cohere| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | |
|Huggingface| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
|Openrouter| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | ✅ | | | | |
|AI21| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | |
|VertexAI| ✅ | ✅ | | ✅ | | | | | | | | | | | ✅ | | |
|Bedrock| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | | | | | ✅ (for anthropic) | |
|Sagemaker| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
|TogetherAI| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | ✅ |
|AlephAlpha| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
|Palm| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
|NLP Cloud| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
|Petals| ✅ | ✅ | | ✅ | ✅ | | | | | |
|Ollama| ✅ | ✅ | ✅ | ✅ | ✅ | | | ✅ | | | | | ✅ | | |
|Databricks| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | | | | | |
|ClarifAI| ✅ | ✅ | |✅ | ✅ | | | | | | | | | | |
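For quick reference, `litellm.get_supported_openai_params()` mentioned above returns the translated param names for a given model and provider. A minimal sketch, using the cohere `command-r` pairing that this commit's tests also use; the exact return value varies by LiteLLM version:

```python
import litellm

# Ask LiteLLM which OpenAI params it can translate for this model + provider,
# e.g. ["temperature", "max_tokens", "stream", ...].
supported = litellm.get_supported_openai_params(
    model="command-r", custom_llm_provider="cohere"
)

if "response_format" not in supported:
    print("response_format would be dropped (or raise) for this provider")
```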
:::note
By default, LiteLLM raises an exception if the openai param being passed in isn't supported.
To drop the param instead, set `litellm.drop_params = True` or pass `drop_params=True` in the `completion(...)` call.
**For function calling:**
To add the function definitions to the prompt for non-OpenAI models instead, set `litellm.add_function_to_prompt = True` (see the sketch after this note).
:::
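A minimal sketch of that function-calling fallback; the model name and function definition are illustrative, and it assumes the target provider has no native function-calling support:

```python
import litellm

# Instead of raising on providers without native function calling,
# inject the function definitions into the prompt text.
litellm.add_function_to_prompt = True

response = litellm.completion(
    model="command-r",  # illustrative non-OpenAI model
    messages=[{"role": "user", "content": "What's the weather in Boston?"}],
    functions=[
        {
            "name": "get_current_weather",
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
                "required": ["location"],
            },
        }
    ],
)
```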
## Input Params


@@ -9,6 +9,7 @@ from litellm.utils import get_optional_params_embeddings, get_optional_params
from litellm.llms.prompt_templates.factory import (
    map_system_message_pt,
)
from unittest.mock import patch, MagicMock
from litellm.types.completion import (
    ChatCompletionUserMessageParam,
    ChatCompletionSystemMessageParam,
@@ -243,3 +244,45 @@ def test_azure_tool_choice(api_version):
    ), "tool_choice={} for api version={}".format(
        optional_params["tool_choice"], api_version
    )


@pytest.mark.parametrize("drop_params", [True, False, None])
def test_dynamic_drop_params(drop_params):
    """
    Make a call to cohere w/ drop params = True vs. false.
    """
    if drop_params == True:
        optional_params = litellm.utils.get_optional_params(
            model="command-r",
            custom_llm_provider="cohere",
            response_format="json",
            drop_params=drop_params,
        )
    else:
        try:
            optional_params = litellm.utils.get_optional_params(
                model="command-r",
                custom_llm_provider="cohere",
                response_format="json",
                drop_params=drop_params,
            )
            pytest.fail("Expected to fail")
        except Exception as e:
            pass


def test_dynamic_drop_params_e2e():
    with patch("requests.post", new=MagicMock()) as mock_response:
        try:
            response = litellm.completion(
                model="command-r",
                messages=[{"role": "user", "content": "Hey, how's it going?"}],
                response_format={"key": "value"},
                drop_params=True,
            )
        except Exception as e:
            pass

        mock_response.assert_called_once()
        print(mock_response.call_args.kwargs["data"])
        assert "response_format" not in mock_response.call_args.kwargs["data"]


@@ -5173,6 +5173,7 @@ def get_optional_params(
    top_logprobs=None,
    extra_headers=None,
    api_version=None,
    drop_params=None,
    **kwargs,
):
    # retrieve all parameters passed to the function
@@ -5244,6 +5245,7 @@ def get_optional_params(
        "top_logprobs": None,
        "extra_headers": None,
        "api_version": None,
        "drop_params": None,
    }
    # filter out those parameters that were passed with non-default values
    non_default_params = {
@@ -5253,6 +5255,7 @@ def get_optional_params(
            k != "model"
            and k != "custom_llm_provider"
            and k != "api_version"
            and k != "drop_params"
            and k in default_params
            and v != default_params[k]
        )
@@ -5335,7 +5338,12 @@ def get_optional_params(
            # Always keeps this in elif code blocks
            else:
                unsupported_params[k] = non_default_params[k]
        if unsupported_params:
            if litellm.drop_params == True or (
                drop_params is not None and drop_params == True
            ):
                pass
            else:
                raise UnsupportedParamsError(
                    status_code=500,
                    message=f"{custom_llm_provider} does not support parameters: {unsupported_params}, for model={model}. To drop these, set `litellm.drop_params=True` or for proxy:\n\n`litellm_settings:\n drop_params: true`\n",
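The net effect of the block above: unsupported params are now dropped when either the module-level flag or the per-call argument asks for it, and passing `drop_params=None` or `False` defers to the global setting in this version. A simplified, hypothetical restatement of that decision (not a helper that exists in utils.py):

```python
from typing import Optional

import litellm


def should_drop_unsupported_params(drop_params: Optional[bool]) -> bool:
    # Hypothetical helper mirroring the check above: drop when the global flag
    # is enabled OR the caller explicitly passed drop_params=True for this
    # request; None or False fall back to the global litellm.drop_params flag.
    return litellm.drop_params is True or drop_params is True
```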