# litellm/tests/test_optional_params.py

#### What this tests ####
# This tests if get_optional_params works as expected
import asyncio
import inspect
import os
import sys
import time
import traceback

import pytest

sys.path.insert(0, os.path.abspath("../.."))  # add the repo root to the import path

from unittest.mock import MagicMock, patch

import litellm
from litellm.llms.prompt_templates.factory import map_system_message_pt
from litellm.types.completion import (
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from litellm.utils import (
get_optional_params,
get_optional_params_embeddings,
get_optional_params_image_gen,
)


## get_optional_params_embeddings
### Models: OpenAI, Azure, Bedrock
### Scenarios: w/ optional params + litellm.drop_params = True


def test_supports_system_message():
    """
    Check that map_system_message_pt merges the system message into the user
    message, and that litellm.completion(..., supports_system_message=False) works.
    """
messages = [
ChatCompletionSystemMessageParam(role="system", content="Listen here!"),
ChatCompletionUserMessageParam(role="user", content="Hello there!"),
]
new_messages = map_system_message_pt(messages=messages)
assert len(new_messages) == 1
assert new_messages[0]["role"] == "user"
    ## confirm you can make an OpenAI call with this param
response = litellm.completion(
model="gpt-3.5-turbo", messages=new_messages, supports_system_message=False
)
assert isinstance(response, litellm.ModelResponse)


@pytest.mark.parametrize(
    "stop_sequence, expected_count", [("\n", 0), (["\n"], 0), (["finish_reason"], 1)]
)
def test_anthropic_optional_params(stop_sequence, expected_count):
    """
    Check that a whitespace-only stop sequence is dropped for Anthropic, while a
    meaningful stop sequence is kept.
    """
litellm.drop_params = True
optional_params = get_optional_params(
model="claude-3", custom_llm_provider="anthropic", stop=stop_sequence
)
assert len(optional_params) == expected_count
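

# A minimal companion sketch: a human-readable stop sequence should survive the same
# mapping (the ["finish_reason"] case above already asserts a count of 1). The exact
# output key is provider-internal, so only the param count is asserted here.
def test_anthropic_stop_sequence_kept():
    litellm.drop_params = True
    optional_params = get_optional_params(
        model="claude-3", custom_llm_provider="anthropic", stop=["Human:"]
    )
    assert len(optional_params) == 1
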
def test_bedrock_optional_params_embeddings():
litellm.drop_params = True
optional_params = get_optional_params_embeddings(
model="", user="John", encoding_format=None, custom_llm_provider="bedrock"
)
assert len(optional_params) == 0


@pytest.mark.parametrize(
"model, expected_dimensions, dimensions_kwarg",
[
("bedrock/amazon.titan-embed-text-v1", False, None),
("bedrock/amazon.titan-embed-image-v1", True, "embeddingConfig"),
("bedrock/amazon.titan-embed-text-v2:0", True, "dimensions"),
("bedrock/cohere.embed-multilingual-v3", False, None),
],
)
def test_bedrock_optional_params_embeddings_dimension(
model, expected_dimensions, dimensions_kwarg
):
litellm.drop_params = True
optional_params = get_optional_params_embeddings(
model=model,
user="John",
encoding_format=None,
dimensions=20,
custom_llm_provider="bedrock",
)
if expected_dimensions:
assert len(optional_params) == 1
else:
assert len(optional_params) == 0
if dimensions_kwarg is not None:
assert dimensions_kwarg in optional_params


def test_google_ai_studio_optional_params_embeddings():
optional_params = get_optional_params_embeddings(
model="",
user="John",
encoding_format=None,
custom_llm_provider="gemini",
drop_params=True,
)
assert len(optional_params) == 0
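

# A hedged inverse check, mirroring the drop_params pattern above: with dropping
# disabled, an unsupported param should raise instead of being silently removed.
def test_google_ai_studio_embeddings_unsupported_param_raises():
    litellm.drop_params = False
    with pytest.raises(Exception):
        get_optional_params_embeddings(
            model="",
            user="John",
            encoding_format=None,
            custom_llm_provider="gemini",
        )
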
def test_openai_optional_params_embeddings():
litellm.drop_params = True
optional_params = get_optional_params_embeddings(
model="", user="John", encoding_format=None, custom_llm_provider="openai"
)
assert len(optional_params) == 1
assert optional_params["user"] == "John"
def test_azure_optional_params_embeddings():
litellm.drop_params = True
optional_params = get_optional_params_embeddings(
model="chatgpt-v-2",
user="John",
encoding_format=None,
custom_llm_provider="azure",
)
assert len(optional_params) == 1
assert optional_params["user"] == "John"


def test_databricks_optional_params():
litellm.drop_params = True
optional_params = get_optional_params(
model="",
user="John",
custom_llm_provider="databricks",
max_tokens=10,
temperature=0.2,
)
print(f"optional_params: {optional_params}")
assert len(optional_params) == 2
assert "user" not in optional_params


def test_gemini_optional_params():
litellm.drop_params = True
optional_params = get_optional_params(
model="",
custom_llm_provider="gemini",
max_tokens=10,
frequency_penalty=10,
)
print(f"optional_params: {optional_params}")
assert len(optional_params) == 1
assert "frequency_penalty" not in optional_params
def test_azure_ai_mistral_optional_params():
    # Azure AI serves Mistral through an OpenAI-compatible endpoint, hence the
    # "openai" provider below
litellm.drop_params = True
optional_params = get_optional_params(
model="mistral-large-latest",
user="John",
custom_llm_provider="openai",
max_tokens=10,
temperature=0.2,
)
assert "user" not in optional_params


def test_vertex_ai_llama_3_optional_params():
litellm.vertex_llama3_models = ["meta/llama3-405b-instruct-maas"]
litellm.drop_params = True
optional_params = get_optional_params(
model="meta/llama3-405b-instruct-maas",
user="John",
custom_llm_provider="vertex_ai",
max_tokens=10,
temperature=0.2,
)
assert "user" not in optional_params


def test_vertex_ai_mistral_optional_params():
litellm.vertex_mistral_models = ["mistral-large@2407"]
litellm.drop_params = True
optional_params = get_optional_params(
model="mistral-large@2407",
user="John",
custom_llm_provider="vertex_ai",
max_tokens=10,
temperature=0.2,
)
assert "user" not in optional_params
assert "max_tokens" in optional_params
assert "temperature" in optional_params


def test_azure_gpt_optional_params_gpt_vision():
    # For OpenAI/Azure, all non-standard params must be passed to the OpenAI python
    # SDK via `extra_body`; assert that `extra_body` is actually set here.
optional_params = litellm.utils.get_optional_params(
model="",
user="John",
custom_llm_provider="azure",
max_tokens=10,
temperature=0.2,
enhancements={"ocr": {"enabled": True}, "grounding": {"enabled": True}},
dataSources=[
{
"type": "AzureComputerVision",
"parameters": {
"endpoint": "<your_computer_vision_endpoint>",
"key": "<your_computer_vision_key>",
},
}
],
)
print(optional_params)
assert optional_params["max_tokens"] == 10
assert optional_params["temperature"] == 0.2
assert optional_params["extra_body"] == {
"enhancements": {"ocr": {"enabled": True}, "grounding": {"enabled": True}},
"dataSources": [
{
"type": "AzureComputerVision",
"parameters": {
"endpoint": "<your_computer_vision_endpoint>",
"key": "<your_computer_vision_key>",
},
}
],
}


# test_azure_gpt_optional_params_gpt_vision()


def test_azure_gpt_optional_params_gpt_vision_with_extra_body():
    # If the user passes extra_body, we should not overwrite it; merge our params
    # into it and pass it along to the OpenAI python SDK.
optional_params = litellm.utils.get_optional_params(
model="",
user="John",
custom_llm_provider="azure",
max_tokens=10,
temperature=0.2,
extra_body={
"meta": "hi",
},
enhancements={"ocr": {"enabled": True}, "grounding": {"enabled": True}},
dataSources=[
{
"type": "AzureComputerVision",
"parameters": {
"endpoint": "<your_computer_vision_endpoint>",
"key": "<your_computer_vision_key>",
},
}
],
)
print(optional_params)
assert optional_params["max_tokens"] == 10
assert optional_params["temperature"] == 0.2
assert optional_params["extra_body"] == {
"enhancements": {"ocr": {"enabled": True}, "grounding": {"enabled": True}},
"dataSources": [
{
"type": "AzureComputerVision",
"parameters": {
"endpoint": "<your_computer_vision_endpoint>",
"key": "<your_computer_vision_key>",
},
}
],
"meta": "hi",
}


# test_azure_gpt_optional_params_gpt_vision_with_extra_body()


def test_openai_extra_headers():
optional_params = litellm.utils.get_optional_params(
model="",
user="John",
custom_llm_provider="openai",
max_tokens=10,
temperature=0.2,
extra_headers={"AI-Resource Group": "ishaan-resource"},
)
print(optional_params)
assert optional_params["max_tokens"] == 10
assert optional_params["temperature"] == 0.2
assert optional_params["extra_headers"] == {"AI-Resource Group": "ishaan-resource"}


@pytest.mark.parametrize(
"api_version",
[
"2024-02-01",
"2024-07-01", # potential future version with tool_choice="required" supported
"2023-07-01-preview",
"2024-03-01-preview",
],
)
def test_azure_tool_choice(api_version):
    """
    Test Azure tool_choice handling on older + newer API versions:
    tool_choice="required" should only be forwarded when the API version supports it.
    """
litellm.drop_params = True
optional_params = litellm.utils.get_optional_params(
model="chatgpt-v-2",
user="John",
custom_llm_provider="azure",
max_tokens=10,
temperature=0.2,
extra_headers={"AI-Resource Group": "ishaan-resource"},
tool_choice="required",
api_version=api_version,
)
print(f"{optional_params}")
if api_version == "2024-07-01":
assert optional_params["tool_choice"] == "required"
else:
assert (
"tool_choice" not in optional_params
), "tool choice should not be present. Got - tool_choice={} for api version={}".format(
optional_params["tool_choice"], api_version
)
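

# A hedged companion sketch (assumption): tool_choice values like "auto" predate
# "required" in the Azure API, so they should be forwarded even on older versions.
def test_azure_tool_choice_auto():
    litellm.drop_params = True
    optional_params = litellm.utils.get_optional_params(
        model="chatgpt-v-2",
        custom_llm_provider="azure",
        tool_choice="auto",
        api_version="2024-02-01",
    )
    assert optional_params["tool_choice"] == "auto"
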
@pytest.mark.parametrize("drop_params", [True, False, None])
def test_dynamic_drop_params(drop_params):
    """
    Map optional params for cohere with drop_params=True (unsupported params are
    silently dropped) vs. False/None (an unsupported param raises an exception).
    """
if drop_params is True:
optional_params = litellm.utils.get_optional_params(
model="command-r",
custom_llm_provider="cohere",
response_format={"type": "json"},
drop_params=drop_params,
)
else:
try:
optional_params = litellm.utils.get_optional_params(
model="command-r",
custom_llm_provider="cohere",
response_format={"type": "json"},
drop_params=drop_params,
)
pytest.fail("Expected to fail")
        except Exception:
pass
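

# A minimal sketch, assuming `temperature` is a cohere-supported param: dynamic
# dropping should remove only unsupported params and leave supported ones intact.
def test_dynamic_drop_params_keeps_supported():
    optional_params = litellm.utils.get_optional_params(
        model="command-r",
        custom_llm_provider="cohere",
        temperature=0.3,
        response_format={"type": "json"},
        drop_params=True,
    )
    assert optional_params["temperature"] == 0.3
    assert "response_format" not in optional_params
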
def test_dynamic_drop_params_e2e():
with patch("requests.post", new=MagicMock()) as mock_response:
try:
response = litellm.completion(
model="command-r",
messages=[{"role": "user", "content": "Hey, how's it going?"}],
response_format={"key": "value"},
drop_params=True,
)
        except Exception:
pass
mock_response.assert_called_once()
print(mock_response.call_args.kwargs["data"])
assert "response_format" not in mock_response.call_args.kwargs["data"]


@pytest.mark.parametrize(
"model, provider, should_drop",
[("command-r", "cohere", True), ("gpt-3.5-turbo", "openai", False)],
)
def test_drop_params_parallel_tool_calls(model, provider, should_drop):
"""
https://github.com/BerriAI/litellm/issues/4584
"""
response = litellm.utils.get_optional_params(
model=model,
custom_llm_provider=provider,
response_format={"type": "json"},
parallel_tool_calls=True,
drop_params=True,
)
print(response)
if should_drop:
assert "response_format" not in response
assert "parallel_tool_calls" not in response
else:
assert "response_format" in response
assert "parallel_tool_calls" in response


def test_dynamic_drop_params_parallel_tool_calls():
"""
https://github.com/BerriAI/litellm/issues/4584
"""
with patch("requests.post", new=MagicMock()) as mock_response:
try:
response = litellm.completion(
model="command-r",
messages=[{"role": "user", "content": "Hey, how's it going?"}],
parallel_tool_calls=True,
drop_params=True,
)
        except Exception:
pass
mock_response.assert_called_once()
print(mock_response.call_args.kwargs["data"])
assert "parallel_tool_calls" not in mock_response.call_args.kwargs["data"]
@pytest.mark.parametrize("drop_params", [True, False, None])
def test_dynamic_drop_additional_params(drop_params):
"""
Make a call to cohere, dropping 'response_format' specifically
"""
if drop_params is True:
optional_params = litellm.utils.get_optional_params(
model="command-r",
custom_llm_provider="cohere",
response_format={"type": "json"},
additional_drop_params=["response_format"],
)
else:
try:
optional_params = litellm.utils.get_optional_params(
model="command-r",
custom_llm_provider="cohere",
response_format={"type": "json"},
)
pytest.fail("Expected to fail")
        except Exception:
pass


def test_dynamic_drop_additional_params_e2e():
with patch("requests.post", new=MagicMock()) as mock_response:
try:
response = litellm.completion(
model="command-r",
messages=[{"role": "user", "content": "Hey, how's it going?"}],
response_format={"key": "value"},
additional_drop_params=["response_format"],
)
        except Exception:
pass
mock_response.assert_called_once()
print(mock_response.call_args.kwargs["data"])
assert "response_format" not in mock_response.call_args.kwargs["data"]
assert "additional_drop_params" not in mock_response.call_args.kwargs["data"]


def test_get_optional_params_image_gen():
response = litellm.utils.get_optional_params_image_gen(
aws_region_name="us-east-1", custom_llm_provider="openai"
)
print(response)
assert "aws_region_name" not in response
response = litellm.utils.get_optional_params_image_gen(
aws_region_name="us-east-1", custom_llm_provider="bedrock"
)
print(response)
assert "aws_region_name" in response
def test_hf_optional_params_embeddings_provider_specific_params():
    # provider-specific kwargs like `wait_for_model` should be passed through
optional_params = get_optional_params_embeddings(
model="my-custom-model",
custom_llm_provider="huggingface",
wait_for_model=True,
)
assert len(optional_params) == 1


def test_get_optional_params_num_retries():
"""
Relevant issue - https://github.com/BerriAI/litellm/issues/5124
"""
with patch("litellm.main.get_optional_params", new=MagicMock()) as mock_client:
_ = litellm.completion(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello world"}],
num_retries=10,
)
mock_client.assert_called()
print(f"mock_client.call_args: {mock_client.call_args}")
assert mock_client.call_args.kwargs["max_retries"] == 10


@pytest.mark.parametrize(
"provider",
[
"vertex_ai",
"vertex_ai_beta",
],
)
def test_vertex_safety_settings(provider):
litellm.vertex_ai_safety_settings = [
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
"threshold": "BLOCK_NONE",
},
{
"category": "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "BLOCK_NONE",
},
]
optional_params = get_optional_params(
model="gemini-1.5-pro", custom_llm_provider=provider
)
assert len(optional_params) == 1
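

# A minimal sketch, assuming per-request safety_settings are forwarded as a
# provider-specific param, mirroring the module-level setting above.
def test_vertex_safety_settings_per_request():
    optional_params = get_optional_params(
        model="gemini-1.5-pro",
        custom_llm_provider="vertex_ai",
        safety_settings=[
            {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"}
        ],
    )
    assert "safety_settings" in optional_params
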
@pytest.mark.parametrize(
"model, provider, expectedAddProp",
[("gemini-1.5-pro", "vertex_ai_beta", False), ("gpt-3.5-turbo", "openai", True)],
)
def test_parse_additional_properties_json_schema(model, provider, expectedAddProp):
optional_params = get_optional_params(
model=model,
custom_llm_provider=provider,
response_format={
"type": "json_schema",
"json_schema": {
"name": "math_reasoning",
"schema": {
"type": "object",
"properties": {
"steps": {
"type": "array",
"items": {
"type": "object",
"properties": {
"explanation": {"type": "string"},
"output": {"type": "string"},
},
"required": ["explanation", "output"],
"additionalProperties": False,
},
},
"final_answer": {"type": "string"},
},
"required": ["steps", "final_answer"],
"additionalProperties": False,
},
"strict": True,
},
},
)
print(optional_params)
if provider == "vertex_ai_beta":
schema = optional_params["response_schema"]
elif provider == "openai":
schema = optional_params["response_format"]["json_schema"]["schema"]
assert ("additionalProperties" in schema) == expectedAddProp