LiteLLM Minor Fixes & Improvements (10/28/2024) (#6475)

* fix(anthropic/chat/transformation.py): support anthropic disable_parallel_tool_use param

Fixes https://github.com/BerriAI/litellm/issues/6456

* feat(anthropic/chat/transformation.py): support anthropic computer tool use

Closes https://github.com/BerriAI/litellm/issues/6427

* fix(vertex_ai/common_utils.py): parse out '$schema' when calling vertex ai

Fixes issue when trying to call vertex from vercel sdk

* fix(main.py): add 'extra_headers' support for azure on all translation endpoints

Fixes https://github.com/BerriAI/litellm/issues/6465

* fix: fix linting errors

* fix(transformation.py): handle no beta headers for anthropic

* test: cleanup test

* fix: fix linting error

* fix: fix linting errors

* fix: fix linting errors

* fix(transformation.py): handle dummy tool call

* fix(main.py): fix linting error

* fix(azure.py): pass required param

* LiteLLM Minor Fixes & Improvements (10/24/2024) (#6441)

* fix(azure.py): handle /openai/deployment in azure api base

* fix(factory.py): fix faulty anthropic tool result translation check

Fixes https://github.com/BerriAI/litellm/issues/6422

* fix(gpt_transformation.py): add support for parallel_tool_calls to azure

Fixes https://github.com/BerriAI/litellm/issues/6440

* fix(factory.py): support anthropic prompt caching for tool results

* fix(vertex_ai/common_utils): don't pop non-null required field

Fixes https://github.com/BerriAI/litellm/issues/6426

* feat(vertex_ai.py): support code_execution tool call for vertex ai + gemini

Closes https://github.com/BerriAI/litellm/issues/6434

* build(model_prices_and_context_window.json): Add 'supports_assistant_prefill' for bedrock claude-3-5-sonnet v2 models

Closes https://github.com/BerriAI/litellm/issues/6437

* fix(types/utils.py): fix linting

* test: update test to include required fields

* test: fix test

* test: handle flaky test

* test: remove e2e test - hitting gemini rate limits

* Litellm dev 10 26 2024 (#6472)

* docs(exception_mapping.md): add missing exception types

Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key

Ensure custom model pricing is registered to the specific model+provider key combination

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls

ensures complete coverage for all redis cache calls

* (Testing) Add unit testing for DualCache - ensure in memory cache is used when expected  (#6471)

* test test_dual_cache_get_set

* unit testing for dual cache

* fix async_set_cache_sadd

* test_dual_cache_local_only

* redis otel tracing + async support for latency routing (#6452)

* docs(exception_mapping.md): add missing exception types

Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key

Ensure custom model pricing is registered to the specific model+provider key combination

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls

ensures complete coverage for all redis cache calls

* refactor: pass parent_otel_span for redis caching calls in router

allows for more observability into what calls are causing latency issues

* test: update tests with new params

* refactor: ensure e2e otel tracing for router

* refactor(router.py): add more otel tracing across router

catch all latency issues for router requests

* fix: fix linting error

* fix(router.py): fix linting error

* fix: fix test

* test: fix tests

* fix(dual_cache.py): pass ttl to redis cache

* fix: fix param

* fix(dual_cache.py): set default value for parent_otel_span

* fix(transformation.py): support 'response_format' for anthropic calls

* fix(transformation.py): check for cache_control inside 'function' block

* fix: fix linting error

* fix: fix linting errors

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
This commit is contained in:
Krish Dholakia 2024-10-29 17:20:24 -07:00 committed by GitHub
parent 44e7ffd05c
commit 6b9be5092f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 684 additions and 253 deletions

View file

@@ -527,3 +527,98 @@ def test_process_anthropic_headers_with_no_matching_headers():
result = process_anthropic_headers(input_headers)
assert result == expected_output, "Unexpected output for non-matching headers"
def test_anthropic_computer_tool_use():
    """Smoke test: a `computer_20241022` (computer-use) tool is accepted by
    `completion` for an Anthropic model.

    The beta header is expected to be added automatically by the Anthropic
    transformation layer, hence the commented-out manual header below.
    """
    from litellm import completion

    tools = [
        {
            "type": "computer_20241022",
            "function": {
                "name": "computer",
                "parameters": {
                    "display_height_px": 100,
                    "display_width_px": 100,
                    "display_number": 1,
                },
            },
        }
    ]
    model = "claude-3-5-sonnet-20241022"
    messages = [{"role": "user", "content": "Save a picture of a cat to my desktop."}]
    resp = completion(
        model=model,
        messages=messages,
        tools=tools,
        # headers={"anthropic-beta": "computer-use-2024-10-22"},
    )
    print(resp)
    # Fix: the original test only printed the response and asserted nothing,
    # so it could never fail on a bad response object.
    assert resp is not None
@pytest.mark.parametrize(
    "computer_tool_used, prompt_caching_set, expected_beta_header",
    [
        (True, False, True),
        (False, True, True),
        (True, True, True),
        (False, False, False),
    ],
)
def test_anthropic_beta_header(
    computer_tool_used, prompt_caching_set, expected_beta_header
):
    """The 'anthropic-beta' header is emitted iff at least one beta feature
    (computer tool use or prompt caching) is requested."""
    built_headers = litellm.AnthropicConfig().get_anthropic_headers(
        api_key="fake-api-key",
        computer_tool_used=computer_tool_used,
        prompt_caching_set=prompt_caching_set,
    )
    # Single equivalence check replaces the original if/else pair:
    # header present  <=>  a beta feature was requested.
    assert ("anthropic-beta" in built_headers) == expected_beta_header
@pytest.mark.parametrize(
    "cache_control_location",
    [
        "inside_function",
        "outside_function",
    ],
)
def test_anthropic_tool_helper(cache_control_location):
    """`_map_tool_helper` must hoist `cache_control` to the top level of the
    mapped tool regardless of whether it was set on the nested 'function'
    dict or on the tool dict itself."""
    from litellm.llms.anthropic.chat.transformation import AnthropicConfig

    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                    },
                },
                "required": ["location"],
            },
        },
    }

    # Place cache_control at the location under test.
    target = (
        weather_tool["function"]
        if cache_control_location == "inside_function"
        else weather_tool
    )
    target["cache_control"] = {"type": "ephemeral"}

    mapped_tool = AnthropicConfig()._map_tool_helper(tool=weather_tool)
    assert mapped_tool["cache_control"] == {"type": "ephemeral"}

View file

@@ -96,6 +96,66 @@ def test_process_azure_headers_with_dict_input():
assert result == expected_output, "Unexpected output for dict input"
from httpx import Client
from unittest.mock import MagicMock, patch
from openai import AzureOpenAI
import litellm
from litellm import completion
import os
@pytest.mark.parametrize(
    "input, call_type",
    [
        ({"messages": [{"role": "user", "content": "Hello world"}]}, "completion"),
        ({"input": "Hello world"}, "embedding"),
        ({"prompt": "Hello world"}, "image_generation"),
    ],
)
def test_azure_extra_headers(input, call_type):
    """`extra_headers` passed to an Azure call must appear verbatim on the
    outgoing HTTP request, for completion, embedding, and image generation."""
    from litellm import embedding, image_generation

    http_client = Client()
    messages = [{"role": "user", "content": "Hello world"}]
    with patch.object(http_client, "send", new=MagicMock()) as mock_client:
        litellm.client_session = http_client
        # Dispatch table replaces the original if/elif chain.
        call_fns = {
            "completion": completion,
            "embedding": embedding,
            "image_generation": image_generation,
        }
        try:
            response = call_fns[call_type](
                model="azure/chatgpt-v-2",
                api_base="https://openai-gpt-4-test-v-1.openai.azure.com",
                api_version="2023-07-01-preview",
                api_key="my-azure-api-key",
                extra_headers={
                    "Authorization": "my-bad-key",
                    "Ocp-Apim-Subscription-Key": "hello-world-testing",
                },
                **input,
            )
            print(response)
        except Exception as e:
            # The mocked transport makes the call fail downstream; only the
            # captured request matters for this test.
            print(e)

        mock_client.assert_called()
        print(f"mock_client.call_args: {mock_client.call_args}")
        request = mock_client.call_args[0][0]
        print(request.method)  # e.g. 'POST'
        print(request.url)  # the full request URL
        print(request.headers)  # the outgoing request headers
        auth_header = request.headers.get("Authorization")
        apim_key = request.headers.get("Ocp-Apim-Subscription-Key")
        print(auth_header)
        assert auth_header == "my-bad-key"
        assert apim_key == "hello-world-testing"
@pytest.mark.parametrize(
"api_base, model, expected_endpoint",
[

View file

@@ -786,19 +786,122 @@ def test_unmapped_vertex_anthropic_model():
assert "max_retries" not in optional_params
# Fix: the scraped diff left an orphaned decorator and a headless
# `def test_vertex_tool_params(tools, key):` here (its body was deleted in
# this change), which is a SyntaxError. That removed test is dropped; only
# the real test below remains.
@pytest.mark.parametrize("provider", ["anthropic", "vertex_ai"])
def test_anthropic_parallel_tool_calls(provider):
    """`parallel_tool_calls` must be translated into Anthropic's
    `tool_choice.disable_parallel_tool_use` field for both the direct
    Anthropic provider and Vertex AI's Anthropic models."""
    optional_params = get_optional_params(
        model="claude-3-5-sonnet-v250@20241022",
        custom_llm_provider=provider,
        parallel_tool_calls=True,
    )
    print(f"optional_params: {optional_params}")
    # NOTE(review): asserting disable_parallel_tool_use is True when
    # parallel_tool_calls=True looks inverted — confirm the intended mapping
    # against the Anthropic API semantics.
    assert optional_params["tool_choice"]["disable_parallel_tool_use"] is True
def test_anthropic_computer_tool_use():
    """A `computer_20241022` tool is flattened into Anthropic's native format:
    the `type` is preserved and the nested function parameters are lifted to
    the top level of the mapped tool."""
    tools = [
        {
            "type": "computer_20241022",
            "function": {
                "name": "computer",
                "parameters": {
                    "display_height_px": 100,
                    "display_width_px": 100,
                    "display_number": 1,
                },
            },
        }
    ]
    # Fix: the scraped diff kept both the removed `model="gemini-1.5-pro"`
    # line and the added claude line, producing a duplicate keyword argument
    # (SyntaxError). Only the claude model belongs here.
    optional_params = get_optional_params(
        model="claude-3-5-sonnet-v250@20241022",
        custom_llm_provider="anthropic",
        tools=tools,
    )
    assert optional_params["tools"][0]["type"] == "computer_20241022"
    assert optional_params["tools"][0]["display_height_px"] == 100
    assert optional_params["tools"][0]["display_width_px"] == 100
    assert optional_params["tools"][0]["display_number"] == 1
def test_vertex_schema_field():
    """'$schema' (JSON Schema metadata) must be stripped from tool parameters
    before the tool is sent to Vertex AI, which rejects unknown schema
    fields. Uses a realistic deeply-nested schema as the fixture."""
    tools = [
        {
            "type": "function",
            "function": {
                "name": "json",
                "description": "Respond with a JSON object.",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "thinking": {
                            "type": "string",
                            "description": "Your internal thoughts on different problem details given the guidance.",
                        },
                        "problems": {
                            "type": "array",
                            "items": {
                                "type": "object",
                                "properties": {
                                    "icon": {
                                        "type": "string",
                                        "enum": [
                                            "BarChart2",
                                            "Bell",
                                        ],
                                        "description": "The name of a Lucide icon to display",
                                    },
                                    "color": {
                                        "type": "string",
                                        "description": "A Tailwind color class for the icon, e.g., 'text-red-500'",
                                    },
                                    "problem": {
                                        "type": "string",
                                        "description": "The title of the problem being addressed, approximately 3-5 words.",
                                    },
                                    "description": {
                                        "type": "string",
                                        "description": "A brief explanation of the problem, approximately 20 words.",
                                    },
                                    "impacts": {
                                        "type": "array",
                                        "items": {"type": "string"},
                                        "description": "A list of potential impacts or consequences of the problem, approximately 3 words each.",
                                    },
                                    "automations": {
                                        "type": "array",
                                        "items": {"type": "string"},
                                        "description": "A list of potential automations to address the problem, approximately 3-5 words each.",
                                    },
                                },
                                "required": [
                                    "icon",
                                    "color",
                                    "problem",
                                    "description",
                                    "impacts",
                                    "automations",
                                ],
                                "additionalProperties": False,
                            },
                            "description": "Please generate problem cards that match this guidance.",
                        },
                    },
                    "required": ["thinking", "problems"],
                    "additionalProperties": False,
                    "$schema": "http://json-schema.org/draft-07/schema#",
                },
            },
        }
    ]
    optional_params = get_optional_params(
        model="gemini-1.5-flash",
        custom_llm_provider="vertex_ai",
        tools=tools,
    )
    # Fix: two stale diff-artifact lines were removed here — they belonged to
    # the deleted test_vertex_tool_params and referenced the undefined name
    # `key` (NameError).
    print(optional_params["tools"][0]["function_declarations"][0])
    assert (
        "$schema"
        not in optional_params["tools"][0]["function_declarations"][0]["parameters"]
    )