# litellm/tests/llm_translation/test_azure_openai.py

import sys
import os

sys.path.insert(
    0, os.path.abspath("../../")
)  # Adds the parent directory to the system path

import pytest
from httpx import Headers

from litellm.llms.AzureOpenAI.common_utils import process_azure_headers


def test_process_azure_headers_empty():
    result = process_azure_headers({})
    assert result == {}, "Expected empty dictionary for no input"


def test_process_azure_headers_with_all_headers():
    input_headers = Headers(
        {
            "x-ratelimit-limit-requests": "100",
            "x-ratelimit-remaining-requests": "90",
            "x-ratelimit-limit-tokens": "10000",
            "x-ratelimit-remaining-tokens": "9000",
            "other-header": "value",
        }
    )

    expected_output = {
        "x-ratelimit-limit-requests": "100",
        "x-ratelimit-remaining-requests": "90",
        "x-ratelimit-limit-tokens": "10000",
        "x-ratelimit-remaining-tokens": "9000",
        "llm_provider-x-ratelimit-limit-requests": "100",
        "llm_provider-x-ratelimit-remaining-requests": "90",
        "llm_provider-x-ratelimit-limit-tokens": "10000",
        "llm_provider-x-ratelimit-remaining-tokens": "9000",
        "llm_provider-other-header": "value",
    }

    result = process_azure_headers(input_headers)
    assert result == expected_output, "Unexpected output for all Azure headers"


def test_process_azure_headers_with_partial_headers():
    input_headers = Headers(
        {
            "x-ratelimit-limit-requests": "100",
            "x-ratelimit-remaining-tokens": "9000",
            "other-header": "value",
        }
    )

    expected_output = {
        "x-ratelimit-limit-requests": "100",
        "x-ratelimit-remaining-tokens": "9000",
        "llm_provider-x-ratelimit-limit-requests": "100",
        "llm_provider-x-ratelimit-remaining-tokens": "9000",
        "llm_provider-other-header": "value",
    }

    result = process_azure_headers(input_headers)
    assert result == expected_output, "Unexpected output for partial Azure headers"


def test_process_azure_headers_with_no_matching_headers():
    input_headers = Headers(
        {"unrelated-header-1": "value1", "unrelated-header-2": "value2"}
    )

    expected_output = {
        "llm_provider-unrelated-header-1": "value1",
        "llm_provider-unrelated-header-2": "value2",
    }

    result = process_azure_headers(input_headers)
    assert result == expected_output, "Unexpected output for non-matching headers"


def test_process_azure_headers_with_dict_input():
    input_headers = {
        "x-ratelimit-limit-requests": "100",
        "x-ratelimit-remaining-requests": "90",
        "other-header": "value",
    }

    expected_output = {
        "x-ratelimit-limit-requests": "100",
        "x-ratelimit-remaining-requests": "90",
        "llm_provider-x-ratelimit-limit-requests": "100",
        "llm_provider-x-ratelimit-remaining-requests": "90",
        "llm_provider-other-header": "value",
    }

    result = process_azure_headers(input_headers)
    assert result == expected_output, "Unexpected output for dict input"
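

# NOTE: a minimal sketch of the mapping the tests above pin down (an
# illustration only, not litellm's actual implementation): every incoming
# header is re-emitted under an "llm_provider-" prefix, and the four
# x-ratelimit-* headers are additionally passed through under their
# original names.
def _process_azure_headers_sketch(headers) -> dict:
    rate_limit_headers = {
        "x-ratelimit-limit-requests",
        "x-ratelimit-remaining-requests",
        "x-ratelimit-limit-tokens",
        "x-ratelimit-remaining-tokens",
    }
    processed = {}
    for key, value in dict(headers).items():
        if key in rate_limit_headers:
            processed[key] = value  # rate-limit headers keep their bare name
        processed[f"llm_provider-{key}"] = value  # everything gets the prefix
    return processed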


from unittest.mock import MagicMock, patch

from httpx import Client
from openai import AzureOpenAI

import litellm
from litellm import completion


@pytest.mark.parametrize(
    "input, call_type",
    [
        ({"messages": [{"role": "user", "content": "Hello world"}]}, "completion"),
        ({"input": "Hello world"}, "embedding"),
        ({"prompt": "Hello world"}, "image_generation"),
    ],
)
def test_azure_extra_headers(input, call_type):
    from litellm import embedding, image_generation

    http_client = Client()
    with patch.object(http_client, "send", new=MagicMock()) as mock_client:
        litellm.client_session = http_client
        try:
            if call_type == "completion":
                func = completion
            elif call_type == "embedding":
                func = embedding
            elif call_type == "image_generation":
                func = image_generation
            response = func(
                model="azure/chatgpt-v-2",
                api_base="https://openai-gpt-4-test-v-1.openai.azure.com",
                api_version="2023-07-01-preview",
                api_key="my-azure-api-key",
                extra_headers={
                    "Authorization": "my-bad-key",
                    "Ocp-Apim-Subscription-Key": "hello-world-testing",
                },
                **input,
            )
            print(response)
        except Exception as e:
            print(e)

        mock_client.assert_called()
        print(f"mock_client.call_args: {mock_client.call_args}")
        request = mock_client.call_args[0][0]
        print(request.method)  # This will print 'POST'
        print(request.url)  # This will print the full URL
        print(request.headers)  # This will print the request headers

        auth_header = request.headers.get("Authorization")
        apim_key = request.headers.get("Ocp-Apim-Subscription-Key")
        print(auth_header)
        assert auth_header == "my-bad-key"
        assert apim_key == "hello-world-testing"
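

# Usage note: as the assertions above show, `extra_headers` is forwarded
# verbatim onto the outgoing Azure request and can even override headers
# litellm would otherwise set itself (the computed `Authorization` header is
# replaced by "my-bad-key"). A typical real-world use is routing through an
# API gateway such as Azure API Management; a sketch with placeholder values
# (commented out here, since it would make a live call):
#
#     completion(
#         model="azure/chatgpt-v-2",
#         api_base="https://my-gateway.example.com",
#         api_version="2023-07-01-preview",
#         api_key="placeholder-key",
#         extra_headers={"Ocp-Apim-Subscription-Key": "<your-apim-key>"},
#         messages=[{"role": "user", "content": "Hello world"}],
#     )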


@pytest.mark.parametrize(
    "api_base, model, expected_endpoint",
    [
        (
            "https://my-endpoint-sweden-berri992.openai.azure.com",
            "dall-e-3-test",
            "https://my-endpoint-sweden-berri992.openai.azure.com/openai/deployments/dall-e-3-test/images/generations?api-version=2023-12-01-preview",
        ),
        (
            "https://my-endpoint-sweden-berri992.openai.azure.com/openai/deployments/my-custom-deployment",
            "dall-e-3",
            "https://my-endpoint-sweden-berri992.openai.azure.com/openai/deployments/my-custom-deployment/images/generations?api-version=2023-12-01-preview",
        ),
    ],
)
def test_process_azure_endpoint_url(api_base, model, expected_endpoint):
    from litellm.llms.AzureOpenAI.azure import AzureChatCompletion

    azure_chat_completion = AzureChatCompletion()
    input_args = {
        "azure_client_params": {
            "api_version": "2023-12-01-preview",
            "azure_endpoint": api_base,
            "azure_deployment": model,
            "max_retries": 2,
            "timeout": 600,
            "api_key": "f28ab7b695af4154bc53498e5bdccb07",
        },
        "model": model,
    }
    result = azure_chat_completion.create_azure_base_url(**input_args)

    assert result == expected_endpoint, "Unexpected endpoint"
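

# The two parametrized cases above pin down the URL construction rule: when
# the api_base already contains "/openai/deployments", it is used as the
# deployment root as-is; otherwise "/openai/deployments/<deployment>" is
# appended before the route. A minimal sketch of that rule for the
# image-generation route exercised here (an illustration under those
# assumptions, not litellm's actual implementation):
def _create_azure_base_url_sketch(
    azure_endpoint: str, azure_deployment: str, api_version: str
) -> str:
    base = azure_endpoint.rstrip("/")
    if "/openai/deployments" not in base:
        # bare resource endpoint: append the deployment segment
        base = f"{base}/openai/deployments/{azure_deployment}"
    return f"{base}/images/generations?api-version={api_version}"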