LiteLLM Minor Fixes & Improvements (10/15/2024) (#6242)

* feat(litellm_pre_call_utils.py): support forwarding request headers to backend llm api

* fix(litellm_pre_call_utils.py): handle custom litellm key header

* test(router_code_coverage.py): check if all router functions are dire… (#6186)

* test(router_code_coverage.py): check if all router functions are directly tested

prevent regressions

* docs(configs.md): document all environment variables (#6185)

* docs: make it easier to find anthropic/openai prompt caching doc

* added codecov yml (#6207)

* fix codecov.yaml

* run ci/cd again

* (refactor) caching use LLMCachingHandler for async_get_cache and set_cache  (#6208)

* use folder for caching

* fix importing caching

* fix clickhouse pyright

* fix linting

* fix correctly pass kwargs and args

* fix test case for embedding

* fix linting

* fix embedding caching logic

* fix refactor handle utils.py

* fix test_embedding_caching_azure_individual_items_reordered

* (feat) prometheus has well-defined latency buckets (#6211)

* fix: prometheus has well-defined latency buckets

* use a well-defined latency bucket

* use types file for prometheus logging

* add test for LATENCY_BUCKETS

* fix prom testing

* fix config.yml
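
A minimal sketch of the pattern behind the prometheus commits above, assuming the buckets are declared once in a shared types/constants module and passed to `prometheus_client.Histogram` (the constant values and metric names here are illustrative, not necessarily the ones LiteLLM uses):

```python
from prometheus_client import Histogram

# assumed constant name/values; the point is one explicit, shared bucket definition
LATENCY_BUCKETS = (
    0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5,
    1.0, 2.5, 5.0, 10.0, 30.0, 60.0, 120.0, float("inf"),
)

# every latency histogram reuses the same buckets instead of the library defaults
litellm_request_latency_seconds = Histogram(
    "litellm_request_latency_seconds",  # illustrative metric name
    "End-to-end request latency recorded by the proxy",
    labelnames=["model"],
    buckets=LATENCY_BUCKETS,
)
```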

* (refactor caching) use LLMCachingHandler for caching streaming responses  (#6210)

* use folder for caching

* fix importing caching

* fix clickhouse pyright

* fix linting

* fix correctly pass kwargs and args

* fix test case for embedding

* fix linting

* fix embedding caching logic

* fix refactor handle utils.py

* refactor async set stream cache

* fix linting

* bump (#6187)

* update code cov yaml

* fix config.yml

* add caching component to code cov

* fix config.yml ci/cd

* add coverage for proxy auth

* (refactor caching) use common `_retrieve_from_cache` helper  (#6212)

* use folder for caching

* fix importing caching

* fix clickhouse pyright

* fix linting

* fix correctly pass kwargs and args

* fix test case for embedding

* fix linting

* fix embedding caching logic

* fix refactor handle utils.py

* refactor async set stream cache

* fix linting

* refactor - use _retrieve_from_cache

* refactor use _convert_cached_result_to_model_response

* fix linting errors
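
For readers skimming this refactor, a rough sketch of the helper shape the commits above describe; the cache API used here is an assumption for illustration, not the actual LiteLLM implementation:

```python
from typing import Any, List, Optional


class LLMCachingHandler:
    """Hypothetical sketch: one retrieval path shared by completion and embedding calls."""

    def __init__(self, cache: Any):
        self.cache = cache  # assumed to expose an async_get_cache(cache_key) coroutine

    async def _retrieve_from_cache(self, cache_key: str) -> Optional[List[Any]]:
        cached = await self.cache.async_get_cache(cache_key)
        if cached is None:
            return None  # cache miss
        # embeddings cache one entry per input item; completions cache a single blob
        return cached if isinstance(cached, list) else [cached]
```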

* bump: version 1.49.2 → 1.49.3

* fix code cov components

* test(test_router_helpers.py): add router component unit tests

* test: add additional router tests

* test: add more router testing

* test: add more router testing + more mock functions

* ci(router_code_coverage.py): fix check

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: yujonglee <yujonglee.dev@gmail.com>

* bump: version 1.49.3 → 1.49.4

* (refactor) use helper function `_assemble_complete_response_from_streaming_chunks` to assemble complete responses in caching and logging callbacks (#6220)

* (refactor) use _assemble_complete_response_from_streaming_chunks

* add unit test for test_assemble_complete_response_from_streaming_chunks_1

* fix assemble complete_streaming_response

* config add logging_testing

* add logging_coverage in codecov

* test test_assemble_complete_response_from_streaming_chunks_3

* add unit tests for _assemble_complete_response_from_streaming_chunks

* fix remove unused / junk function

* add test for streaming_chunks when error assembling
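
A hedged sketch of what such a helper can look like (the signature is assumed; `litellm.stream_chunk_builder` is an existing public helper): fold the accumulated chunks into one response, and return None instead of raising when assembly fails, so caching and logging callbacks can simply skip the entry.

```python
from typing import List, Optional

import litellm


def _assemble_complete_response_from_streaming_chunks(
    chunks: List,
    messages: Optional[list] = None,
) -> Optional[litellm.ModelResponse]:
    if not chunks:
        return None
    try:
        # stitch the streamed deltas back into a single ModelResponse
        return litellm.stream_chunk_builder(chunks=chunks, messages=messages)
    except Exception:
        # mirrors the "error assembling" test case above: never break the callback
        return None
```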

* (refactor) OTEL - use safe_set_attribute for setting attributes (#6226)

* otel - use safe_set_attribute for setting attributes

* fix OTEL only use safe_set_attribute
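
Illustrative only, under the assumption that the wrapper's job is to keep span attributes OTEL-safe: set primitive values directly and stringify anything else rather than raising.

```python
from typing import Any

from opentelemetry.trace import Span


def safe_set_attribute(span: Span, key: str, value: Any) -> None:
    primitive_types = (str, bool, int, float)
    if isinstance(value, primitive_types):
        span.set_attribute(key, value)
    elif value is not None:
        # non-primitive values (dicts, exceptions, ...) are stringified instead of dropped
        span.set_attribute(key, str(value))
```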

* (fix) prompt caching cost calculation OpenAI, Azure OpenAI  (#6231)

* fix prompt caching cost calculation

* fix testing for prompt cache cost calc
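
The arithmetic behind the fix, sketched with assumed field names (exact attribute names in LiteLLM's model cost map may differ): cached prompt tokens are billed at the cheaper cache-read rate, the rest at the normal input rate.

```python
def prompt_tokens_cost(
    prompt_tokens: int,
    cached_tokens: int,
    input_cost_per_token: float,
    cache_read_input_token_cost: float,
) -> float:
    # e.g. 1000 prompt tokens with 600 cached:
    #   400 * full input rate + 600 * discounted cache-read rate
    non_cached_tokens = prompt_tokens - cached_tokens
    return (
        non_cached_tokens * input_cost_per_token
        + cached_tokens * cache_read_input_token_cost
    )
```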

* fix(allowed_model_region): allow us as allowed region (#6234)
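
For context, a minimal sketch of region-based pre-call filtering with the deployment dict shape assumed (this is not the router's exact logic): a key pinned to `allowed_model_region="us"` should only see deployments tagged `region_name="us"`.

```python
from typing import Optional


def filter_deployments_by_region(
    healthy_deployments: list, allowed_model_region: Optional[str]
) -> list:
    if allowed_model_region is None:
        # no region restriction on this key -> keep everything
        return healthy_deployments
    return [
        deployment
        for deployment in healthy_deployments
        if deployment.get("litellm_params", {}).get("region_name") == allowed_model_region
    ]
```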

* fix(litellm_pre_call_utils.py): support 'us' region routing + fix header forwarding to filter on `x-` headers

* docs(customer_routing.md): fix region-based routing example

* feat(azure.py): handle empty arguments function call - azure

Closes https://github.com/BerriAI/litellm/issues/6241
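
Sketch of the intent (helper name is hypothetical): Azure rejects a `function_call` whose `arguments` field is missing, so an assistant message carrying only a function name gets an empty-string `arguments` default before the request goes out.

```python
def ensure_function_call_arguments(message: dict) -> dict:
    function_call = message.get("function_call")
    if function_call is not None and "arguments" not in function_call:
        # Azure expects arguments to be a (possibly empty) JSON string
        function_call["arguments"] = ""
    return message


# e.g. {"role": "assistant", "function_call": {"name": "get_weather"}}
# becomes {"role": "assistant", "function_call": {"name": "get_weather", "arguments": ""}}
```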

* feat(guardrails_ai.py): support guardrails ai integration

Adds support for on-prem guardrails via guardrails ai

* fix(proxy/utils.py): prevent sql injection attack

Fixes https://huntr.com/bounties/a4f6d357-5b44-4e00-9cac-f1cc351211d2
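
Not the project's actual patch, just the general mitigation this class of fix points at: never interpolate request-supplied values into raw SQL; validate them against a strict pattern and/or bind them as query parameters.

```python
import re


def assert_no_sql_injection(user_supplied_value: str) -> str:
    """Hypothetical guard: allow only characters a key/token can legitimately contain."""
    if not re.fullmatch(r"[A-Za-z0-9_\-]+", user_supplied_value):
        raise ValueError("Potential SQL injection detected in request value")
    return user_supplied_value
```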

* fix: fix linting errors

* fix(litellm_pre_call_utils.py): don't log litellm api key in proxy server request headers

* fix(litellm_pre_call_utils.py): don't forward stainless headers
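
A hedged, standalone sketch of the forwarding rule the last few commits describe (not the proxy's actual function, and the signature here is an assumption): only custom `x-` headers are forwarded to the backend LLM API, while `Authorization`, the configured LiteLLM key header, and SDK-internal `x-stainless-*` headers are dropped.

```python
from typing import Optional


def get_forwardable_headers(
    request_headers: dict, litellm_key_header_name: Optional[str] = None
) -> dict:
    forwarded = {}
    for name, value in request_headers.items():
        lowered = name.lower()
        if litellm_key_header_name and lowered == litellm_key_header_name.lower():
            continue  # never forward the caller's LiteLLM key
        # forward custom x- headers only; never auth or x-stainless-* SDK headers
        if lowered.startswith("x-") and not lowered.startswith("x-stainless"):
            forwarded[name] = value
    return forwarded


# {"Authorization": "...", "X-Custom-Header": "v", "X-Stainless-Lang": "js"}
# -> {"X-Custom-Header": "v"}
```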

* docs(guardrails_ai.md): add guardrails ai quick start to docs

* test: handle flaky test

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: yujonglee <yujonglee.dev@gmail.com>
Co-authored-by: Marcus Elwin <marcus@elwin.com>
Krish Dholakia authored on 2024-10-16 07:32:06 -07:00 (committed by GitHub)
commit 54ebdbf7ce, parent fc5b75d171
32 changed files with 982 additions and 314 deletions

@@ -226,7 +226,7 @@ def test_all_model_configs():
         optional_params={},
     ) == {"max_tokens": 10}

-    from litellm.llms.AzureOpenAI.azure import AzureOpenAIConfig
+    from litellm.llms.AzureOpenAI.chat.gpt_transformation import AzureOpenAIConfig

     assert "max_completion_tokens" in AzureOpenAIConfig().get_supported_openai_params()
     assert AzureOpenAIConfig().map_openai_params(

@@ -82,6 +82,7 @@ def user_api_key_auth() -> UserAPIKeyAuth:
 @pytest.mark.parametrize("num_projects", [1, 2, 100])
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_available_tpm(num_projects, dynamic_rate_limit_handler):
     model = "my-fake-model"

     ## SET CACHE W/ ACTIVE PROJECTS

@@ -0,0 +1,28 @@
+import os
+import sys
+import traceback
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+import litellm
+from litellm.proxy.guardrails.init_guardrails import init_guardrails_v2
+
+
+def test_guardrails_ai():
+    litellm.set_verbose = True
+    litellm.guardrail_name_config_map = {}
+
+    init_guardrails_v2(
+        all_guardrails=[
+            {
+                "guardrail_name": "gibberish-guard",
+                "litellm_params": {
+                    "guardrail": "guardrails_ai",
+                    "guard_name": "gibberish_guard",
+                    "mode": "post_call",
+                },
+            }
+        ],
+        config_file_path="",
+    )

@@ -436,3 +436,24 @@ def test_vertex_only_image_user_message():

 def test_convert_url():
     convert_url_to_base64("https://picsum.photos/id/237/200/300")
+
+
+def test_azure_tool_call_invoke_helper():
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "What is the weather in Copenhagen?"},
+        {"role": "assistant", "function_call": {"name": "get_weather"}},
+    ]
+
+    transformed_messages = litellm.AzureOpenAIConfig.transform_request(
+        model="gpt-4o", messages=messages, optional_params={}
+    )
+
+    assert transformed_messages["messages"] == [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "What is the weather in Copenhagen?"},
+        {
+            "role": "assistant",
+            "function_call": {"name": "get_weather", "arguments": ""},
+        },
+    ]

@@ -72,7 +72,7 @@ def test_embedding(client):
     # assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback
     print("my_custom_logger", my_custom_logger)
-    assert my_custom_logger.async_success_embedding == False
+    assert my_custom_logger.async_success_embedding is False

     test_data = {"model": "azure-embedding-model", "input": ["hello"]}
     response = client.post("/embeddings", json=test_data, headers=headers)
@@ -84,7 +84,7 @@ def test_embedding(client):
         id(my_custom_logger),
     )
     assert (
-        my_custom_logger.async_success_embedding == True
+        my_custom_logger.async_success_embedding is True
     )  # checks if the status of async_success is True, only the async_log_success_event can set this to true
     assert (
         my_custom_logger.async_embedding_kwargs["model"] == "azure-embedding-model"
@@ -107,7 +107,6 @@ def test_embedding(client):
             "accept-encoding": "gzip, deflate",
             "connection": "keep-alive",
             "user-agent": "testclient",
-            "authorization": "Bearer sk-1234",
             "content-length": "54",
             "content-type": "application/json",
         },
@@ -194,6 +193,8 @@ def test_chat_completion(client):
         "mode": "chat",
         "db_model": False,
     }
+
+    assert "authorization" not in proxy_server_request_object["headers"]
     assert proxy_server_request_object == {
         "url": "http://testserver/chat/completions",
         "method": "POST",
@@ -203,7 +204,6 @@ def test_chat_completion(client):
             "accept-encoding": "gzip, deflate",
             "connection": "keep-alive",
             "user-agent": "testclient",
-            "authorization": "Bearer sk-1234",
             "content-length": "123",
             "content-type": "application/json",
         },

@@ -173,6 +173,96 @@ def test_chat_completion(mock_acompletion, client_no_auth):
         pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")


+@pytest.mark.parametrize(
+    "litellm_key_header_name",
+    ["x-litellm-key", None],
+)
+def test_add_headers_to_request(litellm_key_header_name):
+    from fastapi import Request
+    from starlette.datastructures import URL
+    import json
+    from litellm.proxy.litellm_pre_call_utils import (
+        clean_headers,
+        get_forwardable_headers,
+    )
+
+    headers = {
+        "Authorization": "Bearer 1234",
+        "X-Custom-Header": "Custom-Value",
+        "X-Stainless-Header": "Stainless-Value",
+    }
+    request = Request(scope={"type": "http"})
+    request._url = URL(url="/chat/completions")
+    request._body = json.dumps({"model": "gpt-3.5-turbo"}).encode("utf-8")
+    request_headers = clean_headers(headers, litellm_key_header_name)
+    forwarded_headers = get_forwardable_headers(request_headers)
+    assert forwarded_headers == {"X-Custom-Header": "Custom-Value"}
+
+
+@pytest.mark.parametrize(
+    "litellm_key_header_name",
+    ["x-litellm-key", None],
+)
+@mock_patch_acompletion()
+def test_chat_completion_forward_headers(
+    mock_acompletion, client_no_auth, litellm_key_header_name
+):
+    global headers
+    try:
+        if litellm_key_header_name is not None:
+            gs = getattr(litellm.proxy.proxy_server, "general_settings")
+            gs["litellm_key_header_name"] = litellm_key_header_name
+            setattr(litellm.proxy.proxy_server, "general_settings", gs)
+        # Your test data
+        test_data = {
+            "model": "gpt-3.5-turbo",
+            "messages": [
+                {"role": "user", "content": "hi"},
+            ],
+            "max_tokens": 10,
+        }
+        headers_to_forward = {
+            "X-Custom-Header": "Custom-Value",
+            "X-Another-Header": "Another-Value",
+        }
+        if litellm_key_header_name is not None:
+            headers_to_not_forward = {litellm_key_header_name: "Bearer 1234"}
+        else:
+            headers_to_not_forward = {"Authorization": "Bearer 1234"}
+        received_headers = {**headers_to_forward, **headers_to_not_forward}
+
+        print("testing proxy server with chat completions")
+        response = client_no_auth.post(
+            "/v1/chat/completions", json=test_data, headers=received_headers
+        )
+        mock_acompletion.assert_called_once_with(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "user", "content": "hi"},
+            ],
+            max_tokens=10,
+            litellm_call_id=mock.ANY,
+            litellm_logging_obj=mock.ANY,
+            request_timeout=mock.ANY,
+            specific_deployment=True,
+            metadata=mock.ANY,
+            proxy_server_request=mock.ANY,
+            headers={
+                "x-custom-header": "Custom-Value",
+                "x-another-header": "Another-Value",
+            },
+        )
+        print(f"response - {response.text}")
+        assert response.status_code == 200
+        result = response.json()
+        print(f"Received response: {result}")
+    except Exception as e:
+        pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")
+
+
 @mock_patch_acompletion()
 @pytest.mark.asyncio
 async def test_team_disable_guardrails(mock_acompletion, client_no_auth):

@@ -1050,7 +1050,7 @@ def test_filter_invalid_params_pre_call_check():
         pytest.fail(f"Got unexpected exception on router! - {str(e)}")


-@pytest.mark.parametrize("allowed_model_region", ["eu", None])
+@pytest.mark.parametrize("allowed_model_region", ["eu", None, "us"])
 def test_router_region_pre_call_check(allowed_model_region):
     """
     If region based routing set
@@ -1065,7 +1065,7 @@ def test_router_region_pre_call_check(allowed_model_region):
                 "api_version": os.getenv("AZURE_API_VERSION"),
                 "api_base": os.getenv("AZURE_API_BASE"),
                 "base_model": "azure/gpt-35-turbo",
-                "region_name": "eu",
+                "region_name": allowed_model_region,
             },
             "model_info": {"id": "1"},
         },
@@ -1091,7 +1091,9 @@ def test_router_region_pre_call_check(allowed_model_region):
     if allowed_model_region is None:
         assert len(_healthy_deployments) == 2
     else:
-        assert len(_healthy_deployments) == 1, "No models selected as healthy"
+        assert len(_healthy_deployments) == 1, "{} models selected as healthy".format(
+            len(_healthy_deployments)
+        )
         assert (
             _healthy_deployments[0]["model_info"]["id"] == "1"
         ), "Incorrect model id picked. Got id={}, expected id=1".format(

@@ -102,7 +102,6 @@ def test_spend_logs_payload():
             "method": "POST",
             "headers": {
                 "content-type": "application/json",
-                "authorization": "Bearer sk-1234",
                 "user-agent": "PostmanRuntime/7.32.3",
                 "accept": "*/*",
                 "postman-token": "92300061-eeaa-423b-a420-0b44896ecdc4",