Log applied guardrails on LLM API call (#8452)

* fix(litellm_logging.py): support saving applied guardrails in logging object

allows the list of applied guardrails to be recorded on the logging object, for the proxy admin's visibility

* feat(spend_tracking_utils.py): log applied guardrails to spend logs

makes it easy for the admin to see which guardrails were applied to a request (see the sketch below)

* ci(config.yml): uninstall posthog from ci/cd

* test: fix tests

* test: update test
Krish Dholakia, 2025-02-10 22:57:30 -08:00 (committed by GitHub)
parent 8e32713637, commit ce3ead6f91
8 changed files with 152 additions and 10 deletions
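
For context on what the spend-logs change buys: after this commit, each request's spend-log record can carry the names of the guardrails that ran. A minimal sketch of the resulting record shape, assuming the field is named applied_guardrails (inferred from the get_applied_guardrails helper tested below, not confirmed against spend_tracking_utils.py):

# Hypothetical spend-log metadata shape. The "applied_guardrails" key is
# an assumption inferred from get_applied_guardrails(); see
# spend_tracking_utils.py in this commit for the authoritative schema.
spend_log_metadata = {
    "user_api_key": "hashed-key",  # illustrative existing field
    "applied_guardrails": [        # added by this commit (assumed name)
        "default_guardrail",
        "request_guardrail",
    ],
}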


@@ -864,17 +864,24 @@ def test_convert_model_response_object():
         == '{"type":"error","error":{"type":"invalid_request_error","message":"Output blocked by content filtering policy"}}'
     )
 
 
 @pytest.mark.parametrize(
-    "content, expected_reasoning, expected_content",
+    "content, expected_reasoning, expected_content",
     [
         (None, None, None),
-        ("<think>I am thinking here</think>The sky is a canvas of blue", "I am thinking here", "The sky is a canvas of blue"),
+        (
+            "<think>I am thinking here</think>The sky is a canvas of blue",
+            "I am thinking here",
+            "The sky is a canvas of blue",
+        ),
         ("I am a regular response", None, "I am a regular response"),
-    ]
+    ],
 )
 def test_parse_content_for_reasoning(content, expected_reasoning, expected_content):
-    assert(litellm.utils._parse_content_for_reasoning(content) == (expected_reasoning, expected_content))
+    assert litellm.utils._parse_content_for_reasoning(content) == (
+        expected_reasoning,
+        expected_content,
+    )
 
 
 @pytest.mark.parametrize(
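
The hunk above only reflows test_parse_content_for_reasoning, but its cases also document the contract: a leading <think>...</think> block is split out as the reasoning, everything after it is the visible content, and inputs without the block pass through unchanged. A minimal sketch consistent with those three cases, not necessarily litellm's actual implementation:

import re
from typing import Optional, Tuple

def parse_content_for_reasoning(
    content: Optional[str],
) -> Tuple[Optional[str], Optional[str]]:
    # None passes through as (None, None), matching the first case.
    if content is None:
        return None, None
    # Split a leading <think>...</think> block from the visible content.
    match = re.match(r"<think>(.*?)</think>(.*)", content, re.DOTALL)
    if match:
        return match.group(1), match.group(2)
    # No reasoning block: return the content unchanged.
    return None, content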
@@ -1874,3 +1881,82 @@ def test_validate_user_messages_invalid_content_type():
         assert "Invalid message" in str(e)
         print(e)
 
+
+
+from litellm.integrations.custom_guardrail import CustomGuardrail
+from litellm.utils import get_applied_guardrails
+from unittest.mock import Mock
+
+
+@pytest.mark.parametrize(
+    "test_case",
+    [
+        {
+            "name": "default_on_guardrail",
+            "callbacks": [
+                CustomGuardrail(guardrail_name="test_guardrail", default_on=True)
+            ],
+            "kwargs": {"metadata": {"requester_metadata": {"guardrails": []}}},
+            "expected": ["test_guardrail"],
+        },
+        {
+            "name": "request_specific_guardrail",
+            "callbacks": [
+                CustomGuardrail(guardrail_name="test_guardrail", default_on=False)
+            ],
+            "kwargs": {
+                "metadata": {"requester_metadata": {"guardrails": ["test_guardrail"]}}
+            },
+            "expected": ["test_guardrail"],
+        },
+        {
+            "name": "multiple_guardrails",
+            "callbacks": [
+                CustomGuardrail(guardrail_name="default_guardrail", default_on=True),
+                CustomGuardrail(guardrail_name="request_guardrail", default_on=False),
+            ],
+            "kwargs": {
+                "metadata": {
+                    "requester_metadata": {"guardrails": ["request_guardrail"]}
+                }
+            },
+            "expected": ["default_guardrail", "request_guardrail"],
+        },
+        {
+            "name": "empty_metadata",
+            "callbacks": [
+                CustomGuardrail(guardrail_name="test_guardrail", default_on=False)
+            ],
+            "kwargs": {},
+            "expected": [],
+        },
+        {
+            "name": "none_callback",
+            "callbacks": [
+                None,
+                CustomGuardrail(guardrail_name="test_guardrail", default_on=True),
+            ],
+            "kwargs": {},
+            "expected": ["test_guardrail"],
+        },
+        {
+            "name": "non_guardrail_callback",
+            "callbacks": [
+                Mock(),
+                CustomGuardrail(guardrail_name="test_guardrail", default_on=True),
+            ],
+            "kwargs": {},
+            "expected": ["test_guardrail"],
+        },
+    ],
+)
+def test_get_applied_guardrails(test_case):
+    # Setup
+    litellm.callbacks = test_case["callbacks"]
+
+    # Execute
+    result = get_applied_guardrails(test_case["kwargs"])
+
+    # Assert
+    assert sorted(result) == sorted(test_case["expected"])
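
The parametrized cases above pin down the behavior of get_applied_guardrails: default_on guardrails always apply, request-scoped ones apply only when named under metadata.requester_metadata.guardrails, and None or non-guardrail callbacks are skipped. A minimal sketch that satisfies all six cases, assuming CustomGuardrail exposes guardrail_name and default_on attributes (it is constructed with both above); litellm's real implementation may differ:

from typing import List

import litellm
from litellm.integrations.custom_guardrail import CustomGuardrail

def get_applied_guardrails_sketch(kwargs: dict) -> List[str]:
    # Guardrails explicitly requested for this call, if any.
    requested = (
        kwargs.get("metadata", {})
        .get("requester_metadata", {})
        .get("guardrails", [])
    )
    applied: List[str] = []
    for callback in litellm.callbacks:
        # Skips the None and Mock() callbacks from the test cases above.
        if not isinstance(callback, CustomGuardrail):
            continue
        # Apply if always-on, or if the request opted in by name.
        if callback.default_on or callback.guardrail_name in requested:
            applied.append(callback.guardrail_name)
    return applied

The sorted() comparison in the test is what permits the insertion-order difference between this sketch and any ordering the real helper produces.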