fix(utils.py): handle key error in msg validation (#8325)

* fix(utils.py): handle key error in msg validation

* Support running Aim Guard during LLM call (#7918)

* support running Aim Guard during LLM call

* Rename header

* adjust docs and fix type annotations

* fix(timeout.md): doc fix for openai example on dynamic timeouts

---------

Co-authored-by: Tomer Bin <117278227+hxtomer@users.noreply.github.com>
This commit is contained in:
Krish Dholakia 2025-02-06 18:13:46 -08:00 committed by GitHub
parent fac1d2ccef
commit f031926b82
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 56 additions and 21 deletions

View file

@@ -37,7 +37,7 @@ guardrails:
- guardrail_name: aim-protected-app
litellm_params:
guardrail: aim
mode: pre_call
mode: pre_call # 'during_call' is also available
api_key: os.environ/AIM_API_KEY
api_base: os.environ/AIM_API_BASE # Optional, use only when using a self-hosted Aim Outpost
```

View file

@@ -166,7 +166,7 @@ response = client.chat.completions.create(
{"role": "user", "content": "what color is red"}
],
logit_bias={12481: 100},
timeout=1
extra_body={"timeout": 1} # 👈 KEY CHANGE
)
print(response)

View file

@@ -6,7 +6,7 @@
# +-------------------------------------------------------------+
import os
from typing import Literal, Optional
from typing import Literal, Optional, Union
from fastapi import HTTPException
@@ -25,12 +25,8 @@ class AimGuardrailMissingSecrets(Exception):
class AimGuardrail(CustomGuardrail):
def __init__(
self, api_key: Optional[str] = None, api_base: Optional[str] = None, **kwargs
):
self.async_handler = get_async_httpx_client(
llm_provider=httpxSpecialProvider.GuardrailCallback
)
def __init__(self, api_key: Optional[str] = None, api_base: Optional[str] = None, **kwargs):
self.async_handler = get_async_httpx_client(llm_provider=httpxSpecialProvider.GuardrailCallback)
self.api_key = api_key or os.environ.get("AIM_API_KEY")
if not self.api_key:
msg = (
@@ -38,9 +34,7 @@ class AimGuardrail(CustomGuardrail):
"pass it as a parameter to the guardrail in the config file"
)
raise AimGuardrailMissingSecrets(msg)
self.api_base = (
api_base or os.environ.get("AIM_API_BASE") or "https://api.aim.security"
)
self.api_base = api_base or os.environ.get("AIM_API_BASE") or "https://api.aim.security"
super().__init__(**kwargs)
async def async_pre_call_hook(
@@ -58,11 +52,32 @@ class AimGuardrail(CustomGuardrail):
"pass_through_endpoint",
"rerank",
],
) -> Exception | str | dict | None:
) -> Union[Exception, str, dict, None]:
verbose_proxy_logger.debug("Inside AIM Pre-Call Hook")
await self.call_aim_guardrail(data, hook="pre_call")
return data
async def async_moderation_hook(
    self,
    data: dict,
    user_api_key_dict: UserAPIKeyAuth,
    call_type: Literal[
        "completion",
        "embeddings",
        "image_generation",
        "moderation",
        "audio_transcription",
    ],
) -> Union[Exception, str, dict, None]:
    """Run the Aim guardrail during the LLM call (``during_call`` mode).

    Forwards the request payload to the Aim guardrail service tagged with
    the ``moderation`` hook and returns the payload unchanged when no
    violation is found; ``call_aim_guardrail`` raises on a detection.
    """
    verbose_proxy_logger.debug("Inside AIM Moderation Hook")
    # Delegate to the shared guardrail caller, labelling this invocation
    # as the moderation (during-call) stage.
    await self.call_aim_guardrail(data=data, hook="moderation")
    return data
async def call_aim_guardrail(self, data: dict, hook: str) -> None:
user_email = data.get("metadata", {}).get("headers", {}).get("x-aim-user-email")
headers = {"Authorization": f"Bearer {self.api_key}"} | (
headers = {"Authorization": f"Bearer {self.api_key}", "x-aim-litellm-hook": hook} | (
{"x-aim-user-email": user_email} if user_email else {}
)
response = await self.async_handler.post(
@@ -80,4 +95,3 @@ class AimGuardrail(CustomGuardrail):
)
if detected:
raise HTTPException(status_code=400, detail=res["detection_message"])
return data

View file

@@ -5884,6 +5884,10 @@ def validate_chat_completion_user_messages(messages: List[AllMessageValues]):
if item.get("type") not in ValidUserMessageContentTypes:
raise Exception("invalid content type")
except Exception as e:
if isinstance(e, KeyError):
raise Exception(
f"Invalid message={m} at index {idx}. Please ensure all messages are valid OpenAI chat completion messages."
)
if "invalid content type" in str(e):
raise Exception(
f"Invalid user message={m} at index {idx}. Please ensure all user messages are valid OpenAI chat completion messages."

View file

@@ -1850,3 +1850,15 @@ def test_dict_to_response_format_helper():
"ref_template": "/$defs/{model}",
}
_dict_to_response_format_helper(**args)
def test_validate_user_messages_invalid_content_type():
    """A message whose content item has an unknown ``type`` must be rejected.

    The message deliberately omits the ``role`` key as well, so this also
    exercises the KeyError branch added in this change, which re-raises as
    a generic "Invalid message=..." Exception.
    """
    from litellm.utils import validate_chat_completion_user_messages

    messages = [{"content": [{"type": "invalid_type", "text": "Hello"}]}]

    with pytest.raises(Exception) as excinfo:
        validate_chat_completion_user_messages(messages)

    # Assert on the raised exception itself: str(excinfo) is the
    # ExceptionInfo wrapper's representation, not the error message.
    assert "Invalid message" in str(excinfo.value)

View file

@@ -55,15 +55,15 @@ def test_aim_guard_config_no_api_key():
@pytest.mark.asyncio
async def test_callback():
@pytest.mark.parametrize("mode", ["pre_call", "during_call"])
async def test_callback(mode: str):
init_guardrails_v2(
all_guardrails=[
{
"guardrail_name": "gibberish-guard",
"litellm_params": {
"guardrail": "aim",
"guard_name": "gibberish_guard",
"mode": "pre_call",
"mode": mode,
"api_key": "hs-aim-key",
},
}
@@ -89,6 +89,11 @@ async def test_callback():
request=Request(method="POST", url="http://aim"),
),
):
if mode == "pre_call":
await aim_guardrail.async_pre_call_hook(
data=data, cache=DualCache(), user_api_key_dict=UserAPIKeyAuth(), call_type="completion"
)
else:
await aim_guardrail.async_moderation_hook(
data=data, user_api_key_dict=UserAPIKeyAuth(), call_type="completion"
)