fix aporia typo

This commit is contained in:
Ishaan Jaff 2024-08-19 18:34:45 -07:00
parent e7fa2cf0aa
commit ca9c81470b
5 changed files with 24 additions and 24 deletions

View file

@ -36,7 +36,7 @@ Features:
- **Guardrails, PII Masking, Content Moderation** - **Guardrails, PII Masking, Content Moderation**
- ✅ [Content Moderation with LLM Guard, LlamaGuard, Secret Detection, Google Text Moderations](#content-moderation) - ✅ [Content Moderation with LLM Guard, LlamaGuard, Secret Detection, Google Text Moderations](#content-moderation)
- ✅ [Prompt Injection Detection (with LakeraAI API)](#prompt-injection-detection---lakeraai) - ✅ [Prompt Injection Detection (with LakeraAI API)](#prompt-injection-detection---lakeraai)
- ✅ [Prompt Injection Detection (with Aporio API)](#prompt-injection-detection---aporio-ai) - ✅ [Prompt Injection Detection (with Aporia API)](#prompt-injection-detection---aporia-ai)
- ✅ [Switch LakeraAI on / off per request](guardrails#control-guardrails-onoff-per-request) - ✅ [Switch LakeraAI on / off per request](guardrails#control-guardrails-onoff-per-request)
- ✅ Reject calls from Blocked User list - ✅ Reject calls from Blocked User list
- ✅ Reject calls (incoming / outgoing) with Banned Keywords (e.g. competitors) - ✅ Reject calls (incoming / outgoing) with Banned Keywords (e.g. competitors)
@ -1035,9 +1035,9 @@ curl --location 'http://localhost:4000/chat/completions' \
Need to control LakeraAI per Request ? Doc here 👉: [Switch LakeraAI on / off per request](prompt_injection.md#✨-enterprise-switch-lakeraai-on--off-per-api-call) Need to control LakeraAI per Request ? Doc here 👉: [Switch LakeraAI on / off per request](prompt_injection.md#✨-enterprise-switch-lakeraai-on--off-per-api-call)
::: :::
## Prompt Injection Detection - Aporio AI ## Prompt Injection Detection - Aporia AI
Use this if you want to reject /chat/completion calls that have prompt injection attacks with [AporioAI](https://www.aporia.com/) Use this if you want to reject /chat/completion calls that have prompt injection attacks with [AporiaAI](https://www.aporia.com/)
#### Usage #### Usage
@ -1048,11 +1048,11 @@ APORIO_API_KEY="eyJh****"
APORIO_API_BASE="https://gr..." APORIO_API_BASE="https://gr..."
``` ```
Step 2. Add `aporio_prompt_injection` to your callbacks Step 2. Add `aporia_prompt_injection` to your callbacks
```yaml ```yaml
litellm_settings: litellm_settings:
callbacks: ["aporio_prompt_injection"] callbacks: ["aporia_prompt_injection"]
``` ```
That's it, start your proxy That's it, start your proxy
@ -1081,7 +1081,7 @@ curl --location 'http://localhost:4000/chat/completions' \
"error": { "error": {
"message": { "message": {
"error": "Violated guardrail policy", "error": "Violated guardrail policy",
"aporio_ai_response": { "aporia_ai_response": {
"action": "block", "action": "block",
"revised_prompt": null, "revised_prompt": null,
"revised_response": "Profanity detected: Message blocked because it includes profanity. Please rephrase.", "revised_response": "Profanity detected: Message blocked because it includes profanity. Please rephrase.",
@ -1097,7 +1097,7 @@ curl --location 'http://localhost:4000/chat/completions' \
:::info :::info
Need to control AporioAI per Request ? Doc here 👉: [Create a guardrail](./guardrails.md) Need to control AporiaAI per Request ? Doc here 👉: [Create a guardrail](./guardrails.md)
::: :::

View file

@ -90,7 +90,7 @@ curl -i http://localhost:4000/v1/chat/completions \
}' }'
``` ```
Expect response on failure Expected response on failure
```shell ```shell
{ {

View file

@ -1,6 +1,6 @@
# +-------------------------------------------------------------+ # +-------------------------------------------------------------+
# #
# Use AporioAI for your LLM calls # Use AporiaAI for your LLM calls
# #
# +-------------------------------------------------------------+ # +-------------------------------------------------------------+
# Thank you users! We ❤️ you! - Krrish & Ishaan # Thank you users! We ❤️ you! - Krrish & Ishaan
@ -33,18 +33,18 @@ from litellm.types.guardrails import GuardrailEventHooks
litellm.set_verbose = True litellm.set_verbose = True
GUARDRAIL_NAME = "aporio" GUARDRAIL_NAME = "aporia"
class _ENTERPRISE_Aporio(CustomGuardrail): class _ENTERPRISE_Aporia(CustomGuardrail):
def __init__( def __init__(
self, api_key: Optional[str] = None, api_base: Optional[str] = None, **kwargs self, api_key: Optional[str] = None, api_base: Optional[str] = None, **kwargs
): ):
self.async_handler = AsyncHTTPHandler( self.async_handler = AsyncHTTPHandler(
timeout=httpx.Timeout(timeout=600.0, connect=5.0) timeout=httpx.Timeout(timeout=600.0, connect=5.0)
) )
self.aporio_api_key = api_key or os.environ["APORIO_API_KEY"] self.aporia_api_key = api_key or os.environ["APORIO_API_KEY"]
self.aporio_api_base = api_base or os.environ["APORIO_API_BASE"] self.aporia_api_base = api_base or os.environ["APORIO_API_BASE"]
self.event_hook: GuardrailEventHooks self.event_hook: GuardrailEventHooks
super().__init__(**kwargs) super().__init__(**kwargs)
@ -114,10 +114,10 @@ class _ENTERPRISE_Aporio(CustomGuardrail):
""" """
response = await self.async_handler.post( response = await self.async_handler.post(
url=self.aporio_api_base + "/validate", url=self.aporia_api_base + "/validate",
data=_json_data, data=_json_data,
headers={ headers={
"X-APORIA-API-KEY": self.aporio_api_key, "X-APORIA-API-KEY": self.aporia_api_key,
"Content-Type": "application/json", "Content-Type": "application/json",
}, },
) )
@ -133,7 +133,7 @@ class _ENTERPRISE_Aporio(CustomGuardrail):
status_code=400, status_code=400,
detail={ detail={
"error": "Violated guardrail policy", "error": "Violated guardrail policy",
"aporio_ai_response": _json_response, "aporia_ai_response": _json_response,
}, },
) )

View file

@ -118,17 +118,17 @@ def initialize_callbacks_on_proxy(
**init_params **init_params
) )
imported_list.append(lakera_moderations_object) imported_list.append(lakera_moderations_object)
elif isinstance(callback, str) and callback == "aporio_prompt_injection": elif isinstance(callback, str) and callback == "aporia_prompt_injection":
from enterprise.enterprise_hooks.aporio_ai import _ENTERPRISE_Aporio from enterprise.enterprise_hooks.aporia_ai import _ENTERPRISE_Aporia
if premium_user is not True: if premium_user is not True:
raise Exception( raise Exception(
"Trying to use Aporio AI Guardrail" "Trying to use Aporia AI Guardrail"
+ CommonProxyErrors.not_premium_user.value + CommonProxyErrors.not_premium_user.value
) )
aporio_guardrail_object = _ENTERPRISE_Aporio() aporia_guardrail_object = _ENTERPRISE_Aporia()
imported_list.append(aporio_guardrail_object) imported_list.append(aporia_guardrail_object)
elif isinstance(callback, str) and callback == "google_text_moderation": elif isinstance(callback, str) and callback == "google_text_moderation":
from enterprise.enterprise_hooks.google_text_moderation import ( from enterprise.enterprise_hooks.google_text_moderation import (
_ENTERPRISE_GoogleTextModeration, _ENTERPRISE_GoogleTextModeration,

View file

@ -113,11 +113,11 @@ def init_guardrails_v2(all_guardrails: dict):
# Init guardrail CustomLoggerClass # Init guardrail CustomLoggerClass
if litellm_params["guardrail"] == "aporia": if litellm_params["guardrail"] == "aporia":
from litellm.proxy.enterprise.enterprise_hooks.aporio_ai import ( from litellm.proxy.enterprise.enterprise_hooks.aporia_ai import (
_ENTERPRISE_Aporio, _ENTERPRISE_Aporia,
) )
_aporia_callback = _ENTERPRISE_Aporio( _aporia_callback = _ENTERPRISE_Aporia(
api_base=litellm_params["api_base"], api_base=litellm_params["api_base"],
api_key=litellm_params["api_key"], api_key=litellm_params["api_key"],
guardrail_name=guardrail["guardrail_name"], guardrail_name=guardrail["guardrail_name"],