docs new presidio language controls

This commit is contained in:
Ishaan Jaff 2024-09-04 13:04:19 -07:00
parent 9b5164b38d
commit 6c30f18f8c
5 changed files with 154 additions and 15 deletions

View file

@ -0,0 +1,129 @@
import Image from '@theme/IdealImage';
# PII Masking - Presidio
## Quick Start
LiteLLM supports [Microsoft Presidio](https://github.com/microsoft/presidio/) for PII masking.
### 1. Define Guardrails on your LiteLLM config.yaml
Define your guardrails under the `guardrails` section
```yaml
model_list:
- model_name: gpt-3.5-turbo
litellm_params:
model: openai/gpt-3.5-turbo
api_key: os.environ/OPENAI_API_KEY
guardrails:
- guardrail_name: "presidio-pre-guard"
litellm_params:
guardrail: presidio # supported values: "aporia", "bedrock", "lakera", "presidio"
mode: "pre_call"
```
Set the following env vars
```bash
export PRESIDIO_ANALYZER_API_BASE="http://localhost:5002"
export PRESIDIO_ANONYMIZER_API_BASE="http://localhost:5001"
```
#### Supported values for `mode`
- `pre_call` Run **before** LLM call, on **input**
- `post_call` Run **after** LLM call, on **input & output**
### 2. Start LiteLLM Gateway
```shell
litellm --config config.yaml --detailed_debug
```
### 3. Test request
**[Langchain, OpenAI SDK Usage Examples](../proxy/user_keys#request-format)**
<Tabs>
<TabItem label="Masked PII call" value="not-allowed">
Expect this to mask `Jane Doe` since it's PII
```shell
curl http://localhost:4000/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer sk-1234" \
-d '{
"model": "gpt-4o-mini",
"messages": [
{"role": "user", "content": "Hello my name is Jane Doe"}
],
"guardrails": ["presidio-pre-guard"]
}'
```
Expected response — the PII (`Jane Doe`) is masked before the request reaches the LLM
```shell
{
"id": "chatcmpl-A3qSC39K7imjGbZ8xCDacGJZBoTJQ",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"content": "Hello, <PERSON>! How can I assist you today?",
"role": "assistant",
"tool_calls": null,
"function_call": null
}
}
],
"created": 1725479980,
"model": "gpt-4o-mini-2024-07-18",
"object": "chat.completion",
"system_fingerprint": "fp_5bd87c427a",
"usage": {
"completion_tokens": 13,
"prompt_tokens": 14,
"total_tokens": 27
},
"service_tier": null
}
```
</TabItem>
<TabItem label="No PII call" value="allowed">
```shell
curl http://localhost:4000/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer sk-1234" \
-d '{
"model": "gpt-4o-mini",
"messages": [
{"role": "user", "content": "Hello good morning"}
],
"guardrails": ["presidio-pre-guard"]
}'
```
</TabItem>
</Tabs>
## Set `language` per request
## Output parsing
## Ad Hoc Recognizers

View file

@ -1,6 +1,14 @@
import Image from '@theme/IdealImage';
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# PII Masking
# PII Masking - LiteLLM Gateway (Deprecated Version)
:::warning
This is deprecated, please use [our new Presidio PII masking integration](./guardrails/pii_masking_v2)
:::
LiteLLM supports [Microsoft Presidio](https://github.com/microsoft/presidio/) for PII masking.

View file

@ -67,7 +67,15 @@ const sidebars = {
{
type: "category",
label: "🛡️ [Beta] Guardrails",
items: ["proxy/guardrails/quick_start", "proxy/guardrails/aporia_api", "proxy/guardrails/lakera_ai", "proxy/guardrails/bedrock", "proxy/guardrails/custom_guardrail", "prompt_injection"],
items: [
"proxy/guardrails/quick_start",
"proxy/guardrails/aporia_api",
"proxy/guardrails/lakera_ai",
"proxy/guardrails/bedrock",
"proxy/guardrails/pii_masking_v2",
"proxy/guardrails/custom_guardrail",
"prompt_injection"
],
},
{
type: "category",
@ -101,7 +109,6 @@ const sidebars = {
"proxy/model_management",
"proxy/health",
"proxy/debugging",
"proxy/pii_masking",
"proxy/call_hooks",
"proxy/rules",
"proxy/cli",
@ -291,6 +298,7 @@ const sidebars = {
"data_security",
"migration_policy",
"contributing",
"proxy/pii_masking",
"rules",
"proxy_server",
{

View file

@ -396,8 +396,9 @@ class _OPTIONAL_PresidioPIIMasking(CustomGuardrail):
if "metadata" in data:
_metadata = data["metadata"]
_guardrail_config = _metadata.get("guardrail_config")
_presidio_config = PresidioPerRequestConfig(**_guardrail_config)
return _presidio_config
if _guardrail_config:
_presidio_config = PresidioPerRequestConfig(**_guardrail_config)
return _presidio_config
return None

View file

@ -1,22 +1,15 @@
model_list:
- model_name: fake-openai-endpoint
- model_name: openai/*
litellm_params:
model: openai/fake
api_base: https://exampleopenaiendpoint-production.up.railway.app/
model: gpt-3.5-turbo
api_key: os.environ/OPENAI_API_KEY
- model_name: gpt-3.5-turbo-end-user-test
litellm_params:
model: azure/chatgpt-v-2
api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
api_version: "2023-05-15"
api_key: os.environ/AZURE_API_KEY
litellm_settings:
success_callback: ["prometheus"]
failure_callback: ["prometheus"]
guardrails:
- guardrail_name: "presidio"
- guardrail_name: "presidio-pre-guard"
litellm_params:
guardrail: presidio # supported values: "aporia", "bedrock", "lakera", "presidio"
mode: "pre_call" # pre_call, during_call, post_call