Merge pull request #2037 from BerriAI/litellm_request_level_pii_masking

feat(presidio_pii_masking.py): allow request level controls for turning on/off pii masking
This commit is contained in:
Krish Dholakia 2024-02-17 19:05:14 -08:00 committed by GitHub
commit 6f77a4a31e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 141 additions and 4 deletions

View file

@@ -72,3 +72,78 @@ curl --location 'http://0.0.0.0:8000/key/generate' \
```
## Turn on/off per request
The proxy supports 2 request-level PII controls:
- `no-pii`: Optional[bool] - Allow the user to turn off PII masking per request.
- `output_parse_pii`: Optional[bool] - Allow the user to turn off PII output parsing per request.
### Usage
**Step 1. Create key with pii permissions**
Set `allow_pii_controls` to true for a given key. This will allow the user to set request-level PII controls.
```bash
curl --location 'http://0.0.0.0:8000/key/generate' \
--header 'Authorization: Bearer my-master-key' \
--header 'Content-Type: application/json' \
--data '{
"permissions": {"allow_pii_controls": true}
}'
```
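A successful request returns the new key; use it as the `api_key` in Step 2. Illustrative response only (exact fields may vary by proxy version):
```
{
  "key": "sk-...",
  "expires": "<datetime>"
}
```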
**Step 2. Turn off pii output parsing**
```python
import os
from openai import OpenAI

client = OpenAI(
    # pass the key generated in Step 1 (it must have allow_pii_controls)
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url="http://0.0.0.0:8000"
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "My name is Jane Doe, my number is 8382043839",
        }
    ],
    model="gpt-3.5-turbo",
    extra_body={
        "content_safety": {"output_parse_pii": False}
    }
)
```
**Step 3: See response**
```
{
  "id": "chatcmpl-8c5qbGTILZa1S4CK3b31yj5N40hFN",
  "choices": [
    {
      "finish_reason": "stop",
      "index": 0,
      "message": {
        "content": "Hi [PERSON], what can I help you with?",
        "role": "assistant"
      }
    }
  ],
  "created": 1704089632,
  "model": "gpt-35-turbo",
  "object": "chat.completion",
  "system_fingerprint": null,
  "usage": {
    "completion_tokens": 47,
    "prompt_tokens": 12,
    "total_tokens": 59
  },
  "_response_ms": 1753.426
}
```
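The same `content_safety` field also accepts `no-pii`, to skip PII masking entirely for a single request (not just output parsing). A minimal sketch, assuming the same key from Step 1 (the key must have `allow_pii_controls`):
```python
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),  # key generated in Step 1
    base_url="http://0.0.0.0:8000"
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "My name is Jane Doe, my number is 8382043839",
        }
    ],
    model="gpt-3.5-turbo",
    extra_body={
        # request-level control: skip PII masking for this call only
        "content_safety": {"no-pii": True}
    }
)
```
With `no-pii: true`, the proxy skips the Presidio analyze/anonymize step for that request and forwards the message as-is.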

View file

@@ -119,6 +119,9 @@ class _OPTIONAL_PresidioPIIMasking(CustomLogger):
        call_type: str,
    ):
        """
        - Check if request turned off pii
        - Check if user allowed to turn off pii (key permissions -> 'allow_pii_controls')
        - Take the request data
        - Call /analyze -> get the results
        - Call /anonymize w/ the analyze results -> get the redacted text
@@ -126,13 +129,59 @@ class _OPTIONAL_PresidioPIIMasking(CustomLogger):
        For multiple messages in /chat/completions, we'll need to call them in parallel.
        """
        permissions = user_api_key_dict.permissions

        if permissions.get("pii", True) == False:  # allow key to turn off pii masking
            return data

        output_parse_pii = permissions.get(
            "output_parse_pii", litellm.output_parse_pii
        )  # allow key to turn on/off output parsing for pii
        no_pii = permissions.get(
            "no-pii", None
        )  # allow key to turn on/off pii masking (if user is allowed to set pii controls, then they can override the key defaults)

        if no_pii is None:
            # check older way of turning on/off pii
            no_pii = not permissions.get("pii", True)

        content_safety = data.get("content_safety", None)
        verbose_proxy_logger.debug(f"content_safety: {content_safety}")

        ## Request-level turn on/off PII controls ##
        if content_safety is not None and isinstance(content_safety, dict):
            # pii masking ##
            if (
                content_safety.get("no-pii", None) is not None
                and content_safety.get("no-pii") == True
            ):
                # check if user allowed to turn this off
                if permissions.get("allow_pii_controls", False) == False:
                    raise HTTPException(
                        status_code=400,
                        detail={"error": "Not allowed to set PII controls per request"},
                    )
                else:  # user allowed to turn off pii masking
                    no_pii = content_safety.get("no-pii")
                    if not isinstance(no_pii, bool):
                        raise HTTPException(
                            status_code=400,
                            detail={"error": "no_pii needs to be a boolean value"},
                        )
            ## pii output parsing ##
            if content_safety.get("output_parse_pii", None) is not None:
                # check if user allowed to turn this off
                if permissions.get("allow_pii_controls", False) == False:
                    raise HTTPException(
                        status_code=400,
                        detail={"error": "Not allowed to set PII controls per request"},
                    )
                else:  # user allowed to turn on/off pii output parsing
                    output_parse_pii = content_safety.get("output_parse_pii")
                    if not isinstance(output_parse_pii, bool):
                        raise HTTPException(
                            status_code=400,
                            detail={
                                "error": "output_parse_pii needs to be a boolean value"
                            },
                        )
        if no_pii == True:  # turn off pii masking
            return data
        if call_type == "completion":  # /chat/completions requests
            messages = data["messages"]

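For reference, a minimal sketch (illustrative values, not part of this diff) of how a key's `permissions` and a request's `content_safety` field combine in the hook above:

```python
# key permissions, as created in Step 1 of the docs above
permissions = {"allow_pii_controls": True}

# request body as the proxy receives it (the OpenAI SDK's extra_body is merged in)
data = {
    "messages": [{"role": "user", "content": "My name is Jane Doe"}],
    "content_safety": {"output_parse_pii": False},
}

# Because allow_pii_controls is True, the per-request override is accepted:
# output parsing is turned off for this call, while masking itself still runs
# since "no-pii" was not set. Without allow_pii_controls, the hook raises an
# HTTPException with status_code=400.
```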
View file

@@ -3133,6 +3133,19 @@ async def generate_key_fn(
- permissions: Optional[dict] - key-specific permissions. Currently just used for turning off pii masking (if connected). Example - {"pii": false}
- model_max_budget: Optional[dict] - key-specific model budget in USD. Example - {"text-davinci-002": 0.5, "gpt-3.5-turbo": 0.5}. IF null or {} then no model specific budget.
Examples:
1. Allow users to turn on/off pii masking
```bash
curl --location 'http://0.0.0.0:8000/key/generate' \
--header 'Authorization: Bearer sk-1234' \
--header 'Content-Type: application/json' \
--data '{
"permissions": {"allow_pii_controls": true}
}'
```
Returns:
- key: (str) The generated api key
- expires: (datetime) Datetime object for when key expires.