forked from phoenix/litellm-mirror
add testing for aporia guardrails
commit 9f927e0e4e (parent c9d9c6444e)
4 changed files with 76 additions and 3 deletions
@@ -9,3 +9,16 @@ litellm_settings:
   cache: true
   callbacks: ["otel"]
 
+guardrails:
+  - guardrail_name: "aporia-pre-guard"
+    litellm_params:
+      guardrail: aporia  # supported values: "aporia", "bedrock", "lakera"
+      mode: "post_call"
+      api_key: os.environ/APORIA_API_KEY_1
+      api_base: os.environ/APORIA_API_BASE_1
+  - guardrail_name: "aporia-post-guard"
+    litellm_params:
+      guardrail: aporia  # supported values: "aporia", "bedrock", "lakera"
+      mode: "post_call"
+      api_key: os.environ/APORIA_API_KEY_2
+      api_base: os.environ/APORIA_API_BASE_2
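The guardrails configured above can also be selected per request by passing a "guardrails" list in the request body, which is exactly what the new test below does with raw aiohttp. A minimal sketch of the same call through the OpenAI SDK, assuming the proxy is running locally on port 4000 with the test master key sk-1234 and the fake-openai-endpoint model configured in the next hunk (all assumptions taken from the test, not from this config):

from openai import OpenAI

# Assumed local proxy address and master key; adjust for your deployment.
client = OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234")

response = client.chat.completions.create(
    model="fake-openai-endpoint",
    messages=[{"role": "user", "content": "Hello!"}],
    # The proxy reads non-OpenAI fields such as "guardrails" from the body,
    # so they go through extra_body when using the OpenAI SDK.
    extra_body={"guardrails": ["aporia-pre-guard", "aporia-post-guard"]},
)
print(response.choices[0].message.content)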
@@ -1,8 +1,9 @@
 model_list:
-  - model_name: gpt-3.5-turbo
+  - model_name: fake-openai-endpoint
     litellm_params:
-      model: openai/gpt-3.5-turbo
-      api_key: os.environ/OPENAI_API_KEY
+      model: openai/fake
+      api_key: fake-key
+      api_base: https://exampleopenaiendpoint-production.up.railway.app/
 
 guardrails:
   - guardrail_name: "aporia-pre-guard"
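This hunk repoints the test config at a mock OpenAI-compatible endpoint so the guardrail tests run without a real OpenAI key. A quick sanity check that the proxy picked up the renamed model (a sketch, assuming the same local proxy and master key as above, and that the proxy serves the OpenAI-compatible /v1/models route):

import requests

# Assumed local proxy address and master key, matching the test below.
resp = requests.get(
    "http://0.0.0.0:4000/v1/models",
    headers={"Authorization": "Bearer sk-1234"},
)
resp.raise_for_status()
assert "fake-openai-endpoint" in [m["id"] for m in resp.json()["data"]]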
tests/otel_tests/test_guardrails.py (new file, 59 lines)
@@ -0,0 +1,59 @@
+import pytest
+import asyncio
+import aiohttp, openai
+from openai import OpenAI, AsyncOpenAI
+from typing import Optional, List, Union
+import uuid
+
+
+async def chat_completion(session, key, model: Union[str, List] = "gpt-4"):
+    url = "http://0.0.0.0:4000/chat/completions"
+    headers = {
+        "Authorization": f"Bearer {key}",
+        "Content-Type": "application/json",
+    }
+    data = {
+        "model": model,
+        "messages": [
+            {"role": "user", "content": f"Hello! {str(uuid.uuid4())}"},
+        ],
+        "guardrails": ["aporia-post-guard", "aporia-pre-guard"],
+    }
+
+    async with session.post(url, headers=headers, json=data) as response:
+        status = response.status
+        response_text = await response.text()
+
+        print(response_text)
+        print()
+
+        if status != 200:
+            raise Exception(f"Request did not return a 200 status code: {status}")
+
+        # response headers
+        response_headers = response.headers
+        print("response headers=", response_headers)
+
+        return await response.json(), response_headers
+
+
+@pytest.mark.asyncio
+async def test_no_llm_guard_triggered():
+    """
+    - Tests a request where no content mod is triggered
+    - Assert that the guardrails applied are returned in the response headers
+    """
+    async with aiohttp.ClientSession() as session:
+        response, headers = await chat_completion(
+            session, "sk-1234", model="fake-openai-endpoint"
+        )
+        await asyncio.sleep(3)
+
+        print("response=", response, "response headers", headers)
+
+        assert "x-litellm-applied-guardrails" in headers
+
+        assert (
+            headers["x-litellm-applied-guardrails"]
+            == "aporia-pre-guard,aporia-post-guard"
+        )
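A note on the final assertion (inferred from the test itself, not stated elsewhere in the commit): the x-litellm-applied-guardrails header is expected to list guardrails in execution order, pre-guard before post-guard, even though the request body names aporia-post-guard first. To reproduce locally, start the proxy with the config above (with APORIA_API_KEY_1/2 and APORIA_API_BASE_1/2 set) and run pytest tests/otel_tests/test_guardrails.py.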