forked from phoenix/litellm-mirror
feat: add test for custom guardrails
This commit is contained in:
parent
5895f0c615
commit
918e4fcfe5
2 changed files with 25 additions and 3 deletions
|
@ -1,8 +1,9 @@
|
|||
model_list:
|
||||
- model_name: gpt-4
|
||||
- model_name: fake-openai-endpoint
|
||||
litellm_params:
|
||||
model: openai/gpt-4o
|
||||
api_key: os.environ/OPENAI_API_KEY
|
||||
model: openai/fake
|
||||
api_key: fake-key
|
||||
api_base: https://exampleopenaiendpoint-production.up.railway.app/
|
||||
|
||||
guardrails:
|
||||
- guardrail_name: "custom-pre-guard"
|
||||
|
|
|
@ -217,3 +217,24 @@ async def test_bedrock_guardrail_triggered():
|
|||
print(e)
|
||||
assert "GUARDRAIL_INTERVENED" in str(e)
|
||||
assert "Violated guardrail policy" in str(e)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_custom_guardrail_during_call_triggered():
    """
    - Sends a request routed through the "custom-during-guard" guardrail with
      content ("litellm") that the guardrail is configured to reject
    - Asserts the proxy raises an error containing the guardrail's failure
      message instead of returning a completion
    """
    async with aiohttp.ClientSession() as session:
        try:
            # Return value is intentionally ignored: success here means the
            # guardrail failed to block the request.
            await chat_completion(
                session,
                "sk-1234",
                model="fake-openai-endpoint",
                messages=[{"role": "user", "content": "Hello do you like litellm?"}],
                guardrails=["custom-during-guard"],
            )
            # NOTE: safe inside the try only because pytest's Failed outcome
            # derives from BaseException, so the `except Exception` below
            # cannot swallow it.
            pytest.fail("Should have thrown an exception")
        except Exception as e:
            print(e)
            # The custom during-call guardrail surfaces this exact message
            # when the banned word `litellm` is detected.
            assert "Guardrail failed words - `litellm` detected" in str(e)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue