From 9f927e0e4eb123129e26f2f8f917ac0abc223799 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 19 Aug 2024 18:50:14 -0700 Subject: [PATCH] add testing for aporia guardrails --- .../{aporio_ai.py => aporia_ai.py} | 0 .../example_config_yaml/otel_test_config.yaml | 13 ++++ litellm/proxy/proxy_config.yaml | 7 ++- tests/otel_tests/test_guardrails.py | 59 +++++++++++++++++++ 4 files changed, 76 insertions(+), 3 deletions(-) rename enterprise/enterprise_hooks/{aporio_ai.py => aporia_ai.py} (100%) create mode 100644 tests/otel_tests/test_guardrails.py diff --git a/enterprise/enterprise_hooks/aporio_ai.py b/enterprise/enterprise_hooks/aporia_ai.py similarity index 100% rename from enterprise/enterprise_hooks/aporio_ai.py rename to enterprise/enterprise_hooks/aporia_ai.py diff --git a/litellm/proxy/example_config_yaml/otel_test_config.yaml b/litellm/proxy/example_config_yaml/otel_test_config.yaml index 2e2537443..496ae1710 100644 --- a/litellm/proxy/example_config_yaml/otel_test_config.yaml +++ b/litellm/proxy/example_config_yaml/otel_test_config.yaml @@ -9,3 +9,16 @@ litellm_settings: cache: true callbacks: ["otel"] +guardrails: + - guardrail_name: "aporia-pre-guard" + litellm_params: + guardrail: aporia # supported values: "aporia", "bedrock", "lakera" + mode: "pre_call" + api_key: os.environ/APORIA_API_KEY_1 + api_base: os.environ/APORIA_API_BASE_1 + - guardrail_name: "aporia-post-guard" + litellm_params: + guardrail: aporia # supported values: "aporia", "bedrock", "lakera" + mode: "post_call" + api_key: os.environ/APORIA_API_KEY_2 + api_base: os.environ/APORIA_API_BASE_2 \ No newline at end of file diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index e36b555a9..3c1c64292 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -1,8 +1,9 @@ model_list: - - model_name: gpt-3.5-turbo + - model_name: fake-openai-endpoint litellm_params: - model: openai/gpt-3.5-turbo - api_key: os.environ/OPENAI_API_KEY + 
model: openai/fake + api_key: fake-key + api_base: https://exampleopenaiendpoint-production.up.railway.app/ guardrails: - guardrail_name: "aporia-pre-guard" diff --git a/tests/otel_tests/test_guardrails.py b/tests/otel_tests/test_guardrails.py new file mode 100644 index 000000000..db2027ce1 --- /dev/null +++ b/tests/otel_tests/test_guardrails.py @@ -0,0 +1,59 @@ +import pytest +import asyncio +import aiohttp, openai +from openai import OpenAI, AsyncOpenAI +from typing import Optional, List, Union +import uuid + + +async def chat_completion(session, key, model: Union[str, List] = "gpt-4"): + url = "http://0.0.0.0:4000/chat/completions" + headers = { + "Authorization": f"Bearer {key}", + "Content-Type": "application/json", + } + data = { + "model": model, + "messages": [ + {"role": "user", "content": f"Hello! {str(uuid.uuid4())}"}, + ], + "guardrails": ["aporia-post-guard", "aporia-pre-guard"], + } + + async with session.post(url, headers=headers, json=data) as response: + status = response.status + response_text = await response.text() + + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + + # response headers + response_headers = response.headers + print("response headers=", response_headers) + + return await response.json(), response_headers + + +@pytest.mark.asyncio +async def test_no_llm_guard_triggered(): + """ + - Tests a request where no content mod is triggered + - Assert that the guardrails applied are returned in the response headers + """ + async with aiohttp.ClientSession() as session: + response, headers = await chat_completion( + session, "sk-1234", model="fake-openai-endpoint" + ) + await asyncio.sleep(3) + + print("response=", response, "response headers", headers) + + assert "x-litellm-applied-guardrails" in headers + + assert ( + headers["x-litellm-applied-guardrails"] + == "aporia-pre-guard,aporia-post-guard" + )