feat(proxy_server.py): enable llm api based prompt injection checks

Run user calls through an LLM API to check for prompt injection attacks. This happens in parallel to the actual LLM call using `async_moderation_hook`.
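A minimal sketch of the parallel dispatch this describes (`moderation_hook` and `llm_call` below are hypothetical stand-ins, not the proxy's actual functions):

```python
import asyncio


async def moderation_hook(data: dict, call_type: str) -> None:
    """Hypothetical stand-in for the injection check (e.g. an LLM API call)."""
    if "ignore previous instructions" in str(data.get("messages", "")).lower():
        raise ValueError("Rejected: potential prompt injection detected")


async def llm_call(data: dict) -> dict:
    """Hypothetical stand-in for the actual LLM completion call."""
    return {"choices": [{"message": {"content": "ok"}}]}


async def call_with_moderation(data: dict) -> dict:
    # Fire the moderation check concurrently instead of blocking the
    # request on it; the real LLM call proceeds in parallel.
    moderation_task = asyncio.create_task(
        moderation_hook(data=data, call_type="completion")
    )
    response = await llm_call(data)
    await moderation_task  # surfaces the rejection if the check failed
    return response
```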
Krrish Dholakia 2024-03-20 22:43:42 -07:00
parent f24d3ffdb6
commit d91f9a9f50
11 changed files with 271 additions and 24 deletions


@@ -72,7 +72,11 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
     ):
         pass
 
-    async def async_moderation_hook(self, data: dict):
+    async def async_moderation_hook(
+        self,
+        data: dict,
+        call_type: Literal["completion", "embeddings", "image_generation"],
+    ):
         pass
 
     async def async_post_call_streaming_hook(
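A caller-side sketch of adopting the widened signature (the injection heuristic below is illustrative, not the detection logic this commit ships; the `CustomLogger` import path is assumed from litellm's integrations package):

```python
from typing import Literal

from litellm.integrations.custom_logger import CustomLogger  # assumed path


class InjectionCheck(CustomLogger):
    async def async_moderation_hook(
        self,
        data: dict,
        call_type: Literal["completion", "embeddings", "image_generation"],
    ):
        # Only screen chat payloads in this sketch; embeddings and image
        # requests pass through untouched.
        if call_type != "completion":
            return
        text = " ".join(
            str(m.get("content", ""))
            for m in data.get("messages", [])
            if isinstance(m, dict)
        )
        # Illustrative keyword heuristic; the commit instead routes the
        # check through an LLM API.
        if "ignore previous instructions" in text.lower():
            raise ValueError("Rejected: potential prompt injection")
```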