mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
feat(proxy_server.py): enable llm api based prompt injection checks
run user calls through an llm api to check for prompt injection attacks. This happens in parallel to th e actual llm call using `async_moderation_hook`
This commit is contained in:
parent
f24d3ffdb6
commit
d91f9a9f50
11 changed files with 271 additions and 24 deletions
|
@ -72,7 +72,11 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
|
|||
):
|
||||
pass
|
||||
|
||||
async def async_moderation_hook(self, data: dict):
|
||||
async def async_moderation_hook(
|
||||
self,
|
||||
data: dict,
|
||||
call_type: Literal["completion", "embeddings", "image_generation"],
|
||||
):
|
||||
pass
|
||||
|
||||
async def async_post_call_streaming_hook(
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue