mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
feat(proxy_server.py): enable llm api based prompt injection checks
run user calls through an llm api to check for prompt injection attacks. This happens in parallel to th e actual llm call using `async_moderation_hook`
This commit is contained in:
parent
f24d3ffdb6
commit
d91f9a9f50
11 changed files with 271 additions and 24 deletions
|
@ -138,7 +138,17 @@ class ProxyLogging:
|
|||
except Exception as e:
|
||||
raise e
|
||||
|
||||
async def during_call_hook(self, data: dict):
|
||||
async def during_call_hook(
|
||||
self,
|
||||
data: dict,
|
||||
call_type: Literal[
|
||||
"completion",
|
||||
"embeddings",
|
||||
"image_generation",
|
||||
"moderation",
|
||||
"audio_transcription",
|
||||
],
|
||||
):
|
||||
"""
|
||||
Runs the CustomLogger's async_moderation_hook()
|
||||
"""
|
||||
|
@ -146,7 +156,9 @@ class ProxyLogging:
|
|||
new_data = copy.deepcopy(data)
|
||||
try:
|
||||
if isinstance(callback, CustomLogger):
|
||||
await callback.async_moderation_hook(data=new_data)
|
||||
await callback.async_moderation_hook(
|
||||
data=new_data, call_type=call_type
|
||||
)
|
||||
except Exception as e:
|
||||
raise e
|
||||
return data
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue