feat(proxy_server.py): enable llm api based prompt injection checks

run user calls through an llm api to check for prompt injection attacks. This happens in parallel to the actual llm call using `async_moderation_hook`
This commit is contained in:
Krrish Dholakia 2024-03-20 22:43:42 -07:00
parent f24d3ffdb6
commit d91f9a9f50
11 changed files with 271 additions and 24 deletions

View file

@@ -138,7 +138,17 @@ class ProxyLogging:
except Exception as e:
raise e
async def during_call_hook(self, data: dict):
async def during_call_hook(
self,
data: dict,
call_type: Literal[
"completion",
"embeddings",
"image_generation",
"moderation",
"audio_transcription",
],
):
"""
Runs the CustomLogger's async_moderation_hook()
"""
@@ -146,7 +156,9 @@ class ProxyLogging:
new_data = copy.deepcopy(data)
try:
if isinstance(callback, CustomLogger):
await callback.async_moderation_hook(data=new_data)
await callback.async_moderation_hook(
data=new_data, call_type=call_type
)
except Exception as e:
raise e
return data