feat(llm_guard.py): enable key-specific llm guard check

Krrish Dholakia 2024-03-26 17:21:51 -07:00
parent 313f58c483
commit 7bc76ddbc3
8 changed files with 26 additions and 3 deletions


@@ -199,6 +199,7 @@ class _OPTIONAL_PromptInjectionDetection(CustomLogger):
     async def async_moderation_hook(
         self,
         data: dict,
+        user_api_key_dict: UserAPIKeyAuth,
         call_type: Literal["completion", "embeddings", "image_generation"],
     ):
         self.print_verbose(
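
The hunk above is the new hook signature that moderation callbacks now receive. For orientation, here is a minimal sketch of a key-aware callback built on that signature, in the spirit of the llm_guard check this commit enables; the "enable_llm_guard_check" permission name and the gating logic are assumptions for illustration, not the commit's actual code.

# Sketch only: a CustomLogger callback that uses the newly threaded
# user_api_key_dict to run its check for opted-in keys only.
# The "enable_llm_guard_check" permission flag is hypothetical.
from typing import Literal

from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import UserAPIKeyAuth


class KeySpecificGuardHook(CustomLogger):
    async def async_moderation_hook(
        self,
        data: dict,
        user_api_key_dict: UserAPIKeyAuth,
        call_type: Literal["completion", "embeddings", "image_generation"],
    ):
        # Assume the key's permissions dict carries an opt-in flag.
        permissions = user_api_key_dict.permissions or {}
        if permissions.get("enable_llm_guard_check") is not True:
            return data  # key did not opt in; skip the check

        # ... run the content scan here and raise an exception on violation ...
        return data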


@@ -3168,7 +3168,9 @@ async def chat_completion(
         tasks = []
         tasks.append(
-            proxy_logging_obj.during_call_hook(data=data, call_type="completion")
+            proxy_logging_obj.during_call_hook(
+                data=data, user_api_key_dict=user_api_key_dict, call_type="completion"
+            )
         )
         start_time = time.time()
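
For context on where user_api_key_dict comes from in the handler above: the proxy resolves the caller's API key into a UserAPIKeyAuth object via a FastAPI dependency before the hook runs. The route below is a self-contained sketch of that pattern; the stub _authenticate dependency stands in for the proxy's real key-auth dependency and is not LiteLLM code.

# Sketch only: a chat-completions route that authenticates the key and
# threads the resulting UserAPIKeyAuth into the during-call hook, mirroring
# the change in the hunk above. _authenticate is a hypothetical stand-in.
from fastapi import Depends, FastAPI, Request

from litellm.proxy._types import UserAPIKeyAuth

app = FastAPI()


async def _authenticate(request: Request) -> UserAPIKeyAuth:
    # Stand-in for the proxy's real API-key authentication dependency.
    api_key = request.headers.get("Authorization", "").removeprefix("Bearer ")
    return UserAPIKeyAuth(api_key=api_key)


@app.post("/v1/chat/completions")
async def chat_completion(
    request: Request,
    user_api_key_dict: UserAPIKeyAuth = Depends(_authenticate),
):
    data = await request.json()
    # The authenticated key object is then forwarded to the moderation hook:
    # await proxy_logging_obj.during_call_hook(
    #     data=data, user_api_key_dict=user_api_key_dict, call_type="completion"
    # )
    return {"status": "accepted"}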


@@ -141,6 +141,7 @@ class ProxyLogging:
     async def during_call_hook(
         self,
         data: dict,
+        user_api_key_dict: UserAPIKeyAuth,
         call_type: Literal[
             "completion",
             "embeddings",
@@ -157,7 +158,9 @@
             try:
                 if isinstance(callback, CustomLogger):
                     await callback.async_moderation_hook(
-                        data=new_data, call_type=call_type
+                        data=new_data,
+                        user_api_key_dict=user_api_key_dict,
+                        call_type=call_type,
                     )
             except Exception as e:
                 raise e
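
With the full chain in place (the handler passes the key to during_call_hook, which forwards it to each CustomLogger), the same request can be treated differently per key. The toy driver below exercises the hypothetical KeySpecificGuardHook from the earlier sketch directly; the UserAPIKeyAuth fields used here (api_key, permissions) and the flag name remain assumptions.

# Toy driver for the hypothetical KeySpecificGuardHook sketched earlier.
import asyncio

from litellm.proxy._types import UserAPIKeyAuth


async def demo():
    hook = KeySpecificGuardHook()
    request = {"messages": [{"role": "user", "content": "hello"}]}

    opted_in = UserAPIKeyAuth(
        api_key="sk-guarded", permissions={"enable_llm_guard_check": True}
    )
    opted_out = UserAPIKeyAuth(api_key="sk-plain")

    # Same request, different keys: only the opted-in key goes through the guard path.
    await hook.async_moderation_hook(
        data=request, user_api_key_dict=opted_in, call_type="completion"
    )
    await hook.async_moderation_hook(
        data=request, user_api_key_dict=opted_out, call_type="completion"
    )


asyncio.run(demo())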