Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
feat(lakera_ai.py): control running prompt injection between pre-call and in_parallel
parent a32a7af215
commit 99a5436ed5
6 changed files with 211 additions and 37 deletions
```diff
@@ -110,7 +110,12 @@ def initialize_callbacks_on_proxy(
                     + CommonProxyErrors.not_premium_user.value
                 )
 
-            lakera_moderations_object = _ENTERPRISE_lakeraAI_Moderation()
+            init_params = {}
+            if "lakera_prompt_injection" in callback_specific_params:
+                init_params = callback_specific_params["lakera_prompt_injection"]
+            lakera_moderations_object = _ENTERPRISE_lakeraAI_Moderation(
+                **init_params
+            )
             imported_list.append(lakera_moderations_object)
         elif isinstance(callback, str) and callback == "aporio_prompt_injection":
             from enterprise.enterprise_hooks.aporio_ai import _ENTERPRISE_Aporio
```
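The change wires `callback_specific_params` from the proxy config into the Lakera guardrail's constructor, which is what allows the prompt-injection check to be configured to run either before the LLM call (pre-call) or alongside it (in parallel), per the commit title. The sketch below is a minimal, self-contained illustration of that pattern, not the litellm source: the class name `LakeraAIModeration`, the helper `build_callback`, and the `moderation_check` parameter name are assumptions made for illustration.

```python
# Minimal sketch of the init-params pattern in this diff (names are illustrative).
from typing import Literal


class LakeraAIModeration:
    """Stand-in for _ENTERPRISE_lakeraAI_Moderation (illustrative only)."""

    def __init__(
        self, moderation_check: Literal["pre_call", "in_parallel"] = "in_parallel"
    ):
        # "pre_call": run the check before the request reaches the model.
        # "in_parallel": run the check alongside the LLM call.
        self.moderation_check = moderation_check


def build_callback(
    callback_name: str, callback_specific_params: dict
) -> LakeraAIModeration:
    # Same shape as the diff: default to no init params, then override them
    # when the config carries a section for this callback.
    init_params = {}
    if callback_name in callback_specific_params:
        init_params = callback_specific_params[callback_name]
    return LakeraAIModeration(**init_params)


# Example: a config that asks for the pre-call check.
params = {"lakera_prompt_injection": {"moderation_check": "pre_call"}}
obj = build_callback("lakera_prompt_injection", params)
print(obj.moderation_check)  # -> "pre_call"
```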