[Feat-Proxy] Slack Alerting - allow using os.environ/ vars for alert to webhook url (#5726)
* allow using os.environ for slack urls
* use env vars for webhook urls
* fix types for get_secret
* fix linting
* fix linting
* fix linting
* linting fixes
* linting fix
* docs alerting slack
* fix get data
parent 703fd9395f
commit 0d027b22fd
23 changed files with 286 additions and 84 deletions
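
What the feature does: alerting settings that take a Slack webhook URL, such as the proxy's alert_to_webhook_url map, can now be written as "os.environ/VAR_NAME" references instead of literal URLs, and the proxy resolves them through get_secret when alerting is set up. A minimal sketch of that resolution, assuming a simplified stand-in for litellm's get_secret (the real function also consults external secret managers):

    import os
    from typing import Optional


    def get_secret(secret_name: str) -> Optional[str]:
        # Simplified stand-in: resolve "os.environ/VAR" references to the
        # environment variable's value; pass literal strings through.
        if secret_name.startswith("os.environ/"):
            env_var = secret_name.replace("os.environ/", "", 1)
            return os.environ.get(env_var)
        return secret_name


    # The webhook URL never has to appear in the config file itself:
    os.environ["SLACK_WEBHOOK_URL"] = "https://hooks.slack.com/services/T00/B00/XXX"
    print(get_secret("os.environ/SLACK_WEBHOOK_URL"))  # resolved URL
    print(get_secret("https://example.com/literal"))   # passed through unchanged

This keeps secrets out of checked-in config files and lets each deployment environment supply its own webhook.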
@@ -7,18 +7,22 @@
 
 ## Reject a call if it contains a prompt injection attack.
 
-from typing import Optional, Literal
-import litellm
-from litellm.caching import DualCache
-from litellm.proxy._types import UserAPIKeyAuth, LiteLLMPromptInjectionParams
-from litellm.integrations.custom_logger import CustomLogger
-from litellm._logging import verbose_proxy_logger
-from litellm.utils import get_formatted_prompt
-from litellm.llms.prompt_templates.factory import prompt_injection_detection_default_pt
-from fastapi import HTTPException
-import json, traceback, re
+import json
+import re
+import traceback
 from difflib import SequenceMatcher
-from typing import List
+from typing import List, Literal, Optional
+
+from fastapi import HTTPException
+from typing_extensions import overload
+
+import litellm
+from litellm._logging import verbose_proxy_logger
+from litellm.caching import DualCache
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.llms.prompt_templates.factory import prompt_injection_detection_default_pt
+from litellm.proxy._types import LiteLLMPromptInjectionParams, UserAPIKeyAuth
+from litellm.utils import get_formatted_prompt
 
 
 class _OPTIONAL_PromptInjectionDetection(CustomLogger):
@@ -201,7 +205,7 @@ class _OPTIONAL_PromptInjectionDetection(CustomLogger):
                 and self.prompt_injection_params is not None
                 and self.prompt_injection_params.reject_as_response
             ):
-                return e.detail["error"]
+                return e.detail.get("error")
             raise e
         except Exception as e:
             verbose_proxy_logger.error(
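
Why the one-line change above matters: e.detail["error"] raises a KeyError when the HTTPException's detail dict carries no "error" key, while e.detail.get("error") returns None and lets the caller degrade gracefully. A quick illustration (the dict literal is invented):

    detail = {"message": "blocked"}  # no "error" key
    print(detail.get("error"))       # -> None, no exception raised
    # detail["error"] would raise KeyError: 'error'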
@@ -211,18 +215,24 @@ class _OPTIONAL_PromptInjectionDetection(CustomLogger):
             )
             verbose_proxy_logger.debug(traceback.format_exc())
 
-    async def async_moderation_hook(
+    async def async_moderation_hook(  # type: ignore
         self,
         data: dict,
         user_api_key_dict: UserAPIKeyAuth,
-        call_type: Literal["completion", "embeddings", "image_generation"],
-    ):
+        call_type: Literal[
+            "completion",
+            "embeddings",
+            "image_generation",
+            "moderation",
+            "audio_transcription",
+        ],
+    ) -> Optional[bool]:
         self.print_verbose(
             f"IN ASYNC MODERATION HOOK - self.prompt_injection_params = {self.prompt_injection_params}"
         )
 
         if self.prompt_injection_params is None:
-            return
+            return None
 
         formatted_prompt = get_formatted_prompt(data=data, call_type=call_type)  # type: ignore
         is_prompt_attack = False
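
The signature change above does two things: call_type now enumerates every call type the proxy may pass (the "moderation" and "audio_transcription" members were missing), and the return type is an explicit Optional[bool], with the bare return replaced by return None to match. A hypothetical subclass sketch showing the shape a custom moderation hook takes against this interface (MyModerationHook and its rejection rule are invented for illustration; CustomLogger and UserAPIKeyAuth are the litellm types imported in the diff above):

    from typing import Literal, Optional

    from fastapi import HTTPException

    from litellm.integrations.custom_logger import CustomLogger
    from litellm.proxy._types import UserAPIKeyAuth


    class MyModerationHook(CustomLogger):
        async def async_moderation_hook(  # type: ignore
            self,
            data: dict,
            user_api_key_dict: UserAPIKeyAuth,
            call_type: Literal[
                "completion",
                "embeddings",
                "image_generation",
                "moderation",
                "audio_transcription",
            ],
        ) -> Optional[bool]:
            # Reject completion prompts containing an obviously injected
            # phrase; return None ("no verdict") for everything else.
            if call_type == "completion":
                text = str(data.get("messages", "")).lower()
                if "ignore previous instructions" in text:
                    raise HTTPException(
                        status_code=400, detail={"error": "rejected prompt injection"}
                    )
            return None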