forked from phoenix/litellm-mirror
feat - add debugging for moderation response
This commit is contained in:
parent 1fe035c6dd
commit 6aad4e38d3
1 changed file with 3 additions and 1 deletion
@@ -25,6 +25,7 @@ from litellm.utils import (
 )
 from datetime import datetime
 import aiohttp, asyncio
+from litellm._logging import verbose_proxy_logger

 litellm.set_verbose = True

@@ -44,7 +45,6 @@ class _ENTERPRISE_OpenAI_Moderation(CustomLogger):
         user_api_key_dict: UserAPIKeyAuth,
         call_type: Literal["completion", "embeddings", "image_generation"],
     ):
-        _messages = data.get("messages", None)
         if "messages" in data and isinstance(data["messages"], list):
             text = ""
             for m in data["messages"]:  # assume messages is a list
@@ -59,6 +59,8 @@ class _ENTERPRISE_OpenAI_Moderation(CustomLogger):
         moderation_response = await llm_router.amoderation(
             model=self.model_name, input=text
         )
+
+        verbose_proxy_logger.debug("Moderation response: %s", moderation_response)
         if moderation_response.results[0].flagged == True:
             raise HTTPException(
                 status_code=403, detail={"error": "Violated content safety policy"}
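For readers trying out this change: verbose_proxy_logger is a standard-library logging logger, so the new debug line only appears once DEBUG output is enabled for it. A minimal sketch of doing that outside the proxy, assuming litellm is installed; the fake_response dict is a hypothetical stand-in for the router's moderation response, and the basicConfig setup is an assumption for illustration:

import logging

from litellm._logging import verbose_proxy_logger

# Send log records to stderr and lower the proxy logger's threshold so that
# DEBUG-level records (like the new moderation line) are actually emitted.
logging.basicConfig(level=logging.DEBUG)
verbose_proxy_logger.setLevel(logging.DEBUG)

# Hypothetical stand-in payload; the real hook logs the object returned by
# llm_router.amoderation(...).
fake_response = {"results": [{"flagged": False}]}
verbose_proxy_logger.debug("Moderation response: %s", fake_response)

Note that the added line passes the response as a %s argument rather than interpolating it into the string: with lazy formatting, the moderation payload is only stringified when a DEBUG handler is actually listening, so the call costs essentially nothing in production.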