forked from phoenix/litellm-mirror
fix - /moderation don't require a model
This commit is contained in:
parent cc11cedf02
commit eb8a9b2654
3 changed files with 17 additions and 15 deletions
@@ -600,7 +600,7 @@ class OpenAIChatCompletion(BaseLLM):
         api_key: Optional[str] = None,
         api_base: Optional[str] = None,
         timeout: Union[float, httpx.Timeout] = httpx.Timeout(None),
-        max_retries: Optional[int] = None,
+        max_retries: Optional[int] = 2,
         organization: Optional[str] = None,
         client: Optional[Union[OpenAI, AsyncOpenAI]] = None,
     ):

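The practical effect of the max_retries change above is that a caller which never passes max_retries now ends up with 2 retried attempts instead of forwarding None. A minimal sketch of the equivalent client construction (illustrative only, not code from this diff; it assumes an OPENAI_API_KEY is set in the environment):

import os
from openai import AsyncOpenAI

# max_retries=2 mirrors the new default above; previously a None value
# would have had to be handled before reaching the SDK client.
client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"], max_retries=2)
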
@@ -3852,14 +3852,20 @@ def moderation(


 @client
-async def amoderation(input: str, model: str, api_key: Optional[str] = None, **kwargs):
+async def amoderation(
+    input: str, model: Optional[str] = None, api_key: Optional[str] = None, **kwargs
+):
     # only supports open ai for now
     api_key = (
         api_key or litellm.api_key or litellm.openai_key or get_secret("OPENAI_API_KEY")
     )
     openai_client = kwargs.get("client", None)
     if openai_client is None:
-        openai_client = openai.AsyncOpenAI(
+        # call helper to get OpenAI client
+        # _get_openai_client maintains in-memory caching logic for OpenAI clients
+        openai_client = openai_chat_completions._get_openai_client(
+            is_async=True,
             api_key=api_key,
         )
     response = await openai_client.moderations.create(input=input, model=model)

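With model now Optional in amoderation, a caller can omit it and let OpenAI choose its default moderation model. A minimal usage sketch under that assumption (requires OPENAI_API_KEY in the environment; the input string is made up, and the response shape mirrors the OpenAI moderation response):

import asyncio
import litellm

async def main():
    # No model argument: after this change the parameter defaults to None,
    # so the request no longer has to force a specific moderation model.
    response = await litellm.amoderation(input="is this text safe to post?")
    print(response.results[0].flagged)

asyncio.run(main())
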
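The in-memory caching mentioned in the new comment lives in _get_openai_client, which is not shown in this diff. As a rough, hypothetical sketch of that kind of client caching (names, parameters, and defaults here are illustrative, not the actual litellm helper):

from typing import Optional, Union

from openai import AsyncOpenAI, OpenAI

_client_cache: dict = {}  # illustrative: keyed by the connection parameters


def _get_cached_openai_client(
    is_async: bool,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
    timeout: float = 600.0,
    max_retries: int = 2,
) -> Union[OpenAI, AsyncOpenAI]:
    # Reuse a previously built client when the connection parameters match,
    # so repeated calls do not pay for a fresh HTTP connection pool each time.
    key = (is_async, api_key, api_base, timeout, max_retries)
    if key not in _client_cache:
        cls = AsyncOpenAI if is_async else OpenAI
        _client_cache[key] = cls(
            api_key=api_key,
            base_url=api_base,
            timeout=timeout,
            max_retries=max_retries,
        )
    return _client_cache[key]
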
@@ -4947,7 +4947,7 @@ async def moderations(
         data["model"] = (
             general_settings.get("moderation_model", None)  # server default
             or user_model  # model name passed via cli args
-            or data["model"]  # default passed in http request
+            or data.get("model")  # default passed in http request
         )
         if user_model:
             data["model"] = user_model

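The switch from data["model"] to data.get("model") is what lets a request body without a "model" key flow through the fallback chain instead of raising. A two-line illustration:

data = {"input": "is this text safe to post?"}  # request body with no "model" key

# data["model"] would raise KeyError here; .get() returns None, so the
# `or` chain above can still fall back to the server default or CLI model.
model = data.get("model")
print(model)  # None
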
@@ -4966,37 +4966,33 @@ async def moderations(
         if "api_key" in data:
             response = await litellm.amoderation(**data)
         elif (
-            llm_router is not None and data["model"] in router_model_names
+            llm_router is not None and data.get("model") in router_model_names
         ):  # model in router model list
             response = await llm_router.amoderation(**data)
         elif (
-            llm_router is not None and data["model"] in llm_router.deployment_names
+            llm_router is not None and data.get("model") in llm_router.deployment_names
         ):  # model in router deployments, calling a specific deployment on the router
             response = await llm_router.amoderation(**data, specific_deployment=True)
         elif (
             llm_router is not None
             and llm_router.model_group_alias is not None
-            and data["model"] in llm_router.model_group_alias
+            and data.get("model") in llm_router.model_group_alias
         ):  # model set in model_group_alias
             response = await llm_router.amoderation(
                 **data
             )  # ensure this goes the llm_router, router will do the correct alias mapping
         elif (
             llm_router is not None
-            and data["model"] not in router_model_names
+            and data.get("model") not in router_model_names
             and llm_router.default_deployment is not None
         ):  # model in router deployments, calling a specific deployment on the router
             response = await llm_router.amoderation(**data)
         elif user_model is not None:  # `litellm --model <your-model-name>`
             response = await litellm.amoderation(**data)
         else:
-            raise HTTPException(
-                status_code=status.HTTP_400_BAD_REQUEST,
-                detail={
-                    "error": "moderations: Invalid model name passed in model="
-                    + data.get("model", "")
-                },
-            )
+            # /moderations does not need a "model" passed
+            # see https://platform.openai.com/docs/api-reference/moderations
+            response = await litellm.amoderation(**data)

     ### ALERTING ###
     data["litellm_status"] = "success"  # used for alerting

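End to end, the change means a proxy client can call /moderations without a "model" field in the body. An illustrative request against a locally running litellm proxy (the URL, port, and key below are assumptions, not values from this diff):

import httpx

response = httpx.post(
    "http://localhost:4000/moderations",
    headers={"Authorization": "Bearer sk-1234"},
    json={"input": "is this text safe to post?"},  # note: no "model" field
)
print(response.json())
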