(docs + fix) Add docs on Moderations endpoint, Text Completion (#6947)
* fix _pass_through_moderation_endpoint_factory
* fix route_llm_request
* doc moderations api
* docs on /moderations
* add e2e tests for moderations api
* docs moderations api
* test_pass_through_moderation_endpoint_factory
* docs text completion
parent eba700a491
commit 4ebb7c8a7f
7 changed files with 390 additions and 6 deletions
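Since the commit adds docs for the /moderations and text-completion routes, here is a minimal sketch (not part of this commit) of exercising both against a LiteLLM proxy with the OpenAI Python SDK. The base URL http://0.0.0.0:4000 and the virtual key "sk-1234" are taken from the e2e tests below; the text-completion model name is a hypothetical placeholder that depends on the proxy config.

# Minimal sketch, not part of the diff below. Assumes a LiteLLM proxy running at
# http://0.0.0.0:4000 with virtual key "sk-1234" (as in the e2e tests) and a
# text-completion model exposed under the (hypothetical) name "gpt-3.5-turbo-instruct".
from openai import OpenAI

client = OpenAI(api_key="sk-1234", base_url="http://0.0.0.0:4000")

# /moderations without a model -- the case the no-model e2e test below exercises
flagged = client.moderations.create(input="I want to harm someone")

# /moderations with an explicit model
flagged_with_model = client.moderations.create(
    input="I want to harm someone",
    model="text-moderation-stable",
)

# /completions (text completion) against the same proxy
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say this is a test",
)

print(flagged, flagged_with_model, completion)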
tests/otel_tests/test_moderations.py (new file, 71 lines added)
@@ -0,0 +1,71 @@
import pytest
import asyncio
import aiohttp, openai
from openai import OpenAI, AsyncOpenAI
from typing import Optional, List, Union
import uuid


async def make_moderations_curl_request(
    session,
    key,
    request_data: dict,
):
    url = "http://0.0.0.0:4000/moderations"
    headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
    }

    async with session.post(url, headers=headers, json=request_data) as response:
        status = response.status
        response_text = await response.text()

        if status != 200:
            raise Exception(response_text)

        return await response.json()


@pytest.mark.asyncio
async def test_basic_moderations_on_proxy_no_model():
    """
    Test moderations endpoint on proxy when no `model` is specified in the request
    """
    async with aiohttp.ClientSession() as session:
        test_text = "I want to harm someone"  # Test text that should trigger moderation
        request_data = {
            "input": test_text,
        }
        try:
            response = await make_moderations_curl_request(
                session,
                "sk-1234",
                request_data,
            )
            print("response=", response)
        except Exception as e:
            print(e)
            pytest.fail("Moderations request failed")


@pytest.mark.asyncio
async def test_basic_moderations_on_proxy_with_model():
    """
    Test moderations endpoint on proxy when `model` is specified in the request
    """
    async with aiohttp.ClientSession() as session:
        test_text = "I want to harm someone"  # Test text that should trigger moderation
        request_data = {
            "input": test_text,
            "model": "text-moderation-stable",
        }
        try:
            response = await make_moderations_curl_request(
                session,
                "sk-1234",
                request_data,
            )
            print("response=", response)
        except Exception as e:
            pytest.fail("Moderations request failed")
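The helper above can also be driven outside pytest; a minimal standalone sketch follows, assuming the proxy from the tests is reachable at http://0.0.0.0:4000 with the "sk-1234" test key and that make_moderations_curl_request from this file is in scope.

# Minimal standalone sketch, not part of this commit. Reuses
# make_moderations_curl_request defined above; assumes the proxy and the
# "sk-1234" test key from the tests in this file.
import asyncio
import aiohttp


async def main():
    async with aiohttp.ClientSession() as session:
        result = await make_moderations_curl_request(
            session,
            "sk-1234",
            {"input": "I want to harm someone"},
        )
        print(result)


asyncio.run(main())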
@@ -1040,8 +1040,11 @@ def test_pattern_match_deployment_set_model_name(
 async def test_pass_through_moderation_endpoint_factory(model_list):
     router = Router(model_list=model_list)
     response = await router._pass_through_moderation_endpoint_factory(
-        original_function=litellm.amoderation, input="this is valid good text"
+        original_function=litellm.amoderation,
+        input="this is valid good text",
+        model=None,
     )
     assert response is not None


 @pytest.mark.parametrize(
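For reference, the arguments the updated router test passes through correspond to a direct litellm.amoderation call; a minimal sketch (not part of the commit, assuming OPENAI_API_KEY is set in the environment):

# Minimal sketch, not part of this commit: calling litellm.amoderation directly
# with the same arguments the updated router test passes through.
# Assumes OPENAI_API_KEY is set in the environment.
import asyncio
import litellm


async def main():
    response = await litellm.amoderation(
        input="this is valid good text",
        model=None,  # same as the test: no explicit moderation model
    )
    print(response)


asyncio.run(main())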