docs on /moderations
commit 3e162b3b8c
parent a9b564782c
2 changed files with 23 additions and 5 deletions
````diff
@@ -18,9 +18,19 @@ response = moderation(
 ```
 
 </TabItem>
-<TabItem value="openai" label="LiteLLM Proxy Server">
+<TabItem value="proxy" label="LiteLLM Proxy Server">
 
-For `/moderations` endpoint, there is no need
+For `/moderations` endpoint, there is **no need to specify `model` in the request or on the litellm config.yaml**
 
+Start litellm proxy server
+
+```
+litellm
+```
+
+
+<Tabs>
+<TabItem value="python" label="OpenAI Python SDK">
+
 ```python
 from openai import OpenAI
````
````diff
@@ -31,12 +41,13 @@ client = OpenAI(api_key="<proxy-api-key>", base_url="http://0.0.0.0:4000")
 
 response = client.moderations.create(
     input="hello from litellm",
-    model="text-moderation-stable"
+    model="text-moderation-stable" # optional, defaults to `omni-moderation-latest`
 )
 
 print(response)
 ```
 </TabItem>
 
 <TabItem value="curl" label="Curl Request">
 
 ```shell
````
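Pieced together, the proxy example these two hunks add reads as one runnable snippet; a minimal sketch assuming the proxy was started with `litellm` on its default port 4000 and that `<proxy-api-key>` is replaced with a key the proxy accepts:

```python
from openai import OpenAI

# Point the OpenAI SDK at the LiteLLM proxy started with `litellm`
# (it listens on http://0.0.0.0:4000 by default).
client = OpenAI(api_key="<proxy-api-key>", base_url="http://0.0.0.0:4000")

response = client.moderations.create(
    input="hello from litellm",
    model="text-moderation-stable"  # optional, defaults to `omni-moderation-latest`
)

print(response)
```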
````diff
@@ -48,6 +59,9 @@ curl --location 'http://0.0.0.0:4000/moderations' \
 </TabItem>
 </Tabs>
 
+</TabItem>
+</Tabs>
+
 ## Input Params
 LiteLLM accepts and translates the [OpenAI Moderation params](https://platform.openai.com/docs/api-reference/moderations) across all supported providers.
 
````
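For comparison, the direct-SDK route visible in the first hunk's `@@` context (`response = moderation(`) takes the same OpenAI params; a hedged sketch, with the default `model` taken from the comment this diff adds:

```python
import litellm

# `input` and `model` mirror the OpenAI /moderations params; `model`
# is optional and, per the comment added above, defaults to
# `omni-moderation-latest`.
response = litellm.moderation(
    input="hello from litellm",
    model="text-moderation-stable",
)
print(response)
```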
````diff
@@ -111,3 +125,8 @@ Here's the exact json output and type you can expect from all moderation calls:
 ```
 
 
+## **Supported Providers**
+
+| Provider |
+|-------------|
+| OpenAI |
````
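The JSON output block itself is truncated in the hunk above; assuming the standard OpenAI Moderation response shape the docs point to (`results` with `flagged`, `categories`, and `category_scores` per input — an assumption here, not content from this diff), inspecting a result looks roughly like:

```python
from openai import OpenAI

client = OpenAI(api_key="<proxy-api-key>", base_url="http://0.0.0.0:4000")
response = client.moderations.create(input="hello from litellm")

# Assumed shape: one entry in `results` per input string, with boolean
# `categories` and 0-1 `category_scores` keyed by category name.
result = response.results[0]
print("flagged:", result.flagged)
for name, hit in result.categories.model_dump().items():
    if hit:
        print(f"{name}: {result.category_scores.model_dump()[name]:.3f}")
```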
````diff
@@ -246,7 +246,6 @@ const sidebars = {
           "completion/usage",
         ],
       },
-      "text_completion",
       "embedding/supported_embedding",
       "image_generation",
       {
````
````diff
@@ -262,7 +261,7 @@ const sidebars = {
       "batches",
       "realtime",
       "fine_tuning",
-      "moderation","
+      "moderation",
       {
         type: "link",
         label: "Use LiteLLM Proxy with Vertex, Bedrock SDK",
````