From 3e162b3b8cd3340d5bf303ca9faa105d690dc9da Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 27 Nov 2024 15:43:15 -0800
Subject: [PATCH] docs on /moderations

---
 docs/my-website/docs/moderation.md | 25 ++++++++++++++++++++++---
 docs/my-website/sidebars.js        |  3 +--
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/docs/my-website/docs/moderation.md b/docs/my-website/docs/moderation.md
index bd756fe8d..1bde75237 100644
--- a/docs/my-website/docs/moderation.md
+++ b/docs/my-website/docs/moderation.md
@@ -18,9 +18,19 @@ response = moderation(
 ```
 
-
+
-For `/moderations` endpoint, there is no need
+For `/moderations` endpoint, there is **no need to specify `model` in the request or on the litellm config.yaml**
+
+Start litellm proxy server
+
+```
+litellm
+```
+
+
+
 
 ```python
 from openai import OpenAI
@@ -31,12 +41,13 @@ client = OpenAI(api_key="", base_url="http://0.0.0.0:4000")
 
 response = client.moderations.create(
     input="hello from litellm",
-    model="text-moderation-stable"
+    model="text-moderation-stable" # optional, defaults to `omni-moderation-latest`
 )
 
 print(response)
 ```
 
+
 
 ```shell
@@ -48,6 +59,9 @@ curl --location 'http://0.0.0.0:4000/moderations' \
 
 
+
+
+
 ## Input Params
 LiteLLM accepts and translates the [OpenAI Moderation params](https://platform.openai.com/docs/api-reference/moderations) across all supported providers.
@@ -111,3 +125,8 @@ Here's the exact json output and type you can expect from all moderation calls:
 ```
 
+## **Supported Providers**
+
+| Provider    |
+|-------------|
+| OpenAI      |
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 79eb326bd..49fe33343 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -246,7 +246,6 @@ const sidebars = {
         "completion/usage",
       ],
     },
-    "text_completion",
     "embedding/supported_embedding",
     "image_generation",
     {
@@ -262,7 +261,7 @@ const sidebars = {
       "batches",
       "realtime",
       "fine_tuning",
-      "moderation","
+      "moderation",
       {
         type: "link",
         label: "Use LiteLLM Proxy with Vertex, Bedrock SDK",