From 311b4f9d2cacca221a4845256f07eba643be24aa Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 20 Nov 2023 19:14:00 -0800
Subject: [PATCH] docs(rules.md): adding rules to docs

---
 docs/my-website/docs/rules.md | 89 +++++++++++++++++++++++++++++++++++
 docs/my-website/sidebars.js   |  3 +-
 2 files changed, 91 insertions(+), 1 deletion(-)
 create mode 100644 docs/my-website/docs/rules.md

diff --git a/docs/my-website/docs/rules.md b/docs/my-website/docs/rules.md
new file mode 100644
index 000000000..97da9096d
--- /dev/null
+++ b/docs/my-website/docs/rules.md
@@ -0,0 +1,89 @@
+# Rules
+
+Use rules to fail a request based on the input to, or output of, an LLM API call.
+
+```python
+import litellm
+import os
+
+# set env vars
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["OPENROUTER_API_KEY"] = "your-api-key"
+
+def my_custom_rule(input): # receives the model response
+    if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
+        return False
+    return True
+
+litellm.post_call_rules = [my_custom_rule] # rules are plain functions; returning False fails the call
+
+response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user",
+"content": "Hey, how's it going?"}], fallbacks=["openrouter/gryphe/mythomax-l2-13b"])
+```
+
+## Available Hooks
+
+* `litellm.pre_call_rules = []` - A list of functions to iterate over before making the API call. Each function is expected to return either True (allow call) or False (fail call).
+
+* `litellm.post_call_rules = []` - A list of functions to iterate over after receiving the API response. Each function is expected to return either True (allow call) or False (fail call).
+
+## Expected format of a rule
+
+```python
+def my_custom_rule(input: str) -> bool: # receives the user input or model response
+    if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
+        return False
+    return True
+```
+
+#### Inputs
+* `input`: *str*: The user input or LLM response.
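+
+Because `input` is always a plain string (the user input for pre-call rules, the model response for post-call rules), one rule function can often be registered for both hooks. A minimal sketch, where the blocklist is a hypothetical example and not part of litellm:
+
+```python
+import litellm
+
+BLOCKED_PHRASES = ["as an ai language model"] # hypothetical blocklist, purely illustrative
+
+def no_blocked_phrases(input: str) -> bool: # works on user input or model response alike
+    return not any(phrase in input.lower() for phrase in BLOCKED_PHRASES)
+
+litellm.pre_call_rules = [no_blocked_phrases]
+litellm.post_call_rules = [no_blocked_phrases]
+```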
+
+#### Outputs
+* `bool`: Return True (allow call) or False (fail call)
+
+## Example Rules
+
+### Example 1: Fail if user input is too long
+
+```python
+import litellm
+import os
+
+# set env vars
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+
+def my_custom_rule(input): # receives the user input
+    if len(input) > 10: # fail the call if the input is longer than 10 characters
+        return False
+    return True
+
+litellm.pre_call_rules = [my_custom_rule] # rules are plain functions; returning False fails the call
+
+response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey, how's it going?"}])
+```
+
+### Example 2: Fallback to uncensored model if the LLM refuses to answer
+
+```python
+import litellm
+import os
+
+# set env vars
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["OPENROUTER_API_KEY"] = "your-api-key"
+
+def my_custom_rule(input): # receives the model response
+    if "i don't think i can answer" in input: # trigger fallback if the model refuses to answer
+        return False
+    return True
+
+litellm.post_call_rules = [my_custom_rule] # rules are plain functions; returning False fails the call
+
+response = litellm.completion(model="gpt-3.5-turbo", messages=[{"role": "user",
+"content": "Hey, how's it going?"}], fallbacks=["openrouter/gryphe/mythomax-l2-13b"])
+```
\ No newline at end of file
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 1a523a907..c471c0d6d 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -85,8 +85,8 @@ const sidebars = {
         ]
       },
       "simple_proxy",
-      "budget_manager",
       "routing",
+      "rules",
      "set_keys",
       "completion/token_usage",
       {
@@ -157,6 +157,7 @@ const sidebars = {
       label: 'Extras',
       items: [
         'extras/contributing',
+        "budget_manager",
         "proxy_server",
         {
           type: "category",