forked from phoenix/litellm-mirror
(docs) add example post call rules to proxy
This commit is contained in:
parent
fcc1e23a05
commit
2873f587fd
2 changed files with 12 additions and 0 deletions
8
litellm/proxy/post_call_rules.py
Normal file
8
litellm/proxy/post_call_rules.py
Normal file
|
@@ -0,0 +1,8 @@
|
||||||
|
def post_response_rule(input):  # receives the model response
    """Post-call proxy rule: reject model responses shorter than 200 characters.

    Returns a dict with a boolean "decision" key; when the response is
    rejected, a human-readable "message" explains why.
    """
    print(f"post_response_rule:input={input}")  # noqa
    # Guard clause: a sufficiently long response passes straight through.
    if len(input) >= 200:
        return {"decision": True}  # message not required since, request will pass
    return {
        "decision": False,
        "message": "This violates LiteLLM Proxy Rules. Response too short",
    }
|
|
@@ -14,6 +14,9 @@ model_list:
|
||||||
- model_name: BEDROCK_GROUP
|
- model_name: BEDROCK_GROUP
|
||||||
litellm_params:
|
litellm_params:
|
||||||
model: bedrock/cohere.command-text-v14
|
model: bedrock/cohere.command-text-v14
|
||||||
|
- model_name: sagemaker
|
||||||
|
litellm_params:
|
||||||
|
model: sagemaker/berri-benchmarking-Llama-2-70b-chat-hf-4
|
||||||
- model_name: openai-gpt-3.5
|
- model_name: openai-gpt-3.5
|
||||||
litellm_params:
|
litellm_params:
|
||||||
model: gpt-3.5-turbo
|
model: gpt-3.5-turbo
|
||||||
|
@@ -42,6 +45,7 @@ model_list:
|
||||||
model_info:
|
model_info:
|
||||||
mode: embedding
|
mode: embedding
|
||||||
litellm_settings:
|
litellm_settings:
|
||||||
|
post_call_rules: post_call_rules.post_response_rule
|
||||||
fallbacks: [{"openai-gpt-3.5": ["azure-gpt-3.5"]}]
|
fallbacks: [{"openai-gpt-3.5": ["azure-gpt-3.5"]}]
|
||||||
# cache: True
|
# cache: True
|
||||||
# setting callback class
|
# setting callback class
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue