mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 03:34:10 +00:00
refactor: add black formatting
This commit is contained in:
parent
b87d630b0a
commit
4905929de3
156 changed files with 19723 additions and 10869 deletions
|
@ -1,22 +1,25 @@
|
|||
#### What this tests ####
# This tests setting rules before / after making llm api calls

import asyncio
import os
import sys
import time
import traceback

import pytest

# Make the repository root importable when this test file is run directly.
sys.path.insert(0, os.path.abspath("../.."))

import litellm
from litellm import acompletion, completion
def my_pre_call_rule(input: str):
    """Pre-call rule hook: allow the request only when the prompt is short.

    Returns True (proceed) for inputs of 10 characters or fewer, and
    False (block the call) for anything longer.
    """
    print(f"input: {input}")
    print(f"INSIDE MY PRE CALL RULE, len(input) - {len(input)}")
    # Anything longer than 10 characters is rejected.
    return len(input) <= 10
||||
|
||||
def my_post_call_rule(input: str):
|
||||
|
||||
def my_post_call_rule(input: str):
|
||||
input = input.lower()
|
||||
print(f"input: {input}")
|
||||
print(f"INSIDE MY POST CALL RULE, len(input) - {len(input)}")
|
||||
|
@ -24,16 +27,20 @@ def my_post_call_rule(input: str):
|
|||
return False
|
||||
return True
|
||||
|
||||
## Test 1: Pre-call rule
|
||||
|
||||
## Test 1: Pre-call rule
|
||||
def test_pre_call_rule():
|
||||
try:
|
||||
try:
|
||||
litellm.pre_call_rules = [my_pre_call_rule]
|
||||
### completion
|
||||
response = completion(model="gpt-3.5-turbo",
|
||||
messages=[{"role": "user", "content": "say something inappropriate"}])
|
||||
### completion
|
||||
response = completion(
|
||||
model="gpt-3.5-turbo",
|
||||
messages=[{"role": "user", "content": "say something inappropriate"}],
|
||||
)
|
||||
pytest.fail(f"Completion call should have been failed. ")
|
||||
except:
|
||||
except:
|
||||
pass
|
||||
|
||||
### async completion
|
||||
async def test_async_response():
|
||||
user_message = "Hello, how are you?"
|
||||
|
@ -43,22 +50,24 @@ def test_pre_call_rule():
|
|||
pytest.fail(f"acompletion call should have been failed. ")
|
||||
except Exception as e:
|
||||
pass
|
||||
|
||||
asyncio.run(test_async_response())
|
||||
litellm.pre_call_rules = []
|
||||
|
||||
# test_pre_call_rule()
|
||||
## Test 2: Post-call rule
|
||||
|
||||
# test_pre_call_rule()
|
||||
## Test 2: Post-call rule
|
||||
# commenting out of ci/cd since llm's have variable output which was causing our pipeline to fail erratically.
|
||||
# def test_post_call_rule():
|
||||
# try:
|
||||
# try:
|
||||
# litellm.pre_call_rules = []
|
||||
# litellm.post_call_rules = [my_post_call_rule]
|
||||
# ### completion
|
||||
# response = completion(model="gpt-3.5-turbo",
|
||||
# ### completion
|
||||
# response = completion(model="gpt-3.5-turbo",
|
||||
# messages=[{"role": "user", "content": "say sorry"}],
|
||||
# fallbacks=["deepinfra/Gryphe/MythoMax-L2-13b"])
|
||||
# pytest.fail(f"Completion call should have been failed. ")
|
||||
# except:
|
||||
# except:
|
||||
# pass
|
||||
# print(f"MAKING ACOMPLETION CALL")
|
||||
# # litellm.set_verbose = True
|
||||
|
@ -74,4 +83,4 @@ def test_pre_call_rule():
|
|||
# litellm.pre_call_rules = []
|
||||
# litellm.post_call_rules = []
|
||||
|
||||
# test_post_call_rule()
|
||||
# test_post_call_rule()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue