#### What this tests ####
# This tests chaos monkeys - if random parts of the system are broken / things aren't sent correctly - what happens.
# Expect to add more edge cases to this over time.

import os
import sys
import traceback

import pytest

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import litellm
from litellm import completion, embedding
from litellm.utils import Message

# litellm.set_verbose = True
user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]
model_val = None


def test_completion_with_no_model():
    # test on empty
    with pytest.raises(ValueError):
        completion(messages=messages)


def test_completion_with_empty_model():
    # test on empty
    try:
        completion(model=model_val, messages=messages)
    except Exception as e:
        print(f"error occurred: {e}")
        pass
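

# One more edge case in the same spirit (a sketch, not part of the original
# suite): completion() with an empty messages list. This assumes litellm, or
# the upstream API, rejects an empty `messages` with an exception of some kind.
def test_completion_with_empty_messages():
    with pytest.raises(Exception):
        completion(model="gpt-3.5-turbo", messages=[])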


# def test_completion_catch_nlp_exception():
#     # TEMP commented out - NLP Cloud API is unstable
#     try:
#         response = completion(model="dolphin", messages=messages, functions=[
#             {
#                 "name": "get_current_weather",
#                 "description": "Get the current weather in a given location",
#                 "parameters": {
#                     "type": "object",
#                     "properties": {
#                         "location": {
#                             "type": "string",
#                             "description": "The city and state, e.g. San Francisco, CA"
#                         },
#                         "unit": {
#                             "type": "string",
#                             "enum": ["celsius", "fahrenheit"]
#                         }
#                     },
#                     "required": ["location"]
#                 }
#             }
#         ])
#     except Exception as e:
#         if "Function calling is not supported by nlp_cloud" in str(e):
#             pass
#         else:
#             pytest.fail(f"An error occurred {e}")

# test_completion_catch_nlp_exception()
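

# An alternative to commenting flaky tests out entirely (a sketch, not part of
# the original suite): pytest's skip marker keeps the test collected, so it
# shows up in reports with a reason instead of silently disappearing.
@pytest.mark.skip(reason="NLP Cloud API is unstable")
def test_completion_nlp_cloud_skipped():
    response = completion(model="dolphin", messages=messages)
    print(f"response: {response}")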


def test_completion_invalid_param_cohere():
    try:
        litellm.set_verbose = True
        completion(model="command-nightly", messages=messages, seed=12)
        pytest.fail("This should have failed - cohere does not support the `seed` parameter")
    except Exception as e:
        assert isinstance(e, litellm.UnsupportedParamsError)
        print("got an exception=", str(e))
        if " cohere does not support parameters: {'seed': 12}" in str(e):
            pass
        else:
            pytest.fail(f"An error occurred {e}")
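

# A companion check (a sketch, not part of the original suite): litellm's
# `drop_params` flag drops provider-unsupported params instead of raising, so
# the same call as above should go through. Assumes a valid COHERE_API_KEY.
def test_completion_invalid_param_cohere_drop_params():
    try:
        litellm.drop_params = True  # drop `seed` instead of raising
        completion(model="command-nightly", messages=messages, seed=12)
    except litellm.UnsupportedParamsError:
        pytest.fail("`seed` should have been dropped, not raised, with drop_params=True")
    finally:
        litellm.drop_params = False  # restore the default for the other tests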


def test_completion_function_call_cohere():
    try:
        completion(
            model="command-nightly", messages=messages, functions=["TEST-FUNCTION"]
        )
        pytest.fail("This should have failed - `functions` is not in a valid format")
    except Exception as e:
        print(e)
        pass


# test_completion_function_call_cohere()


def test_completion_function_call_openai():
    try:
        messages = [{"role": "user", "content": "What is the weather like in Boston?"}]
        response = completion(
            model="gpt-3.5-turbo",
            messages=messages,
            functions=[
                {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                }
            ],
        )
        print(f"response: {response}")
    except Exception:
        pass


# test_completion_function_call_openai()


def test_completion_with_no_provider():
    # test on empty
    try:
        model = "cerebras/btlm-3b-8k-base"
        completion(model=model, messages=messages)
    except Exception as e:
        print(f"error occurred: {e}")
        pass


# test_completion_with_no_provider()

# # bad key
# temp_key = os.environ.get("OPENAI_API_KEY")
# os.environ["OPENAI_API_KEY"] = "bad-key"
# # test on openai completion call
# try:
#     response = completion(model="gpt-3.5-turbo", messages=messages)
#     print(f"response: {response}")
# except Exception:
#     print(f"error occurred: {traceback.format_exc()}")
#     pass
# os.environ["OPENAI_API_KEY"] = str(temp_key)  # this passes linting#5
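

# A pytest-native version of the bad-key experiment above (a sketch, not part
# of the original suite): `monkeypatch` scopes the bad OPENAI_API_KEY to this
# test, so the real key does not need to be saved and restored by hand. Assumes
# litellm surfaces the resulting 401 as litellm.AuthenticationError.
def test_completion_with_bad_openai_key(monkeypatch):
    monkeypatch.setenv("OPENAI_API_KEY", "bad-key")
    with pytest.raises(litellm.AuthenticationError):
        completion(model="gpt-3.5-turbo", messages=messages)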