add better testing
commit d4ffd11ad3 (parent 557ba4b139)
4 changed files with 129 additions and 137 deletions
@@ -24,9 +24,12 @@ set_verbose(True)
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 model_val = "krrish is a model"
-# # test on empty
-# try:
-#     response = completion(model=model_val, messages=messages)
-# except Exception as e:
-#     print(f"error occurred: {e}")
-#     pass
+def test_completion_with_empty_model():
+    # test on empty
+    try:
+        response = completion(model=model_val, messages=messages)
+    except Exception as e:
+        print(f"error occurred: {e}")
+        pass
+
+
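Even after the rewrite, test_completion_with_empty_model still swallows the exception with print/pass, so it can never actually fail. A minimal alternative sketch, not part of this commit and assuming completion() raises for unknown model names, would assert the error explicitly:

import pytest
from litellm import completion

def test_completion_with_empty_model_raises():
    # Hypothetical variant: an invalid model name should raise, and
    # pytest.raises turns that expectation into a real assertion.
    messages = [{"content": "Hello, how are you?", "role": "user"}]
    with pytest.raises(Exception):  # assumption: completion() raises here
        completion(model="krrish is a model", messages=messages)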
@@ -3,6 +3,8 @@
 
 import sys, os
 import traceback
+import pytest
+
 sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
@@ -19,37 +21,39 @@ def logger_fn(model_call_object: dict):
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 
-# test on openai completion call
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+def test_completion_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on non-openai completion call
-try:
-    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+def test_completion_non_openai():
+    try:
+        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on openai embedding call
-try:
-    response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_embedding_openai():
+    try:
+        response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
-try:
-    response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_bad_azure_embedding():
+    try:
+        response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pass
-# test on good azure openai embedding call
-try:
-    response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_good_azure_embedding():
+    try:
+        response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
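The recurring idiom in this file: each provider call runs inside try, and any exception becomes a test failure via pytest.fail instead of being printed and swallowed. A condensed sketch of the pattern, where the helper name run_provider_test is illustrative and not from this commit:

import pytest
from litellm import completion

def run_provider_test(model, messages, **kwargs):
    # Surface any provider error as a pytest failure so CI goes red,
    # rather than printing the traceback and passing silently.
    try:
        response = completion(model=model, messages=messages, **kwargs)
        # Add any assertions on `response` here
        return response
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")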
@@ -1,6 +1,7 @@
 import sys, os
 import traceback
 sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
+import pytest
 import litellm
 from litellm import embedding, completion
 
@@ -9,112 +10,95 @@ litellm.set_verbose = True
 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]
 
-################# Test 3 #################
-# test on Azure Openai Completion Call
-try:
-    response = completion(model="chatgpt-test", messages=messages, azure=True)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1 #################
-# test on openai completion call, with model and messages
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1.1 #################
-# test on openai completion call, with model and messages, optional params
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1.2 #################
-# test on openai completion call, with model and messages, optional params
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, frequency_penalty=-0.5, logit_bias={123:5}, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1.3 #################
-# Test with Stream = True
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, stream=True, frequency_penalty=-0.5, logit_bias={27000:5}, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 2 #################
-# test on openai completion call, with functions
-function1 = [
-    {
-        "name": "get_current_weather",
-        "description": "Get the current weather in a given location",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "location": {
-                    "type": "string",
-                    "description": "The city and state, e.g. San Francisco, CA"
-                },
-                "unit": {
-                    "type": "string",
-                    "enum": ["celsius", "fahrenheit"]
-                }
-            },
-            "required": ["location"]
-        }
-    }
-]
-user_message = "Hello, whats the weather in San Francisco??"
-messages = [{ "content": user_message,"role": "user"}]
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 4 #################
-# test on Claude Completion Call
-try:
-    response = completion(model="claude-instant-1", messages=messages)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 5 #################
-# test on Cohere Completion Call
-try:
-    response = completion(model="command-nightly", messages=messages, max_tokens=500)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 6 #################
-# test on Replicate llama2 Completion Call
-try:
-    model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
-    response = completion(model=model_name, messages=messages, max_tokens=500)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
+def test_completion_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_optional_params():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_more_optional_params():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, frequency_penalty=-0.5, logit_bias={123: 5}, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_stream():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, stream=True, frequency_penalty=-0.5, logit_bias={27000: 5}, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_functions():
+    function1 = [
+        {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA"
+                    },
+                    "unit": {
+                        "type": "string",
+                        "enum": ["celsius", "fahrenheit"]
+                    }
+                },
+                "required": ["location"]
+            }
+        }
+    ]
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_azure():
+    try:
+        response = completion(model="chatgpt-test", messages=messages, azure=True)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_claude():
+    try:
+        response = completion(model="claude-instant-1", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_cohere():
+    try:
+        response = completion(model="command-nightly", messages=messages, max_tokens=500)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_replicate_llama():
+    model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+    try:
+        response = completion(model=model_name, messages=messages, max_tokens=500)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
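For test_completion_openai_with_functions, the commit only prints the raw response. A rough sketch of how such a response is typically inspected; the field layout follows the OpenAI chat-completion format, which is an assumption here rather than something this diff verifies:

import json

response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
message = response["choices"][0]["message"]
if message.get("function_call"):
    # The model chose to call the declared function; arguments arrive
    # as a JSON string and must be parsed before use.
    args = json.loads(message["function_call"]["arguments"])
    print(message["function_call"]["name"], args)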
@@ -4,5 +4,6 @@ func_timeout
 anthropic
 replicate
 pytest
+pytest
 python-dotenv
 openai[datalib]
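With pytest pinned in the requirements, the updated suites can be run from the command line or programmatically. A minimal sketch; the tests directory path is an assumption, adjust it to wherever these test files live:

import pytest

# Equivalent to `pytest -x litellm/tests/` in a shell;
# -x stops the run at the first failing test.
pytest.main(["-x", "litellm/tests/"])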