add better testing

ishaan-jaff 2023-07-31 18:36:38 -07:00
parent 557ba4b139
commit d4ffd11ad3
4 changed files with 129 additions and 137 deletions

View file

@@ -24,9 +24,12 @@ set_verbose(True)
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 model_val = "krrish is a model"
-# # test on empty
-# try:
-#     response = completion(model=model_val, messages=messages)
-# except Exception as e:
-#     print(f"error occurred: {e}")
-#     pass
+
+
+def test_completion_with_empty_model():
+    # test on empty
+    try:
+        response = completion(model=model_val, messages=messages)
+    except Exception as e:
+        print(f"error occurred: {e}")
+        pass
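The change above follows the standard pytest convention: any top-level function named test_* is collected and run automatically, so the previously commented-out check becomes an executable test. Below is a minimal, self-contained sketch of the same shape; fake_completion is a hypothetical stand-in for litellm.completion so the snippet runs without API keys, not part of the library.

import pytest

def fake_completion(model, messages):
    # hypothetical stand-in for litellm.completion; raises on an unknown model
    if model not in {"gpt-3.5-turbo", "claude-instant-1"}:
        raise ValueError(f"unknown model: {model}")
    return {"choices": [{"message": {"content": "ok"}}]}

def test_completion_with_empty_model():
    # the bad model name is expected to raise; the error is printed and swallowed
    try:
        fake_completion(model="krrish is a model", messages=[{"role": "user", "content": "hi"}])
    except Exception as e:
        print(f"error occurred: {e}")

def test_completion_with_known_model():
    # a valid model should succeed, so any exception is a real failure
    try:
        response = fake_completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "hi"}])
        assert response["choices"][0]["message"]["content"]
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

Running pytest in the directory picks both functions up without any registration step.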

View file

@@ -3,6 +3,8 @@
 import sys, os
 import traceback
+import pytest
 sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
@@ -19,37 +21,39 @@ def logger_fn(model_call_object: dict):
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 
-# test on openai completion call
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+def test_completion_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on non-openai completion call
-try:
-    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+def test_completion_non_openai():
+    try:
+        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on openai embedding call
-try:
-    response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_embedding_openai():
+    try:
+        response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
-try:
-    response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_bad_azure_embedding():
+    try:
+        response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pass
 
-# test on good azure openai embedding call
-try:
-    response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_good_azure_embedding():
+    try:
+        response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
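One test in this file is intentionally different: test_bad_azure_embedding swallows the exception with pass because the call is expected to fail (missing azure flag, not an embedding model), while the other tests convert exceptions into pytest.fail. When the intent is "this call must raise", pytest.raises states it explicitly and fails if the call unexpectedly succeeds. A sketch of that alternative, where embedding_stub is a hypothetical stand-in rather than the litellm API:

import pytest

def embedding_stub(model, input):
    # hypothetical stand-in for an embedding call; anything but the known model raises
    if model != "text-embedding-ada-002":
        raise ValueError(f"{model} is not an embedding model")
    return {"data": [{"embedding": [0.0, 0.0, 0.0]}]}

def test_bad_embedding_model_raises():
    # the test fails if the bad call unexpectedly succeeds, instead of silently passing
    with pytest.raises(Exception):
        embedding_stub(model="chatgpt-test", input=["Hello, how are you?"])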

View file

@@ -1,6 +1,7 @@
 import sys, os
 import traceback
 sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import pytest
 import litellm
 from litellm import embedding, completion
@@ -9,57 +10,40 @@ litellm.set_verbose = True
 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]
 
-################# Test 3 #################
-# test on Azure Openai Completion Call
-try:
-    response = completion(model="chatgpt-test", messages=messages, azure=True)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1 #################
-# test on openai completion call, with model and messages
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1.1 #################
-# test on openai completion call, with model and messages, optional params
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1.2 #################
-# test on openai completion call, with model and messages, optional params
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, frequency_penalty=-0.5, logit_bias={123:5}, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 1.3 #################
-# Test with Stream = True
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, stream=True, frequency_penalty=-0.5, logit_bias={27000:5}, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 2 #################
-# test on openai completion call, with functions
-function1 = [
-    {
-        "name": "get_current_weather",
-        "description": "Get the current weather in a given location",
+def test_completion_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_optional_params():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_more_optional_params():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, frequency_penalty=-0.5, logit_bias={123: 5}, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_stream():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, stream=True, frequency_penalty=-0.5, logit_bias={27000: 5}, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_openai_with_functions():
+    function1 = [
+        {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
@@ -79,42 +63,42 @@ function1 = [
-        }
-    }
-]
-user_message = "Hello, whats the weather in San Francisco??"
-messages = [{ "content": user_message,"role": "user"}]
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 4 #################
-# test on Claude Completion Call
-try:
-    response = completion(model="claude-instant-1", messages=messages)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 5 #################
-# test on Cohere Completion Call
-try:
-    response = completion(model="command-nightly", messages=messages, max_tokens=500)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
-
-################# Test 6 #################
-# test on Replicate llama2 Completion Call
-try:
-    model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
-    response = completion(model=model_name, messages=messages, max_tokens=500)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
+            }
+        }
+    ]
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_azure():
+    try:
+        response = completion(model="chatgpt-test", messages=messages, azure=True)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_claude():
+    try:
+        response = completion(model="claude-instant-1", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_cohere():
+    try:
+        response = completion(model="command-nightly", messages=messages, max_tokens=500)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+def test_completion_replicate_llama():
+    model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+    try:
+        response = completion(model=model_name, messages=messages, max_tokens=500)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")

View file

@@ -4,5 +4,6 @@ func_timeout
 anthropic
 replicate
 pytest
+pytest
 python-dotenv
 openai[datalib]