From d4ffd11ad367b5710026dd85ad7625ed20f01c2f Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Mon, 31 Jul 2023 18:36:38 -0700
Subject: [PATCH] add better testing

---
 litellm/tests/test_bad_params.py |  15 ++-
 litellm/tests/test_client.py     |  66 +++++------
 litellm/tests/test_completion.py | 184 ++++++++++++++-----------------
 requirements.txt                 |   1 +
 4 files changed, 129 insertions(+), 137 deletions(-)

diff --git a/litellm/tests/test_bad_params.py b/litellm/tests/test_bad_params.py
index b42c16d589..95f6695941 100644
--- a/litellm/tests/test_bad_params.py
+++ b/litellm/tests/test_bad_params.py
@@ -24,9 +24,12 @@ set_verbose(True)
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 model_val = "krrish is a model"
-# # test on empty
-# try:
-#     response = completion(model=model_val, messages=messages)
-# except Exception as e:
-#     print(f"error occurred: {e}")
-#     pass
+
+
+def test_completion_with_empty_model():
+    # test on empty
+    try:
+        response = completion(model=model_val, messages=messages)
+    except Exception as e:
+        print(f"error occurred: {e}")
+        pass
diff --git a/litellm/tests/test_client.py b/litellm/tests/test_client.py
index 2f5906ad0e..9129b5853c 100644
--- a/litellm/tests/test_client.py
+++ b/litellm/tests/test_client.py
@@ -3,6 +3,8 @@
 import sys, os
 import traceback
+import pytest
+
 sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
 
@@ -19,37 +21,39 @@ def logger_fn(model_call_object: dict):
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
 
-# test on openai completion call
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+def test_completion_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on non-openai completion call
-try:
-    response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+def test_completion_non_openai():
+    try:
+        response = completion(model="claude-instant-1", messages=messages, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on openai embedding call
-try:
-    response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_embedding_openai():
+    try:
+        response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
 
-# test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
-try:
-    response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
-
-# test on good azure openai embedding call
-try:
-    response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-    print(f"response: {str(response)[:50]}")
-except:
-    traceback.print_exc()
+def test_bad_azure_embedding():
+    try:
+        response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pass
+def test_good_azure_embedding():
+    try:
+        response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
+        # Add any assertions here to check the response
+        print(f"response: {str(response)[:50]}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 88574ff2ee..adb55a45e3 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -1,6 +1,7 @@
 import sys, os
 import traceback
 sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import pytest
 import litellm
 from litellm import embedding, completion
 
@@ -9,112 +10,95 @@ litellm.set_verbose = True
 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]
 
+def test_completion_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
-################# Test 3 #################
-# test on Azure Openai Completion Call
-try:
-    response = completion(model="chatgpt-test", messages=messages, azure=True)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
 
+def test_completion_openai_with_optional_params():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
-################# Test 1 #################
-# test on openai completion call, with model and messages
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages)
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
 
+def test_completion_openai_with_more_optional_params():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, frequency_penalty=-0.5, logit_bias={123: 5}, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
-################# Test 1.1 #################
-# test on openai completion call, with model and messages, optional params
-try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
-    print(response)
-except Exception as e:
-    print(f"error occurred: {traceback.format_exc()}")
-    raise e
 
+def test_completion_openai_with_stream():
+    try:
+        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, stream=True, frequency_penalty=-0.5, logit_bias={27000: 5}, user="ishaan_dev@berri.ai")
+        # Add any assertions here to check the response
+        print(response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
-################# Test 1.2 #################
-# test on openai completion call, with model and messages, optional params
-try:
completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, frequency_penalty=-0.5, logit_bias={123:5}, user="ishaan_dev@berri.ai") - print(response) -except Exception as e: - print(f"error occurred: {traceback.format_exc()}") - raise e +def test_completion_openai_with_functions(): + function1 = [ + { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + ] + try: + response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1) + # Add any assertions here to check the response + print(response) + except Exception as e: + pytest.fail(f"Error occurred: {e}") +def test_completion_azure(): + try: + response = completion(model="chatgpt-test", messages=messages, azure=True) + # Add any assertions here to check the response + print(response) + except Exception as e: + pytest.fail(f"Error occurred: {e}") +def test_completion_claude(): + try: + response = completion(model="claude-instant-1", messages=messages) + # Add any assertions here to check the response + print(response) + except Exception as e: + pytest.fail(f"Error occurred: {e}") -################# Test 1.3 ################# -# Test with Stream = True -try: - response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, n=2, max_tokens=150, presence_penalty=0.5, stream=True, frequency_penalty=-0.5, logit_bias={27000:5}, user="ishaan_dev@berri.ai") - print(response) -except Exception as e: - print(f"error occurred: {traceback.format_exc()}") - raise e +def test_completion_cohere(): + try: + response = completion(model="command-nightly", messages=messages, max_tokens=500) + # Add any assertions here to check the response + print(response) + except Exception as e: + pytest.fail(f"Error occurred: {e}") -################# Test 2 ################# -# test on openai completion call, with functions -function1 = [ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA" - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"] - } - }, - "required": ["location"] - } - } - ] -user_message = "Hello, whats the weather in San Francisco??" 
-messages = [{ "content": user_message,"role": "user"}] -try: - response = completion(model="gpt-3.5-turbo", messages=messages, functions=function1) - print(response) -except Exception as e: - print(f"error occurred: {traceback.format_exc()}") - raise e - - - - -################# Test 4 ################# -# test on Claude Completion Call -try: - response = completion(model="claude-instant-1", messages=messages) - print(response) -except Exception as e: - print(f"error occurred: {traceback.format_exc()}") - raise e - -################# Test 5 ################# -# test on Cohere Completion Call -try: - response = completion(model="command-nightly", messages=messages, max_tokens=500) - print(response) -except Exception as e: - print(f"error occurred: {traceback.format_exc()}") - raise e - -################# Test 6 ################# -# test on Replicate llama2 Completion Call -try: +def test_completion_replicate_llama(): model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1" - response = completion(model=model_name, messages=messages, max_tokens=500) - print(response) -except Exception as e: - print(f"error occurred: {traceback.format_exc()}") - raise e + try: + response = completion(model=model_name, messages=messages, max_tokens=500) + # Add any assertions here to check the response + print(response) + except Exception as e: + pytest.fail(f"Error occurred: {e}") \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index afcf55cba9..37bc975e49 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,5 +4,6 @@ func_timeout anthropic replicate pytest +pytest python-dotenv openai[datalib]