remove useless tests

ishaan-jaff 2023-08-02 21:08:01 -07:00
parent 5ac2ee2afc
commit 05513c516f
2 changed files with 47 additions and 45 deletions

View file

@@ -1,25 +1,27 @@
-# #### What this tests ####
-# # This tests the ability to set api key's via the params instead of as environment variables
+#### What this tests ####
+# This tests the ability to set api key's via the params instead of as environment variables
-# import sys, os
-# import traceback
-# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
-# import litellm
-# from litellm import embedding, completion
+import sys, os
+import traceback
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
-# litellm.set_verbose = False
+litellm.set_verbose = False
-# def logger_fn(model_call_object: dict):
-#     print(f"model call details: {model_call_object}")
+def logger_fn(model_call_object: dict):
+    print(f"model call details: {model_call_object}")
-# user_message = "Hello, how are you?"
-# messages = [{ "content": user_message,"role": "user"}]
+user_message = "Hello, how are you?"
+messages = [{ "content": user_message,"role": "user"}]
-# temp_key = os.environ.get("OPENAI_API_KEY")
-# os.environ["OPENAI_API_KEY"] = "bad-key"
-# # test on openai completion call
-# try:
-#     response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn, api_key=temp_key)
-# except:
-#     print(f"error occurred: {traceback.format_exc()}")
-#     pass
+temp_key = os.environ.get("OPENAI_API_KEY")
+os.environ["OPENAI_API_KEY"] = "bad-key"
+# test on openai completion call
+try:
+    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn, api_key=temp_key)
+except:
+    print(f"error occurred: {traceback.format_exc()}")
+    pass
+os.environ["OPENAI_API_KEY"] = temp_key

View file

@@ -18,33 +18,33 @@ import pytest
 # Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered
-models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly"]
+# models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly"]
-# Test 1: Context Window Errors
-@pytest.mark.parametrize("model", models)
-def test_context_window(model):
-    sample_text = "how does a court case get to the Supreme Court?" * 100000
-    messages = [{"content": sample_text, "role": "user"}]
-    try:
-        azure = model == "chatgpt-test"
-        print(f"model: {model}")
-        response = completion(model=model, messages=messages, azure=azure)
-    except InvalidRequestError:
-        print("InvalidRequestError")
-        return
-    except OpenAIError:
-        print("OpenAIError")
-        return
-    except Exception as e:
-        print("Uncaught Error in test_context_window")
-        print(f"Error Type: {type(e).__name__}")
-        print(f"Uncaught Exception - {e}")
-        pytest.fail(f"Error occurred: {e}")
-    return
+# # Test 1: Context Window Errors
+# @pytest.mark.parametrize("model", models)
+# def test_context_window(model):
+#     sample_text = "how does a court case get to the Supreme Court?" * 100000
+#     messages = [{"content": sample_text, "role": "user"}]
+#     try:
+#         azure = model == "chatgpt-test"
+#         print(f"model: {model}")
+#         response = completion(model=model, messages=messages, azure=azure)
+#     except InvalidRequestError:
+#         print("InvalidRequestError")
+#         return
+#     except OpenAIError:
+#         print("OpenAIError")
+#         return
+#     except Exception as e:
+#         print("Uncaught Error in test_context_window")
+#         # print(f"Error Type: {type(e).__name__}")
+#         print(f"Uncaught Exception - {e}")
+#         pytest.fail(f"Error occurred: {e}")
+#     return
-# Test 2: InvalidAuth Errors
-def logger_fn(model_call_object: dict):
-    print(f"model call details: {model_call_object}")
+# # Test 2: InvalidAuth Errors
+# def logger_fn(model_call_object: dict):
+#     print(f"model call details: {model_call_object}")
 # @pytest.mark.parametrize("model", models)
 # def invalid_auth(model): # set the model key to an invalid key, depending on the model
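
Note: if the disabled error-mapping tests are ever revived, the try/except/return pattern above could be tightened with pytest.raises, which fails the test automatically on any unexpected exception. A sketch under the same assumptions as the original file (pre-1.0 openai package layout, where InvalidRequestError subclasses OpenAIError):

import pytest
from openai.error import InvalidRequestError, OpenAIError  # pre-1.0 openai layout

from litellm import completion

models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly"]

@pytest.mark.parametrize("model", models)
def test_context_window(model):
    # Repeating the prompt 100000 times guarantees the context window is exceeded.
    sample_text = "how does a court case get to the Supreme Court?" * 100000
    messages = [{"content": sample_text, "role": "user"}]
    # The tuple mirrors the original test's two except branches; since
    # InvalidRequestError subclasses OpenAIError, either error passes.
    with pytest.raises((InvalidRequestError, OpenAIError)):
        completion(model=model, messages=messages, azure=(model == "chatgpt-test"))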