From 3839be8ed81b3624f1236aa43a85060da97f0e2c Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 12 Oct 2023 15:10:18 -0700
Subject: [PATCH] (tests) delete old testing files

---
 litellm/tests/test_berrispend_integration.py | 25 --------------
 litellm/tests/test_custom_api_base.py        | 33 ------------------
 litellm/tests/test_secrets.py                | 35 --------------------
 litellm/tests/test_split_test.py             | 26 ---------------
 4 files changed, 119 deletions(-)
 delete mode 100644 litellm/tests/test_berrispend_integration.py
 delete mode 100644 litellm/tests/test_custom_api_base.py
 delete mode 100644 litellm/tests/test_secrets.py
 delete mode 100644 litellm/tests/test_split_test.py

diff --git a/litellm/tests/test_berrispend_integration.py b/litellm/tests/test_berrispend_integration.py
deleted file mode 100644
index 500285b85..000000000
--- a/litellm/tests/test_berrispend_integration.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#### What this tests ####
-# This tests if logging to the helicone integration actually works
-# pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
-# import sys, os
-# import traceback
-# import pytest
-
-# sys.path.insert(0, os.path.abspath('../..'))  # Adds the parent directory to the system path
-# import litellm
-# from litellm import embedding, completion
-
-# litellm.success_callback = ["berrispend"]
-# litellm.failure_callback = ["berrispend"]
-
-# litellm.set_verbose = True
-
-# user_message = "Hello, how are you?"
-# messages = [{ "content": user_message,"role": "user"}]
-
-
-# #openai call
-# response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
-
-# #bad request call
-# response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
diff --git a/litellm/tests/test_custom_api_base.py b/litellm/tests/test_custom_api_base.py
deleted file mode 100644
index 4d080551e..000000000
--- a/litellm/tests/test_custom_api_base.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# import sys, os
-# import traceback
-# from dotenv import load_dotenv
-
-# load_dotenv()
-# import os
-
-# sys.path.insert(
-#     0, os.path.abspath("../..")
-# )  # Adds the parent directory to the system path
-# import litellm
-# from litellm import completion
-
-
-# def logging_fn(model_call_dict):
-#     print(f"model call details: {model_call_dict}")
-
-
-# models = ["gorilla-7b-hf-v1", "gpt-4"]
-# custom_llm_provider = None
-# messages = [{"role": "user", "content": "Hey, how's it going?"}]
-# for model in models:  # iterate through list
-#     api_base = None
-#     if model == "gorilla-7b-hf-v1":
-#         custom_llm_provider = "custom_openai"
-#         api_base = "http://zanino.millennium.berkeley.edu:8000/v1"
-#     completion(
-#         model=model,
-#         messages=messages,
-#         custom_llm_provider=custom_llm_provider,
-#         api_base=api_base,
-#         logger_fn=logging_fn,
-#     )
diff --git a/litellm/tests/test_secrets.py b/litellm/tests/test_secrets.py
deleted file mode 100644
index 809d35627..000000000
--- a/litellm/tests/test_secrets.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#### What this tests ####
-# This tests error logging (with custom user functions) for the `completion` + `embedding` endpoints without callbacks (i.e. slack, posthog, etc. not set)
-# Requirements: Remove any env keys you have related to slack/posthog/etc. + anthropic api key (cause an exception)
-
-# import sys, os
-# import traceback
-
-# sys.path.insert(
-#     0, os.path.abspath("../..")
-# )  # Adds the parent directory to the system path
-# import litellm
-# from litellm import embedding, completion
-# from infisical import InfisicalClient
-# import pytest
-
-# infisical_token = os.environ["INFISICAL_TOKEN"]
-
-# litellm.secret_manager_client = InfisicalClient(token=infisical_token)
-
-# user_message = "Hello, whats the weather in San Francisco??"
-# messages = [{"content": user_message, "role": "user"}]
-
-
-# def test_completion_openai():
-#     try:
-#         response = completion(model="gpt-3.5-turbo", messages=messages)
-#         # Add any assertions here to check the response
-#         print(response)
-#     except Exception as e:
-#         litellm.secret_manager_client = None
-#         pytest.fail(f"Error occurred: {e}")
-#     litellm.secret_manager_client = None
-
-
-# test_completion_openai()
diff --git a/litellm/tests/test_split_test.py b/litellm/tests/test_split_test.py
deleted file mode 100644
index c995a0d88..000000000
--- a/litellm/tests/test_split_test.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#### What this tests ####
-# This tests the 'completion_with_split_tests' function to enable a/b testing between llm models
-
-import sys, os
-import traceback
-
-sys.path.insert(
-    0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
-import litellm
-from litellm import completion_with_split_tests
-litellm.set_verbose = True
-split_per_model = {
-    "gpt-3.5-turbo": 0.8,
-    "claude-instant-1.2": 0.1
-}
-
-messages = [{ "content": "Hello, how are you?","role": "user"}]
-
-# print(completion_with_split_tests(models=split_per_model, messages=messages))
-
-# test
-print(completion_with_split_tests(models=split_per_model, messages=messages))
-
-# test with client, without id
-print(completion_with_split_tests(models=split_per_model, messages=messages, use_client=True))