(tests) delete old testing files

ishaan-jaff committed 2023-10-12 15:10:18 -07:00
parent 4b3e4c97b8
commit 3839be8ed8
4 changed files with 0 additions and 119 deletions

@@ -1,25 +0,0 @@
#### What this tests ####
# This tests if logging to the berrispend integration actually works
# pytest mistakes intentional bad calls for failed tests -> [TODO] fix this
# import sys, os
# import traceback
# import pytest
# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
# import litellm
# from litellm import embedding, completion
# litellm.success_callback = ["berrispend"]
# litellm.failure_callback = ["berrispend"]
# litellm.set_verbose = True
# user_message = "Hello, how are you?"
# messages = [{ "content": user_message,"role": "user"}]
# #openai call
# response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
# #bad request call
# response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])

@@ -1,33 +0,0 @@
# import sys, os
# import traceback
# from dotenv import load_dotenv
# load_dotenv()
# import os
# sys.path.insert(
# 0, os.path.abspath("../..")
# ) # Adds the parent directory to the system path
# import litellm
# from litellm import completion
# def logging_fn(model_call_dict):
# print(f"model call details: {model_call_dict}")
# models = ["gorilla-7b-hf-v1", "gpt-4"]
# custom_llm_provider = None
# messages = [{"role": "user", "content": "Hey, how's it going?"}]
# for model in models: # iterate through list
# api_base = None
# if model == "gorilla-7b-hf-v1":
# custom_llm_provider = "custom_openai"
# api_base = "http://zanino.millennium.berkeley.edu:8000/v1"
# completion(
# model=model,
# messages=messages,
# custom_llm_provider=custom_llm_provider,
# api_base=api_base,
# logger_fn=logging_fn,
# )
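
Here is a runnable sketch of the deleted multi-provider test, with one fix: the deleted version reset api_base but not custom_llm_provider between loop iterations, so the "custom_openai" provider leaked into the gpt-4 call. The Berkeley api_base is taken from the deleted file and may no longer be live.

# Sketch of the deleted routing test: the same messages go to each model,
# with the Gorilla model routed through an OpenAI-compatible endpoint.
import litellm
from litellm import completion

def logging_fn(model_call_dict):
    print(f"model call details: {model_call_dict}")

messages = [{"role": "user", "content": "Hey, how's it going?"}]

for model in ["gorilla-7b-hf-v1", "gpt-4"]:
    custom_llm_provider = None  # reset per model so settings don't leak
    api_base = None
    if model == "gorilla-7b-hf-v1":
        custom_llm_provider = "custom_openai"
        api_base = "http://zanino.millennium.berkeley.edu:8000/v1"
    completion(
        model=model,
        messages=messages,
        custom_llm_provider=custom_llm_provider,
        api_base=api_base,
        logger_fn=logging_fn,
    )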

@@ -1,35 +0,0 @@
#### What this tests ####
# This tests that `completion` works when the OpenAI API key is resolved through the Infisical secret manager
# Requirements: INFISICAL_TOKEN set in the environment
# import sys, os
# import traceback
# sys.path.insert(
# 0, os.path.abspath("../..")
# ) # Adds the parent directory to the system path
# import litellm
# from litellm import embedding, completion
# from infisical import InfisicalClient
# import pytest
# infisical_token = os.environ["INFISICAL_TOKEN"]
# litellm.secret_manager_client = InfisicalClient(token=infisical_token)
# user_message = "Hello, whats the weather in San Francisco??"
# messages = [{"content": user_message, "role": "user"}]
# def test_completion_openai():
# try:
# response = completion(model="gpt-3.5-turbo", messages=messages)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# litellm.secret_manager_client = None
# pytest.fail(f"Error occurred: {e}")
# litellm.secret_manager_client = None
# test_completion_openai()
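
A runnable sketch of the deleted secret-manager test follows, assuming that era's litellm secret_manager_client hook and the infisical SDK's InfisicalClient(token=...) constructor, both taken from the deleted file. The deleted version reset the client both in the except branch and after the call; a finally block does the same in one place.

# Sketch of the deleted Infisical test: litellm resolves the OpenAI key
# through the secret manager instead of reading it from the environment.
import os
import litellm
import pytest
from litellm import completion
from infisical import InfisicalClient

litellm.secret_manager_client = InfisicalClient(token=os.environ["INFISICAL_TOKEN"])

messages = [{"role": "user", "content": "Hello, what's the weather in San Francisco?"}]

def test_completion_openai():
    try:
        response = completion(model="gpt-3.5-turbo", messages=messages)
        print(response)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
    finally:
        litellm.secret_manager_client = None  # don't leak into other tests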

@@ -1,26 +0,0 @@
#### What this tests ####
# This tests the `completion_with_split_tests` function, which enables A/B testing between LLM models
import sys, os
import traceback
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import completion_with_split_tests
litellm.set_verbose = True
split_per_model = {
"gpt-3.5-turbo": 0.8,
"claude-instant-1.2": 0.1
}
messages = [{ "content": "Hello, how are you?","role": "user"}]
# print(completion_with_split_tests(models=split_per_model, messages=messages))
# test
print(completion_with_split_tests(models=split_per_model, messages=messages))
# test with client, without id
print(completion_with_split_tests(models=split_per_model, messages=messages, use_client=True))
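
The weights in split_per_model are relative (they sum to 0.9 here), which suggests the split is a weighted random draw rather than an exact allocation. Below is a minimal illustration of that idea using random.choices; it is a sketch of the concept, not litellm's actual implementation of completion_with_split_tests.

# Concept sketch: weighted A/B routing between models.
import random
from litellm import completion

split_per_model = {"gpt-3.5-turbo": 0.8, "claude-instant-1.2": 0.1}
messages = [{"content": "Hello, how are you?", "role": "user"}]

def split_completion(models, messages):
    # random.choices takes relative weights, so they need not sum to 1
    model = random.choices(list(models), weights=list(models.values()), k=1)[0]
    return completion(model=model, messages=messages)

print(split_completion(split_per_model, messages))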