updates to tests

This commit is contained in:
Krrish Dholakia 2023-09-01 16:01:28 -07:00
parent ab35fdb635
commit 4d17c57373
3 changed files with 29 additions and 29 deletions

View file

@ -213,13 +213,7 @@ try:
response = completion(model="claude-instant-1", messages=messages)
except AuthenticationError:
pass
# Restore stdout
sys.stdout = old_stdout
output = new_stdout.getvalue().strip()
print(output)
if "Logging Details Pre-API Call" not in output:
raise Exception("Required log message not found!")
elif "Logging Details Post-API Call" not in output:
@ -231,6 +225,11 @@ try:
score += 1
except Exception as e:
print(f"exception type: {type(e).__name__}")
# Restore stdout
sys.stdout = old_stdout
output = new_stdout.getvalue().strip()
print(output)
pytest.fail(f"Error occurred: {e}")

View file

@ -2,34 +2,34 @@
# This tests error logging (with custom user functions) for the `completion` + `embedding` endpoints without callbacks (i.e. slack, posthog, etc. not set)
# Requirements: Remove any env keys you have related to slack/posthog/etc. + anthropic api key (cause an exception)
import sys, os
import traceback
# import sys, os
# import traceback
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import embedding, completion
from infisical import InfisicalClient
import pytest
# sys.path.insert(
# 0, os.path.abspath("../..")
# ) # Adds the parent directory to the system path
# import litellm
# from litellm import embedding, completion
# from infisical import InfisicalClient
# import pytest
infisical_token = os.environ["INFISICAL_TOKEN"]
# infisical_token = os.environ["INFISICAL_TOKEN"]
litellm.secret_manager_client = InfisicalClient(token=infisical_token)
# litellm.secret_manager_client = InfisicalClient(token=infisical_token)
user_message = "Hello, whats the weather in San Francisco??"
messages = [{"content": user_message, "role": "user"}]
# user_message = "Hello, whats the weather in San Francisco??"
# messages = [{"content": user_message, "role": "user"}]
def test_completion_openai():
    """Smoke test: a gpt-3.5-turbo completion call should succeed.

    NOTE(review): runs while litellm.secret_manager_client is presumably set to
    an InfisicalClient (see the module-level setup earlier in this file) —
    confirm that ordering holds when tests are collected individually.
    """
    try:
        response = completion(model="gpt-3.5-turbo", messages=messages)
        # Add any assertions here to check the response
        print(response)
    except Exception as e:
        # Clear the secret-manager client before failing so later tests in the
        # session are not left pointing at Infisical.
        litellm.secret_manager_client = None
        pytest.fail(f"Error occurred: {e}")
    # Success path: also reset the secret-manager client after the test.
    litellm.secret_manager_client = None
# def test_completion_openai():
# try:
# response = completion(model="gpt-3.5-turbo", messages=messages)
# # Add any assertions here to check the response
# print(response)
# except Exception as e:
# litellm.secret_manager_client = None
# pytest.fail(f"Error occurred: {e}")
# litellm.secret_manager_client = None
test_completion_openai()
# test_completion_openai()

View file

@ -1956,6 +1956,7 @@ def completion_with_split_tests(models={}, messages=[], use_client=False, overri
completion_with_split_tests(models=models, messages=messages)
"""
import random
model_configs = {}
if use_client and not override_client:
if "id" not in kwargs or kwargs["id"] is None:
raise ValueError("Please tag this completion call, if you'd like to update it's split test values through the UI. - eg. `completion_with_split_tests(.., id=1234)`.")