forked from phoenix/litellm-mirror
ci(test_logging): running just this test on circle ci
This commit is contained in: parent 9f118bb5db, commit 28a2e31ede
2 changed files with 314 additions and 314 deletions
@@ -56,7 +56,7 @@ jobs:
          command: |
            pwd
            ls
-           python -m pytest -vv tests/test_logging.py --junitxml=test-results/junit.xml
+           python -m pytest -vv litellm/tests/test_logging.py --junitxml=test-results/junit.xml
          no_output_timeout: 30m

      # Store test results
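For local debugging it can help to run the same pytest invocation that the CI step above uses. A minimal sketch, assuming pytest and the repo's dependencies are installed and the command is run from the repository root; the subprocess wrapper is illustrative and not part of this commit:

import subprocess

# Mirror the CI step: run only the logging tests and emit a JUnit report.
subprocess.run(
    [
        "python", "-m", "pytest", "-vv",
        "litellm/tests/test_logging.py",
        "--junitxml=test-results/junit.xml",
    ],
    check=True,  # raise CalledProcessError if any test fails
)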
litellm/tests/test_logging.py
@@ -40,343 +40,343 @@ messages = [{"content": user_message, "role": "user"}]

# 1. On Call Success
# normal completion
# test on openai completion call
def test_logging_success_completion():
    global score
    try:
        # Redirect stdout
        old_stdout = sys.stdout
        sys.stdout = new_stdout = io.StringIO()

        response = completion(model="gpt-3.5-turbo", messages=messages)
        # Restore stdout
        sys.stdout = old_stdout
        output = new_stdout.getvalue().strip()

        if "Logging Details Pre-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details Post-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details LiteLLM-Success Call" not in output:
            raise Exception("Required log message not found!")
        score += 1
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
        pass

## test on non-openai completion call
def test_logging_success_completion_non_openai():
    global score
    try:
        # Redirect stdout
        old_stdout = sys.stdout
        sys.stdout = new_stdout = io.StringIO()

        response = completion(model="claude-instant-1", messages=messages)

        # Restore stdout
        sys.stdout = old_stdout
        output = new_stdout.getvalue().strip()

        if "Logging Details Pre-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details Post-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details LiteLLM-Success Call" not in output:
            raise Exception("Required log message not found!")
        score += 1
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
        pass
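The two tests above rely on the same stdout-capture pattern: swap sys.stdout for an io.StringIO, run the call, restore stdout, then assert on the captured text. A minimal self-contained sketch of that pattern, using a hypothetical logged_call() stand-in instead of litellm.completion; contextlib.redirect_stdout would do the same job with less bookkeeping:

import io
import sys

def logged_call():
    # stand-in for a litellm call that prints its logging details
    print("Logging Details Pre-API Call")
    print("Logging Details Post-API Call")
    print("Logging Details LiteLLM-Success Call")

def test_capture_pattern():
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    try:
        logged_call()
    finally:
        sys.stdout = old_stdout  # restore even if the call raises
    output = new_stdout.getvalue().strip()
    assert "Logging Details LiteLLM-Success Call" in output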
# streaming completion
## test on openai completion call
def test_logging_success_streaming_openai():
    global score
    try:
        # litellm.set_verbose = False
        def custom_callback(
            kwargs,                 # kwargs to completion
            completion_response,    # response from completion
            start_time, end_time    # start/end time
        ):
            if "complete_streaming_response" in kwargs:
                print(f"Complete Streaming Response: {kwargs['complete_streaming_response']}")

        # Assign the custom callback function
        litellm.success_callback = [custom_callback]

        # Redirect stdout
        old_stdout = sys.stdout
        sys.stdout = new_stdout = io.StringIO()

        response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
        for chunk in response:
            pass

        # Restore stdout
        sys.stdout = old_stdout
        output = new_stdout.getvalue().strip()

        if "Logging Details Pre-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details Post-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details LiteLLM-Success Call" not in output:
            raise Exception("Required log message not found!")
        elif "Complete Streaming Response:" not in output:
            raise Exception("Required log message not found!")
        score += 1
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
        pass

# test_logging_success_streaming_openai()

## test on non-openai completion call
def test_logging_success_streaming_non_openai():
    global score
    try:
        # litellm.set_verbose = False
        def custom_callback(
            kwargs,                 # kwargs to completion
            completion_response,    # response from completion
            start_time, end_time    # start/end time
        ):
            # print(f"streaming response: {completion_response}")
            if "complete_streaming_response" in kwargs:
                print(f"Complete Streaming Response: {kwargs['complete_streaming_response']}")

        # Assign the custom callback function
        litellm.success_callback = [custom_callback]

        # Redirect stdout
        old_stdout = sys.stdout
        sys.stdout = new_stdout = io.StringIO()

        response = completion(model="claude-instant-1", messages=messages, stream=True)
        for idx, chunk in enumerate(response):
            pass

        # Restore stdout
        sys.stdout = old_stdout
        output = new_stdout.getvalue().strip()

        if "Logging Details Pre-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details Post-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details LiteLLM-Success Call" not in output:
            raise Exception("Required log message not found!")
        elif "Complete Streaming Response:" not in output:
            raise Exception(f"Required log message not found! {output}")
        score += 1
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
        pass

# test_logging_success_streaming_non_openai()

# embedding

def test_logging_success_embedding_openai():
    try:
        # Redirect stdout
        old_stdout = sys.stdout
        sys.stdout = new_stdout = io.StringIO()

        response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])

        # Restore stdout
        sys.stdout = old_stdout
        output = new_stdout.getvalue().strip()

        if "Logging Details Pre-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details Post-API Call" not in output:
            raise Exception("Required log message not found!")
        elif "Logging Details LiteLLM-Success Call" not in output:
            raise Exception("Required log message not found!")
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
# ## 2. On LiteLLM Call failure
|
||||||
|
# ## TEST BAD KEY
|
||||||
|
|
||||||
|
# # normal completion
|
||||||
|
# ## test on openai completion call
|
||||||
|
# try:
|
||||||
|
# temporary_oai_key = os.environ["OPENAI_API_KEY"]
|
||||||
|
# os.environ["OPENAI_API_KEY"] = "bad-key"
|
||||||
|
|
||||||
|
# temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
|
||||||
|
# os.environ["ANTHROPIC_API_KEY"] = "bad-key"
|
||||||
|
|
||||||
|
|
||||||
|
# # Redirect stdout
|
||||||
|
# old_stdout = sys.stdout
|
||||||
|
# sys.stdout = new_stdout = io.StringIO()
|
||||||
|
|
||||||
# try:
|
# try:
|
||||||
# # Redirect stdout
|
|
||||||
# old_stdout = sys.stdout
|
|
||||||
# sys.stdout = new_stdout = io.StringIO()
|
|
||||||
|
|
||||||
# response = completion(model="gpt-3.5-turbo", messages=messages)
|
# response = completion(model="gpt-3.5-turbo", messages=messages)
|
||||||
# # Restore stdout
|
# except AuthenticationError:
|
||||||
# sys.stdout = old_stdout
|
# print(f"raised auth error")
|
||||||
# output = new_stdout.getvalue().strip()
|
|
||||||
|
|
||||||
# if "Logging Details Pre-API Call" not in output:
|
|
||||||
# raise Exception("Required log message not found!")
|
|
||||||
# elif "Logging Details Post-API Call" not in output:
|
|
||||||
# raise Exception("Required log message not found!")
|
|
||||||
# elif "Logging Details LiteLLM-Success Call" not in output:
|
|
||||||
# raise Exception("Required log message not found!")
|
|
||||||
# score += 1
|
|
||||||
# except Exception as e:
|
|
||||||
# pytest.fail(f"Error occurred: {e}")
|
|
||||||
# pass
|
# pass
|
||||||
|
# # Restore stdout
|
||||||
|
# sys.stdout = old_stdout
|
||||||
|
# output = new_stdout.getvalue().strip()
|
||||||
|
|
||||||
|
# print(output)
|
||||||
|
|
||||||
|
# if "Logging Details Pre-API Call" not in output:
|
||||||
|
# raise Exception("Required log message not found!")
|
||||||
|
# elif "Logging Details Post-API Call" not in output:
|
||||||
|
# raise Exception("Required log message not found!")
|
||||||
|
# elif "Logging Details LiteLLM-Failure Call" not in output:
|
||||||
|
# raise Exception("Required log message not found!")
|
||||||
|
|
||||||
|
# os.environ["OPENAI_API_KEY"] = temporary_oai_key
|
||||||
|
# os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key
|
||||||
|
|
||||||
|
# score += 1
|
||||||
|
# except Exception as e:
|
||||||
|
# print(f"exception type: {type(e).__name__}")
|
||||||
|
# pytest.fail(f"Error occurred: {e}")
|
||||||
|
# pass
|
||||||
|
|
||||||
# ## test on non-openai completion call
|
# ## test on non-openai completion call
|
||||||
# def test_logging_success_completion_non_openai():
|
# try:
|
||||||
# global score
|
# temporary_oai_key = os.environ["OPENAI_API_KEY"]
|
||||||
|
# os.environ["OPENAI_API_KEY"] = "bad-key"
|
||||||
|
|
||||||
|
# temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
|
||||||
|
# os.environ["ANTHROPIC_API_KEY"] = "bad-key"
|
||||||
|
# # Redirect stdout
|
||||||
|
# old_stdout = sys.stdout
|
||||||
|
# sys.stdout = new_stdout = io.StringIO()
|
||||||
|
|
||||||
# try:
|
# try:
|
||||||
# # Redirect stdout
|
|
||||||
# old_stdout = sys.stdout
|
|
||||||
# sys.stdout = new_stdout = io.StringIO()
|
|
||||||
|
|
||||||
# response = completion(model="claude-instant-1", messages=messages)
|
# response = completion(model="claude-instant-1", messages=messages)
|
||||||
|
# except AuthenticationError:
|
||||||
# # Restore stdout
|
|
||||||
# sys.stdout = old_stdout
|
|
||||||
# output = new_stdout.getvalue().strip()
|
|
||||||
|
|
||||||
# if "Logging Details Pre-API Call" not in output:
|
|
||||||
# raise Exception("Required log message not found!")
|
|
||||||
# elif "Logging Details Post-API Call" not in output:
|
|
||||||
# raise Exception("Required log message not found!")
|
|
||||||
# elif "Logging Details LiteLLM-Success Call" not in output:
|
|
||||||
# raise Exception("Required log message not found!")
|
|
||||||
# score += 1
|
|
||||||
# except Exception as e:
|
|
||||||
# pytest.fail(f"Error occurred: {e}")
|
|
||||||
# pass
|
# pass
|
||||||
|
|
||||||
|
# if "Logging Details Pre-API Call" not in output:
|
||||||
|
# raise Exception("Required log message not found!")
|
||||||
|
# elif "Logging Details Post-API Call" not in output:
|
||||||
|
# raise Exception("Required log message not found!")
|
||||||
|
# elif "Logging Details LiteLLM-Failure Call" not in output:
|
||||||
|
# raise Exception("Required log message not found!")
|
||||||
|
# os.environ["OPENAI_API_KEY"] = temporary_oai_key
|
||||||
|
# os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key
|
||||||
|
# score += 1
|
||||||
|
# except Exception as e:
|
||||||
|
# print(f"exception type: {type(e).__name__}")
|
||||||
|
# # Restore stdout
|
||||||
|
# sys.stdout = old_stdout
|
||||||
|
# output = new_stdout.getvalue().strip()
|
||||||
|
|
||||||
|
# print(output)
|
||||||
|
# pytest.fail(f"Error occurred: {e}")
|
||||||
|
|
||||||
|
|
||||||
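The commented-out failure tests above all use the same save/override/restore dance on the API-key environment variables. A self-contained sketch of that pattern with a hypothetical DEMO_API_KEY variable; restoring in a finally block (rather than inline, as the commented code does) keeps the bad key from leaking into later tests:

import os

os.environ["DEMO_API_KEY"] = "real-key"          # pretend this is the real key
temporary_key = os.environ["DEMO_API_KEY"]       # save it
os.environ["DEMO_API_KEY"] = "bad-key"           # force an auth failure
try:
    pass  # call completion()/embedding() here and assert on the failure logs
finally:
    os.environ["DEMO_API_KEY"] = temporary_key   # always restore the original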
# # streaming completion
# ## test on openai completion call
# try:
#     temporary_oai_key = os.environ["OPENAI_API_KEY"]
#     os.environ["OPENAI_API_KEY"] = "bad-key"

#     temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
#     os.environ["ANTHROPIC_API_KEY"] = "bad-key"
#     # Redirect stdout
#     old_stdout = sys.stdout
#     sys.stdout = new_stdout = io.StringIO()

#     try:
#         response = completion(model="gpt-3.5-turbo", messages=messages)
#     except AuthenticationError:
#         pass

#     # Restore stdout
#     sys.stdout = old_stdout
#     output = new_stdout.getvalue().strip()

#     print(output)

#     if "Logging Details Pre-API Call" not in output:
#         raise Exception("Required log message not found!")
#     elif "Logging Details Post-API Call" not in output:
#         raise Exception("Required log message not found!")
#     elif "Logging Details LiteLLM-Failure Call" not in output:
#         raise Exception("Required log message not found!")

#     os.environ["OPENAI_API_KEY"] = temporary_oai_key
#     os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key
#     score += 1
# except Exception as e:
#     print(f"exception type: {type(e).__name__}")
#     pytest.fail(f"Error occurred: {e}")

# ## test on non-openai completion call
# try:
#     temporary_oai_key = os.environ["OPENAI_API_KEY"]
#     os.environ["OPENAI_API_KEY"] = "bad-key"

#     temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
#     os.environ["ANTHROPIC_API_KEY"] = "bad-key"
#     # Redirect stdout
#     old_stdout = sys.stdout
#     sys.stdout = new_stdout = io.StringIO()

#     try:
#         response = completion(model="claude-instant-1", messages=messages)
#     except AuthenticationError:
#         pass

#     # Restore stdout
#     sys.stdout = old_stdout
#     output = new_stdout.getvalue().strip()

#     print(output)

#     if "Logging Details Pre-API Call" not in output:
#         raise Exception("Required log message not found!")
#     elif "Logging Details Post-API Call" not in output:
#         raise Exception("Required log message not found!")
#     elif "Logging Details LiteLLM-Failure Call" not in output:
#         raise Exception("Required log message not found!")
#     score += 1
# except Exception as e:
#     print(f"exception type: {type(e).__name__}")
#     pytest.fail(f"Error occurred: {e}")
# # embedding
# try:
#     temporary_oai_key = os.environ["OPENAI_API_KEY"]
#     os.environ["OPENAI_API_KEY"] = "bad-key"

#     temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
#     os.environ["ANTHROPIC_API_KEY"] = "bad-key"
#     # Redirect stdout
#     old_stdout = sys.stdout
#     sys.stdout = new_stdout = io.StringIO()

#     try:
#         response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
#     except AuthenticationError:
#         pass

#     # Restore stdout
#     sys.stdout = old_stdout
#     output = new_stdout.getvalue().strip()

#     print(output)

#     if "Logging Details Pre-API Call" not in output:
#         raise Exception("Required log message not found!")
#     elif "Logging Details Post-API Call" not in output:
#         raise Exception("Required log message not found!")
#     elif "Logging Details LiteLLM-Failure Call" not in output:
#         raise Exception("Required log message not found!")
# except Exception as e:
#     print(f"exception type: {type(e).__name__}")
#     pytest.fail(f"Error occurred: {e}")