fixes to core logging

Krrish Dholakia 2023-09-01 11:51:49 -07:00
parent ecce7ee1a8
commit bfa79ea3c0
5 changed files with 274 additions and 47 deletions

@@ -1,66 +1,285 @@
#### What this tests ####
# This tests error logging (with custom user functions) for the raw `completion` + `embedding` endpoints
# Test Scenarios (test across completion, streaming, embedding)
## 1: Pre-API-Call
## 2: Post-API-Call
## 3: On LiteLLM Call success
## 4: On LiteLLM Call failure
import sys, os, io
import traceback, logging
import pytest
import dotenv
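# Load API keys (e.g. OPENAI_API_KEY, ANTHROPIC_API_KEY) from a local .env file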
dotenv.load_dotenv()
# Create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Create a stream handler
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# Create a function to log information
def logger_fn(message):
    logger.info(message)
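# Note: a logger_fn can also be passed per call, e.g.
# completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn);
# litellm then invokes it with the model call details for custom logging.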
sys.path.insert(
    0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import litellm
from litellm import embedding, completion
from openai.error import AuthenticationError
litellm.set_verbose = True
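# With verbose logging enabled, the "Logging Details ..." messages land on stdout;
# each test below captures stdout and asserts that the expected messages appear.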
score = 0
user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]
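# The prompt above is reused across the completion, streaming, and embedding tests below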
# 1. On Call Success
# normal completion
## test on openai completion call
try:
response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="gpt-3.5-turbo", messages=messages)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
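    # The captured output should contain one log line for each stage of the call lifecycle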
if "Logging Details Pre-API Call" not in output:
raise Exception("Required log message not found!")
elif "Logging Details Post-API Call" not in output:
raise Exception("Required log message not found!")
elif "Logging Details LiteLLM-Success Call" not in output:
raise Exception("Required log message not found!")
score += 1
except Exception as e:
    pytest.fail(f"Error occurred: {e}")
    pass
## test on non-openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="claude-instant-1", messages=messages)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Success Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
    pytest.fail(f"Error occurred: {e}")
    pass
# streaming completion
## test on openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
response = completion(model="gpt-3.5-turbo", messages=messages)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Success Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
    pytest.fail(f"Error occurred: {e}")
    pass
# print(f"Score: {score}, Overall score: {score/5}")
## test on non-openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="claude-instant-1", messages=messages, stream=True)
    for chunk in response:  # iterate the stream so the call completes and success logging can run
        pass
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Success Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
    pytest.fail(f"Error occurred: {e}")
    pass
# embedding
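# Same capture-and-check pattern, applied to the embedding endpoint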
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Success Call" not in output:
        raise Exception("Required log message not found!")
except Exception as e:
    pytest.fail(f"Error occurred: {e}")
## 2. On LiteLLM Call failure
## TEST BAD KEY
temporary_oai_key = os.environ["OPENAI_API_KEY"]
os.environ["OPENAI_API_KEY"] = "bad-key"
temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
os.environ["ANTHROPIC_API_KEY"] = "bad-key"
# normal completion
## test on openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="gpt-3.5-turbo", messages=messages)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Failure Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
print(f"exception type: {type(e).__name__}")
if not isinstance(e, AuthenticationError):
pytest.fail(f"Error occurred: {e}")
## test on non-openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="claude-instant-1", messages=messages)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Failure Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
    if not isinstance(e, AuthenticationError):
        pytest.fail(f"Error occurred: {e}")
# streaming completion
## test on openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Failure Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
    if not isinstance(e, AuthenticationError):
        pytest.fail(f"Error occurred: {e}")
## test on non-openai completion call
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = completion(model="claude-instant-1", messages=messages, stream=True)
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Failure Call" not in output:
        raise Exception("Required log message not found!")
    score += 1
except Exception as e:
    if not isinstance(e, AuthenticationError):
        pytest.fail(f"Error occurred: {e}")
# embedding
try:
    # Redirect stdout
    old_stdout = sys.stdout
    sys.stdout = new_stdout = io.StringIO()
    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
    # Restore stdout
    sys.stdout = old_stdout
    output = new_stdout.getvalue().strip()
    if "Logging Details Pre-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details Post-API Call" not in output:
        raise Exception("Required log message not found!")
    elif "Logging Details LiteLLM-Failure Call" not in output:
        raise Exception("Required log message not found!")
except Exception as e:
    if not isinstance(e, AuthenticationError):
        pytest.fail(f"Error occurred: {e}")
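# Restore the original keys so later tests are unaffected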
os.environ["OPENAI_API_KEY"] = temporary_oai_key
os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key