fixes to core logging

Krrish Dholakia 2023-09-01 11:51:49 -07:00
parent 8af86419a6
commit fda6dba1ec
5 changed files with 274 additions and 47 deletions


@@ -1,66 +1,285 @@
 #### What this tests ####
 # This tests error logging (with custom user functions) for the raw `completion` + `embedding` endpoints
-import sys, os
-import traceback
+# Test Scenarios (test across completion, streaming, embedding)
+## 1: Pre-API-Call
+## 2: Post-API-Call
+## 3: On LiteLLM Call success
+## 4: On LiteLLM Call failure
+import sys, os, io
+import traceback, logging
+import pytest
+import dotenv
+dotenv.load_dotenv()
+
+# Create logger
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+# Create a stream handler
+stream_handler = logging.StreamHandler(sys.stdout)
+logger.addHandler(stream_handler)
+
+# Create a function to log information
+def logger_fn(message):
+    logger.info(message)
+
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
-litellm.set_verbose = False
+from openai.error import AuthenticationError
+litellm.set_verbose = True
 score = 0
-def logger_fn(model_call_object: dict):
-    print(f"model call details: {model_call_object}")
 user_message = "Hello, how are you?"
 messages = [{"content": user_message, "role": "user"}]
-# test on openai completion call
+# 1. On Call Success
+# normal completion
+## test on openai completion call
 try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
     score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
     pass
-# test on non-openai completion call
+## test on non-openai completion call
 try:
-    response = completion(
-        model="claude-instant-1", messages=messages, logger_fn=logger_fn
-    )
-    print(f"claude response: {response}")
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
     score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
     pass
-# # test on openai embedding call
-# try:
-#     response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-#     score +=1
-# except:
-#     traceback.print_exc()
-# # test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
-# try:
-#     response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-# except:
-#     score +=1 # expect this to fail
-#     traceback.print_exc()
-# # test on good azure openai embedding call
-# try:
-#     response = embedding(model='azure-embedding-model', input=[user_message], azure=True, logger_fn=logger_fn)
-#     score +=1
-# except:
-#     traceback.print_exc()
-# print(f"Score: {score}, Overall score: {score/5}")
+# streaming completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
+    pass
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
+    pass
+# embedding
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
+
+## 2. On LiteLLM Call failure
+## TEST BAD KEY
+temporary_oai_key = os.environ["OPENAI_API_KEY"]
+os.environ["OPENAI_API_KEY"] = "bad-key"
+
+temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"]
+os.environ["ANTHROPIC_API_KEY"] = "bad-key"
+
+# normal completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    print(f"exception type: {type(e).__name__}")
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+# streaming completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+# embedding
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+except Exception as e:
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+
+os.environ["OPENAI_API_KEY"] = temporary_oai_key
+os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key


@@ -180,8 +180,10 @@ class Logging:
         }

     def pre_call(self, input, api_key, model=None, additional_args={}):
+        # Log the exact input to the LLM API
+        print_verbose(f"Logging Details Pre-API Call")
         try:
-            print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
+            # print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
             self.model_call_details["additional_args"] = additional_args
@@ -193,9 +195,6 @@ class Logging:
             # User Logging -> if you pass in a custom logging function
             print_verbose(f"model call details: {self.model_call_details}")
-            print_verbose(
-                f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
-            )
             if self.logger_fn and callable(self.logger_fn):
                 try:
                     self.logger_fn(
@@ -257,7 +256,7 @@ class Logging:
             capture_exception(e)

     def post_call(self, original_response, input=None, api_key=None, additional_args={}):
-        # Do something here
+        # Log the exact result from the LLM API, for streaming - log the type of response received
         try:
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
@@ -266,7 +265,7 @@ class Logging:
             # User Logging -> if you pass in a custom logging function
             print_verbose(
-                f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
+                f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
             )
             if self.logger_fn and callable(self.logger_fn):
                 try:
@@ -331,6 +330,9 @@ class Logging:

     def success_handler(self, result, start_time, end_time):
+        print_verbose(
+            f"Logging Details LiteLLM-Success Call"
+        )
         try:
             for callback in litellm.success_callback:
                 try:
@@ -364,6 +366,9 @@ class Logging:
             pass

     def failure_handler(self, exception, traceback_exception, start_time, end_time):
+        print_verbose(
+            f"Logging Details LiteLLM-Failure Call"
+        )
         try:
             for callback in litellm.failure_callback:
                 if callback == "lite_debugger":
@@ -1699,6 +1704,9 @@ class CustomStreamWrapper:
         self.model = model
         self.custom_llm_provider = custom_llm_provider
         self.logging_obj = logging_obj
+        if self.logging_obj:
+            # Log the type of the received item
+            self.logging_obj.post_call(str(type(completion_stream)))
         if model in litellm.cohere_models:
             # cohere does not return an iterator, so we need to wrap it in one
             self.completion_stream = iter(completion_stream)
@@ -1825,7 +1833,7 @@ class CustomStreamWrapper:
                 completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk)
                 # LOGGING
-                self.logging_obj.post_call(completion_obj["content"])
+                # self.logging_obj.post_call(completion_obj["content"])
                 # return this for all models
                 return {"choices": [{"delta": completion_obj}]}
             except:
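
The hunks above only move where print_verbose() fires; the Logging class still invokes a user-supplied logger_fn from pre_call and post_call, which is how the pre-change test received its model call details. A minimal sketch of that per-call hook, modeled on the old test; the dict argument and the fields read from it are assumptions based on that test's signature, not confirmed by these hunks:

import litellm
from litellm import completion

def logger_fn(model_call_object: dict):
    # called by Logging.pre_call() and Logging.post_call() with the
    # details recorded for the request (input, api_key, additional_args, ...)
    print("model call details:", model_call_object.get("input"))

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    logger_fn=logger_fn,  # per-call logging hook
)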


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.512"
+version = "0.1.513"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"