Merge pull request #416 from BerriAI/ishaan/error-logging

Add Dashboard for showing error logs on exception
Ishaan Jaff authored on 2023-09-20 20:17:18 -07:00; committed by GitHub
commit a669f5a738
4 changed files with 57 additions and 3 deletions
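For orientation, a minimal usage sketch of the behavior this PR adds, assuming litellm.set_verbose = True and any call that fails (the model name below is a deliberate placeholder): when completion raises, the new code in exception_type prints a logs.litellm.ai link carrying the URL-encoded error logs before the mapped exception propagates.

import litellm
from litellm import completion

litellm.set_verbose = True  # the dashboard link is only printed in verbose mode

try:
    # any failing call works; this model name is an illustrative placeholder
    completion(model="not-a-real-model", messages=[{"role": "user", "content": "hi"}])
except Exception:
    # before the exception propagates, exception_type() prints something like:
    #   👉 view error logs:
    #   https://logs.litellm.ai/?data=<url-encoded JSON of litellm.error_logs>
    pass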


@@ -38,6 +38,7 @@ cache: Optional[Cache] = None # cache object
 model_alias_map: Dict[str, str] = {}
 max_budget: float = 0.0 # set the max budget across all providers
 _current_cost = 0 # private variable, used if max budget is set
+error_logs: Dict = {}
 #############################################
 def get_model_cost_map():
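Since error_logs lives at module level in the litellm package, any stage of a call can record a snapshot under a string key, and exception_type can later read them all back. A minimal sketch of that access pattern, assuming the litellm package is importable (the toy values are illustrative; the keys this PR actually uses are PRE_CALL, POST_CALL, EXCEPTION, and KWARGS):

import litellm

# write: each pipeline stage stores a snapshot under its own key
litellm.error_logs['PRE_CALL'] = {"model": "gpt-3.5-turbo", "input": "hi"}

# read: exception_type later stringifies and serializes the whole dict
for stage, snapshot in litellm.error_logs.items():
    print(stage, snapshot)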


@@ -1069,7 +1069,7 @@ def completion(
     except Exception as e:
         ## Map to OpenAI Exception
         raise exception_type(
-            model=model, custom_llm_provider=custom_llm_provider, original_exception=e
+            model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args,
        )
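In litellm's completion, args is assumed here to be a snapshot of the call's arguments captured near the top of the function (for example with locals()); that detail is not visible in this hunk, so treat it as an assumption. Forwarding it as completion_kwargs is what lets exception_type put the failing call's inputs into the dashboard link. A minimal sketch of the pattern, with my_api_call and my_exception_mapper as hypothetical stand-ins for completion and exception_type:

def my_exception_mapper(model, original_exception, custom_llm_provider, completion_kwargs={}):
    # stand-in for litellm's exception_type: record the inputs, then re-raise
    print("failing call inputs:", completion_kwargs)
    raise original_exception

def my_api_call(model, messages, **kwargs):
    args = locals()  # snapshot of every argument, taken before anything can fail
    try:
        raise RuntimeError("simulated provider failure")  # pretend the provider call blew up
    except Exception as e:
        # mirror the diff: forward the exception and the original inputs together
        raise my_exception_mapper(
            model=model,
            custom_llm_provider="openai",
            original_exception=e,
            completion_kwargs=args,
        )

try:
    my_api_call("some-model", [{"role": "user", "content": "hi"}])
except RuntimeError:
    pass  # the mapper printed the captured inputs before re-raising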


@@ -175,12 +175,32 @@ def test_completion_with_litellm_call_id():
 #         )
 #         # Add any assertions here to check the response
 #         print(response)
 #     except Exception as e:
 #         pytest.fail(f"Error occurred: {e}")
 # hf_test_completion()
+# this should throw an exception, to trigger https://logs.litellm.ai/
+# def hf_test_error_logs():
+#     try:
+#         litellm.set_verbose=True
+#         user_message = "My name is Merve and my favorite"
+#         messages = [{ "content": user_message,"role": "user"}]
+#         response = completion(
+#             model="huggingface/roneneldan/TinyStories-3M",
+#             messages=messages,
+#             api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
+#         )
+#         # Add any assertions here to check the response
+#         print(response)
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
+# hf_test_error_logs()
 def test_completion_cohere(): # commenting for now as the cohere endpoint is being flaky
     try:
         response = completion(


@@ -208,6 +208,7 @@ class Logging:
     def pre_call(self, input, api_key, model=None, additional_args={}):
         # Log the exact input to the LLM API
         print_verbose(f"Logging Details Pre-API Call for call id {self.litellm_call_id}")
+        litellm.error_logs['PRE_CALL'] = locals()
         try:
             # print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
             self.model_call_details["input"] = input
@@ -290,6 +291,7 @@ class Logging:
     def post_call(self, original_response, input=None, api_key=None, additional_args={}):
         # Log the exact result from the LLM API, for streaming - log the type of response received
+        litellm.error_logs['POST_CALL'] = locals()
         try:
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
@@ -1882,9 +1884,40 @@ def get_model_list():
     )
 ####### EXCEPTION MAPPING ################
-def exception_type(model, original_exception, custom_llm_provider):
+def exception_type(
+    model,
+    original_exception,
+    custom_llm_provider,
+    completion_kwargs={},
+):
     global user_logger_fn, liteDebuggerClient
     exception_mapping_worked = False
+    if litellm.set_verbose == True:
+        litellm.error_logs['EXCEPTION'] = original_exception
+        litellm.error_logs['KWARGS'] = completion_kwargs
+        try:
+            # code to show users their litellm error dashboard
+            import urllib.parse
+            import json
+            for log_key in litellm.error_logs:
+                current_logs = litellm.error_logs[log_key]
+                if type(current_logs) == dict:
+                    filtered_error_logs = {key: str(value) for key, value in current_logs.items()}
+                    litellm.error_logs[log_key] = filtered_error_logs
+                else:
+                    litellm.error_logs[log_key] = str(current_logs)
+            # Convert the filtered_error_logs dictionary to a JSON string
+            error_logs_json = json.dumps(litellm.error_logs)
+            # URL-encode the JSON data
+            encoded_data = urllib.parse.quote(error_logs_json)
+            print("👉 view error logs:")
+            print("\033[91m" + '\033[4m' + 'https://logs.litellm.ai/?data=' + str(encoded_data) + "\033[0m")
+        except:
+            pass
     try:
         if isinstance(original_exception, OriginalError):
             # Handle the OpenAIError
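To make the link-building logic concrete, here is a self-contained sketch of the encode step and its inverse, on the assumption (not shown in this diff) that the logs.litellm.ai dashboard simply JSON-parses the data query parameter:

import json
import urllib.parse

error_logs = {
    'PRE_CALL': {"model": "gpt-3.5-turbo", "input": "hi"},  # dict-valued stage
    'EXCEPTION': ValueError("bad request"),                 # non-dict stage
}

# stringify values so everything is JSON-serializable (mirrors the loop above)
flattened = {
    key: ({k: str(v) for k, v in value.items()} if isinstance(value, dict) else str(value))
    for key, value in error_logs.items()
}
encoded_data = urllib.parse.quote(json.dumps(flattened))
url = "https://logs.litellm.ai/?data=" + encoded_data
print(url)

# decoding the payload recovers the flattened logs
recovered = json.loads(urllib.parse.unquote(url.split("?data=", 1)[1]))
assert recovered == flattened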