updates to logging

Krrish Dholakia 2023-08-24 18:54:34 -07:00
parent 0d24bf9221
commit 7d16df98ac
4 changed files with 49 additions and 4 deletions


@@ -57,6 +57,27 @@ class LiteDebugger:
             )
             pass
+    def post_call_log_event(
+        self, original_response, litellm_call_id, print_verbose
+    ):
+        try:
+            litellm_data_obj = {
+                "status": "succeeded",
+                "additional_details": {"original_response": original_response},
+                "litellm_call_id": litellm_call_id,
+                "user_email": self.user_email,
+            }
+            response = requests.post(
+                url=self.api_url,
+                headers={"content-type": "application/json"},
+                data=json.dumps(litellm_data_obj),
+            )
+            print_verbose(f"LiteDebugger: api response - {response.text}")
+        except:
+            print_verbose(
+                f"[Non-Blocking Error] LiteDebugger: Logging Error - {traceback.format_exc()}"
+            )
     def log_event(
         self,
         model,
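
The new post_call_log_event helper POSTs a "succeeded" record for a completed call to the debugger API and swallows any error so logging never interrupts the request. A minimal sketch of how it might be driven is below; the import path, the constructor argument, and the print_verbose helper are assumptions for illustration, not part of the commit.

```python
import uuid

from litellm.integrations.litedebugger import LiteDebugger  # assumed module path


def print_verbose(msg):
    # stand-in for litellm's verbose printer
    print(msg)


debugger = LiteDebugger(email="you@example.com")  # hypothetical constructor argument
debugger.post_call_log_event(
    original_response='{"choices": [{"message": {"content": "hi"}}]}',
    litellm_call_id=str(uuid.uuid4()),
    print_verbose=print_verbose,
)
```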


@@ -260,6 +260,30 @@ class Logging:
                     print_verbose(
                         f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
                     )
+            # Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
+            for callback in litellm.input_callback:
+                try:
+                    if callback == "lite_debugger":
+                        print_verbose("reaches litedebugger for post-call logging!")
+                        model = self.model_call_details["model"]
+                        messages = self.model_call_details["input"]
+                        print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
+                        liteDebuggerClient.post_call_log_event(
+                            original_response=original_response,
+                            litellm_call_id=self.
+                            litellm_params["litellm_call_id"],
+                            print_verbose=print_verbose,
+                        )
+                except:
+                    print_verbose(
+                        f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while post-call logging with integrations {traceback.format_exc()}"
+                    )
+                    print_verbose(
+                        f"LiteLLM.Logging: is sentry capture exception initialized {capture_exception}"
+                    )
+                    if capture_exception:  # log this error to sentry for debugging
+                        capture_exception(e)
         except:
             print_verbose(
                 f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
@@ -788,8 +812,6 @@ def handle_failure(exception, traceback_exception, start_time, end_time, args,
         additional_details["Event_Name"] = additional_details.pop(
             "failed_event_name", "litellm.failed_query")
         print_verbose(f"self.failure_callback: {litellm.failure_callback}")
-        # print_verbose(f"additional_details: {additional_details}")
         for callback in litellm.failure_callback:
             try:
                 if callback == "slack":
@@ -1143,8 +1165,8 @@ def get_all_keys(llm_provider=None):
             time_delta = current_time - last_fetched_at_keys
             if time_delta > 300 or last_fetched_at_keys == None or llm_provider: # if the llm provider is passed in , assume this happening due to an AuthError for that provider
                 # make the api call
-                last_fetched_at_keys = time.time()
-                print_verbose(f"last_fetched_at_keys: {last_fetched_at_keys}")
+                last_fetched_at = time.time()
+                print_verbose(f"last_fetched_at: {last_fetched_at}")
                 response = requests.post(url="http://api.litellm.ai/get_all_keys", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
                 print_verbose(f"get model key response: {response.text}")
                 data = response.json()
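
In get_all_keys, keys are re-fetched from the hosted API when the cached copy is older than 300 seconds, has never been fetched, or a provider is passed explicitly (the comment assumes that follows an AuthError). A small sketch of that refresh condition, with the HTTP call replaced by a stub:

```python
import time

last_fetched_at_keys = None  # module-level cache timestamp, as in the diff


def fetch_keys_from_api():
    # stand-in for the POST to http://api.litellm.ai/get_all_keys
    return {"OPENAI_API_KEY": "placeholder"}


def get_all_keys(llm_provider=None):
    global last_fetched_at_keys
    now = time.time()
    stale = last_fetched_at_keys is None or (now - last_fetched_at_keys) > 300
    if stale or llm_provider:
        # refresh: the 5-minute window expired or a provider forced it
        last_fetched_at_keys = now
        return fetch_keys_from_api()
    return None  # cache still considered fresh
```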
@@ -1175,6 +1197,8 @@ def get_model_list():
             data = response.json()
             # update model list
             model_list = data["model_list"]
+            # update environment - if required
+            threading.Thread(target=get_all_keys, args=()).start()
             return model_list
         return [] # return empty list by default
     except:
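
The two lines added to get_model_list start get_all_keys on a background thread, so the environment refresh does not delay returning the model list. A minimal sketch of that fire-and-forget pattern with stand-in data:

```python
import threading
import time


def get_all_keys():
    # stand-in for the key-refresh network call
    time.sleep(1)
    print("keys refreshed")


def get_model_list():
    model_list = ["model-a", "model-b"]  # stand-in for data["model_list"]
    # update environment - if required
    threading.Thread(target=get_all_keys, args=()).start()
    return model_list  # returned immediately; the refresh finishes in the background


print(get_model_list())
```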