Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
Merge pull request #5244 from BerriAI/litellm_better_error_logging_sentry

refactor: replace .error() with .exception() logging for better debugging on sentry

Commit a8dd2b6910: 35 changed files with 242 additions and 253 deletions
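The change is mechanical but meaningful: logging.Logger.exception() logs at ERROR level and automatically appends the active exception's traceback to the record. That makes the separate verbose_logger.debug(traceback.format_exc()) calls redundant, which is why each hunk below deletes one. A minimal sketch of the before/after pattern (the logger name and message are illustrative):

import logging
import traceback

logger = logging.getLogger("litellm")
logging.basicConfig(level=logging.DEBUG)

def old_pattern():
    try:
        raise ValueError("bad chunk")
    except Exception as e:
        # Message at ERROR, traceback only at DEBUG: any handler that
        # filters at ERROR never sees the stack trace.
        logger.error("handle_chunk failed: {}".format(str(e)))
        logger.debug(traceback.format_exc())

def new_pattern():
    try:
        raise ValueError("bad chunk")
    except Exception as e:
        # .exception() logs at ERROR and attaches exc_info, so the
        # traceback travels with the record itself.
        logger.exception("handle_chunk failed: {}".format(str(e)))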
@@ -8923,12 +8923,11 @@ class CustomStreamWrapper:
                 "finish_reason": finish_reason,
             }
         except Exception as e:
-            verbose_logger.error(
+            verbose_logger.exception(
                 "litellm.CustomStreamWrapper.handle_predibase_chunk(): Exception occured - {}".format(
                     str(e)
                 )
             )
-            verbose_logger.debug(traceback.format_exc())
             raise e

     def handle_huggingface_chunk(self, chunk):
@@ -8972,12 +8971,11 @@ class CustomStreamWrapper:
                 "finish_reason": finish_reason,
             }
         except Exception as e:
-            verbose_logger.error(
+            verbose_logger.exception(
                 "litellm.CustomStreamWrapper.handle_huggingface_chunk(): Exception occured - {}".format(
                     str(e)
                 )
             )
-            verbose_logger.debug(traceback.format_exc())
             raise e

     def handle_ai21_chunk(self, chunk):  # fake streaming
@@ -9200,12 +9198,11 @@ class CustomStreamWrapper:
                 "usage": usage,
             }
         except Exception as e:
-            verbose_logger.error(
+            verbose_logger.exception(
                 "litellm.CustomStreamWrapper.handle_openai_chat_completion_chunk(): Exception occured - {}".format(
                     str(e)
                 )
             )
-            verbose_logger.debug(traceback.format_exc())
             raise e

     def handle_azure_text_completion_chunk(self, chunk):
@@ -9285,13 +9282,12 @@ class CustomStreamWrapper:
                 return ""
             else:
                 return ""
-        except:
-            verbose_logger.error(
+        except Exception as e:
+            verbose_logger.exception(
                 "litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occured - {}".format(
                     str(e)
                 )
             )
-            verbose_logger.debug(traceback.format_exc())
             return ""

     def handle_cloudlfare_stream(self, chunk):
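Note that handle_baseten_chunk (and handle_clarifai_chunk below) also tightens a bare except: into except Exception as e:. That fix is load-bearing twice over: as rendered, the old handler formatted str(e) even though the bare except never bound e, and a bare except additionally traps BaseException subclasses such as KeyboardInterrupt and SystemExit. A hedged sketch of the corrected shape (the parsing logic is an illustrative stand-in):

import logging

logger = logging.getLogger("litellm")

def parse_chunk(chunk: str) -> str:
    try:
        # Illustrative stand-in for provider-specific chunk parsing.
        return chunk.split(":", 1)[1]
    except Exception as e:
        # `except Exception as e` binds e for the message and lets
        # KeyboardInterrupt/SystemExit propagate; the old bare
        # `except:` did neither.
        logger.exception("chunk parsing failed: {}".format(str(e)))
        return ""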
@@ -9525,13 +9521,12 @@ class CustomStreamWrapper:
                 "text": text,
                 "is_finished": True,
             }
-        except:
-            verbose_logger.error(
+        except Exception as e:
+            verbose_logger.exception(
                 "litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occured - {}".format(
                     str(e)
                 )
             )
-            verbose_logger.debug(traceback.format_exc())
             return ""

     def model_response_creator(
@@ -10144,12 +10139,11 @@ class CustomStreamWrapper:
                     tool["type"] = "function"
                 model_response.choices[0].delta = Delta(**_json_delta)
             except Exception as e:
-                verbose_logger.error(
-                    "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}\n{}".format(
-                        str(e), traceback.format_exc()
+                verbose_logger.exception(
+                    "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format(
+                        str(e)
                     )
                 )
-                verbose_logger.debug(traceback.format_exc())
                 model_response.choices[0].delta = Delta()
         else:
             try:
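The "better debugging on sentry" in the PR title hinges on Sentry's logging integration: by default, ERROR-level log records become Sentry events, and a record produced by .exception() carries exc_info, so the event arrives with a full stack trace rather than a bare message string. A sketch of that wiring, assuming a standard sentry-sdk setup (the DSN is a placeholder):

import logging

import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration

sentry_sdk.init(
    dsn="https://publickey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[
        # INFO+ records become breadcrumbs; ERROR+ become events.
        LoggingIntegration(level=logging.INFO, event_level=logging.ERROR),
    ],
)

logger = logging.getLogger("litellm")

try:
    {}["missing_key"]
except Exception as e:
    # Arrives in Sentry as an event with the KeyError traceback attached.
    logger.exception("lookup failed: {}".format(str(e)))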
@@ -11137,10 +11131,8 @@ def trim_messages(
             return final_messages, response_tokens
         return final_messages
     except Exception as e:  # [NON-Blocking, if error occurs just return final_messages
-        verbose_logger.error(
-            "Got exception while token trimming - {}\n{}".format(
-                str(e), traceback.format_exc()
-            )
+        verbose_logger.exception(
+            "Got exception while token trimming - {}".format(str(e))
         )
         return messages
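The trim_messages hunk is the one place the handler stays deliberately non-blocking: the inline comment marks it as such, and on failure the function logs via .exception() and returns the original messages instead of raising. A minimal sketch of that log-and-fall-back pattern (the trimming body is a stand-in, not litellm's actual logic):

import logging
from typing import Dict, List

logger = logging.getLogger("litellm")

def trim_messages_safely(messages: List[Dict], max_messages: int) -> List[Dict]:
    try:
        # Illustrative stand-in for real token-based trimming.
        return messages[-max_messages:]
    except Exception as e:
        # Non-blocking: log with traceback, hand back the input untouched.
        logger.exception("Got exception while token trimming - {}".format(str(e)))
        return messages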