forked from phoenix/litellm-mirror
refactor: replace .error() with .exception() logging for better debugging in Sentry
This commit is contained in:
parent
1510daba4f
commit
61f4b71ef7
35 changed files with 242 additions and 253 deletions
|
@ -8894,12 +8894,11 @@ class CustomStreamWrapper:
|
|||
"finish_reason": finish_reason,
|
||||
}
|
||||
except Exception as e:
|
||||
verbose_logger.error(
|
||||
verbose_logger.exception(
|
||||
"litellm.CustomStreamWrapper.handle_predibase_chunk(): Exception occured - {}".format(
|
||||
str(e)
|
||||
)
|
||||
)
|
||||
verbose_logger.debug(traceback.format_exc())
|
||||
raise e
|
||||
|
||||
def handle_huggingface_chunk(self, chunk):
|
||||
|
@ -8943,12 +8942,11 @@ class CustomStreamWrapper:
|
|||
"finish_reason": finish_reason,
|
||||
}
|
||||
except Exception as e:
|
||||
verbose_logger.error(
|
||||
verbose_logger.exception(
|
||||
"litellm.CustomStreamWrapper.handle_huggingface_chunk(): Exception occured - {}".format(
|
||||
str(e)
|
||||
)
|
||||
)
|
||||
verbose_logger.debug(traceback.format_exc())
|
||||
raise e
|
||||
|
||||
def handle_ai21_chunk(self, chunk): # fake streaming
|
||||
|
@ -9171,12 +9169,11 @@ class CustomStreamWrapper:
|
|||
"usage": usage,
|
||||
}
|
||||
except Exception as e:
|
||||
verbose_logger.error(
|
||||
verbose_logger.exception(
|
||||
"litellm.CustomStreamWrapper.handle_openai_chat_completion_chunk(): Exception occured - {}".format(
|
||||
str(e)
|
||||
)
|
||||
)
|
||||
verbose_logger.debug(traceback.format_exc())
|
||||
raise e
|
||||
|
||||
def handle_azure_text_completion_chunk(self, chunk):
|
||||
|
@ -9256,13 +9253,12 @@ class CustomStreamWrapper:
|
|||
return ""
|
||||
else:
|
||||
return ""
|
||||
except:
|
||||
verbose_logger.error(
|
||||
except Exception as e:
|
||||
verbose_logger.exception(
|
||||
"litellm.CustomStreamWrapper.handle_baseten_chunk(): Exception occured - {}".format(
|
||||
str(e)
|
||||
)
|
||||
)
|
||||
verbose_logger.debug(traceback.format_exc())
|
||||
return ""
|
||||
|
||||
def handle_cloudlfare_stream(self, chunk):
|
||||
|
@ -9496,13 +9492,12 @@ class CustomStreamWrapper:
|
|||
"text": text,
|
||||
"is_finished": True,
|
||||
}
|
||||
except:
|
||||
verbose_logger.error(
|
||||
except Exception as e:
|
||||
verbose_logger.exception(
|
||||
"litellm.CustomStreamWrapper.handle_clarifai_chunk(): Exception occured - {}".format(
|
||||
str(e)
|
||||
)
|
||||
)
|
||||
verbose_logger.debug(traceback.format_exc())
|
||||
return ""
|
||||
|
||||
def model_response_creator(
|
||||
|
@ -10103,12 +10098,11 @@ class CustomStreamWrapper:
|
|||
tool["type"] = "function"
|
||||
model_response.choices[0].delta = Delta(**_json_delta)
|
||||
except Exception as e:
|
||||
verbose_logger.error(
|
||||
"litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}\n{}".format(
|
||||
str(e), traceback.format_exc()
|
||||
verbose_logger.exception(
|
||||
"litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format(
|
||||
str(e)
|
||||
)
|
||||
)
|
||||
verbose_logger.debug(traceback.format_exc())
|
||||
model_response.choices[0].delta = Delta()
|
||||
else:
|
||||
try:
|
||||
|
@ -11088,10 +11082,8 @@ def trim_messages(
|
|||
return final_messages, response_tokens
|
||||
return final_messages
|
||||
except Exception as e: # [NON-Blocking, if error occurs just return final_messages
|
||||
verbose_logger.error(
|
||||
"Got exception while token trimming - {}\n{}".format(
|
||||
str(e), traceback.format_exc()
|
||||
)
|
||||
verbose_logger.exception(
|
||||
"Got exception while token trimming - {}".format(str(e))
|
||||
)
|
||||
return messages
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue