Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 19:24:27 +00:00
refactor: replace 'traceback.print_exc()' with logging library
Allows error logs to be emitted in JSON format for OTel logging.
This commit is contained in:
parent 3c1d4179c2
commit e391e30285
41 changed files with 542 additions and 225 deletions
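For context on the motivation: traceback.print_exc() writes the stack trace straight to stderr, bypassing any logging handlers, whereas verbose_logger.error(...) plus verbose_logger.debug(traceback.format_exc()) route through Python's logging machinery, where a JSON formatter can be attached so the output is machine-readable for an OTel-style log pipeline. Below is a minimal sketch of that idea using only the standard library; the JsonFormatter class, the logger name, and the example message are illustrative and not part of this commit.

```python
import json
import logging
import traceback


class JsonFormatter(logging.Formatter):
    """Illustrative formatter: render each log record as one JSON object."""

    def format(self, record: logging.LogRecord) -> str:
        return json.dumps(
            {
                "level": record.levelname,
                "logger": record.name,
                "message": record.getMessage(),
            }
        )


# Hypothetical stand-in for litellm's verbose_logger (a plain logging.Logger).
verbose_logger = logging.getLogger("LiteLLM")
handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
verbose_logger.addHandler(handler)
verbose_logger.setLevel(logging.DEBUG)

try:
    raise ValueError("boom")
except Exception as e:
    # Old pattern: traceback.print_exc() -> raw text on stderr, invisible to handlers.
    # New pattern: goes through the logger, so it is emitted as JSON by the formatter.
    verbose_logger.error("example_function(): Exception occurred - {}".format(str(e)))
    verbose_logger.debug(traceback.format_exc())
```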
@@ -364,7 +364,10 @@ async def acompletion(
         )  # sets the logging event loop if the user does sync streaming (e.g. on proxy for sagemaker calls)
         return response
     except Exception as e:
-        traceback.print_exc()
+        verbose_logger.error(
+            "litellm.acompletion(): Exception occured - {}".format(str(e))
+        )
+        verbose_logger.debug(traceback.format_exc())
         custom_llm_provider = custom_llm_provider or "openai"
         raise exception_type(
             model=model,
@@ -477,7 +480,10 @@ def mock_completion(
     except Exception as e:
         if isinstance(e, openai.APIError):
             raise e
-        traceback.print_exc()
+        verbose_logger.error(
+            "litellm.mock_completion(): Exception occured - {}".format(str(e))
+        )
+        verbose_logger.debug(traceback.format_exc())
         raise Exception("Mock completion response failed")
@@ -4430,7 +4436,10 @@ async def ahealth_check(
         response = {}  # args like remaining ratelimit etc.
         return response
     except Exception as e:
-        traceback.print_exc()
+        verbose_logger.error(
+            "litellm.ahealth_check(): Exception occured - {}".format(str(e))
+        )
+        verbose_logger.debug(traceback.format_exc())
         stack_trace = traceback.format_exc()
         if isinstance(stack_trace, str):
             stack_trace = stack_trace[:1000]