diff --git a/litellm/proxy/custom_callbacks.py b/litellm/proxy/custom_callbacks.py index f6e01f807c..9ad19822f0 100644 --- a/litellm/proxy/custom_callbacks.py +++ b/litellm/proxy/custom_callbacks.py @@ -57,17 +57,16 @@ async def async_on_succes_logger(kwargs, response_obj, start_time, end_time): print(f"On Async Success!") # log: key, user, model, prompt, response, tokens, cost print("\nOn Success") - ### Access kwargs passed to litellm.completion() + # Access kwargs passed to litellm.completion() model = kwargs.get("model", None) messages = kwargs.get("messages", None) user = kwargs.get("user", None) - #### Access litellm_params passed to litellm.completion(), example access `metadata` + # Access litellm_params passed to litellm.completion(), example access `metadata` litellm_params = kwargs.get("litellm_params", {}) metadata = litellm_params.get("metadata", {}) # headers passed to LiteLLM proxy, can be found here - ################################################# - ##### Calculate cost using litellm.completion_cost() ####################### + # Calculate cost using litellm.completion_cost() cost = litellm.completion_cost(completion_response=response_obj) response = response_obj # tokens used in response @@ -86,4 +85,42 @@ async def async_on_succes_logger(kwargs, response_obj, start_time, end_time): ) return + +async def async_on_fail_logger(kwargs, response_obj, start_time, end_time): + print(f"On Async Failure!") + print(kwargs) + + # Access kwargs passed to litellm.completion() + model = kwargs.get("model", None) + messages = kwargs.get("messages", None) + user = kwargs.get("user", None) + + # Access litellm_params passed to litellm.completion(), example access `metadata` + litellm_params = kwargs.get("litellm_params", {}) + metadata = litellm_params.get("metadata", {}) # headers passed to LiteLLM proxy, can be found here + + # Access Exceptions & Traceback + exception_event = kwargs.get("exception", None) + traceback_event = kwargs.get("traceback_exception", 
None) + + # Calculate cost using litellm.completion_cost() + cost = litellm.completion_cost(completion_response=response_obj) + response = response_obj + # tokens used in response + usage = response_obj.get("usage", {}) + + print( + f""" + Model: {model}, + Messages: {messages}, + User: {user}, + Usage: {usage}, + Cost: {cost}, + Response: {response} + Proxy Metadata: {metadata} + Exception: {exception_event} + Traceback: {traceback_event} + """ + ) + # litellm.success_callback = [async_on_succes_logger] \ No newline at end of file