(feat) proxy - async_on_fail_logger

ishaan-jaff 2023-12-06 14:40:35 -08:00
parent f1c1ec8523
commit 3b17fd3821


@@ -57,17 +57,16 @@ async def async_on_succes_logger(kwargs, response_obj, start_time, end_time):
     print(f"On Async Success!")
     # log: key, user, model, prompt, response, tokens, cost
     print("\nOn Success")
-    ### Access kwargs passed to litellm.completion()
+    # Access kwargs passed to litellm.completion()
     model = kwargs.get("model", None)
     messages = kwargs.get("messages", None)
     user = kwargs.get("user", None)
-    #### Access litellm_params passed to litellm.completion(), example access `metadata`
+    # Access litellm_params passed to litellm.completion(), example access `metadata`
     litellm_params = kwargs.get("litellm_params", {})
     metadata = litellm_params.get("metadata", {})  # headers passed to LiteLLM proxy, can be found here
-    #################################################
-    ##### Calculate cost using litellm.completion_cost() #######################
+    # Calculate cost using litellm.completion_cost()
     cost = litellm.completion_cost(completion_response=response_obj)
     response = response_obj
     # tokens used in response
@@ -86,4 +85,42 @@ async def async_on_succes_logger(kwargs, response_obj, start_time, end_time):
     )
     return

+async def async_on_fail_logger(kwargs, response_obj, start_time, end_time):
+    print(f"On Async Failure!")
+    print(kwargs)
+    # Access kwargs passed to litellm.completion()
+    model = kwargs.get("model", None)
+    messages = kwargs.get("messages", None)
+    user = kwargs.get("user", None)
+    # Access litellm_params passed to litellm.completion(), example access `metadata`
+    litellm_params = kwargs.get("litellm_params", {})
+    metadata = litellm_params.get("metadata", {})  # headers passed to LiteLLM proxy, can be found here
+    # Access exceptions & traceback
+    exception_event = kwargs.get("exception", None)
+    traceback_event = kwargs.get("traceback_exception", None)
+    # Calculate cost using litellm.completion_cost()
+    cost = litellm.completion_cost(completion_response=response_obj)
+    response = response_obj
+    # tokens used in response
+    usage = response_obj.get("usage", {})
+    print(
+        f"""
+        Model: {model},
+        Messages: {messages},
+        User: {user},
+        Usage: {usage},
+        Cost: {cost},
+        Response: {response}
+        Proxy Metadata: {metadata}
+        Exception: {exception_event}
+        Traceback: {traceback_event}
+        """
+    )

 # litellm.success_callback = [async_on_succes_logger]
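
For completeness, a minimal sketch of how the new failure logger could be registered and exercised, not part of this commit: the registration mirrors the commented-out success_callback line above and assumes LiteLLM's list-based callback hooks; "gpt-bad-model-name" is a hypothetical placeholder used only to force a failure. Note that on a failed call, response_obj is typically the raised exception rather than a completion, so the completion_cost call inside the handler may itself raise in practice.

import asyncio

import litellm

# Register both custom callbacks (mirrors the commented-out line above;
# failure_callback is the failure-side counterpart of success_callback).
litellm.success_callback = [async_on_succes_logger]
litellm.failure_callback = [async_on_fail_logger]

async def main():
    try:
        # Deliberately invalid model name so the call fails and
        # async_on_fail_logger fires.
        await litellm.acompletion(
            model="gpt-bad-model-name",
            messages=[{"role": "user", "content": "hi"}],
            user="test-user",
        )
    except Exception:
        pass  # the exception still propagates; the failure callback has already run

asyncio.run(main())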