docs(custom_callback.md): add details on what kwargs are passed to custom callbacks
This commit is contained in:
parent fc757dc1b4
commit d6f2d9b9bb
3 changed files with 59 additions and 2 deletions
@@ -39,6 +39,55 @@ response = completion(
print(response)
```

## What's in kwargs?

Notice we pass in a `kwargs` argument to the custom callback.

```python
def custom_callback(
    kwargs,                 # kwargs to completion
    completion_response,    # response from completion
    start_time, end_time    # start/end time
):
    # Your custom code here
    print("LITELLM: in custom callback function")
    print("kwargs", kwargs)
    print("completion_response", completion_response)
    print("start_time", start_time)
    print("end_time", end_time)
```
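
To wire this up, register the function as a callback before calling `completion`. A minimal sketch, assuming the `custom_callback` defined above (the model and message are just placeholders):

```python
import litellm
from litellm import completion

# register the function above; litellm invokes it on successful calls
litellm.success_callback = [custom_callback]

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
)
```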

The `kwargs` argument is a dictionary containing all the model-call details: the params we receive, the values we send to the HTTP endpoint, the response we receive, the stacktrace in case of errors, etc.

This is all logged in the [model_call_details via our Logger](https://github.com/BerriAI/litellm/blob/fc757dc1b47d2eb9d0ea47d6ad224955b705059d/litellm/utils.py#L246).

Here's exactly what you can expect in the kwargs dictionary:
```shell
### DEFAULT PARAMS ###
"model": self.model,
"messages": self.messages,
"optional_params": self.optional_params, # model-specific params passed in
"litellm_params": self.litellm_params, # litellm-specific params passed in (e.g. metadata passed to completion call)
"start_time": self.start_time, # datetime object of when the call was started

### PRE-API CALL PARAMS ### (check via kwargs["log_event_type"] == "pre_api_call")
"input": input, # the exact prompt sent to the LLM API
"api_key": api_key, # the api key used for that LLM API
"additional_args": additional_args, # any additional details for that API call (e.g. contains optional params sent)

### POST-API CALL PARAMS ### (check via kwargs["log_event_type"] == "post_api_call")
"original_response": original_response, # the original http response received (saved via response.text)

### ON-SUCCESS PARAMS ### (check via kwargs["log_event_type"] == "successful_api_call")
"complete_streaming_response": complete_streaming_response, # the complete streamed response (only set if `completion(..., stream=True)`)
"end_time": end_time, # datetime object of when the call was completed

### ON-FAILURE PARAMS ### (check via kwargs["log_event_type"] == "failed_api_call")
"exception": exception, # the Exception raised
"traceback_exception": traceback_exception, # the traceback generated via `traceback.format_exc()`
"end_time": end_time, # datetime object of when the call was completed
```
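
Since which keys are present depends on where in the call lifecycle the log fired, a callback can branch on `kwargs["log_event_type"]`. A minimal sketch, assuming the fields listed above; which event types actually reach your function depends on how the callback is registered:

```python
def custom_callback(kwargs, completion_response, start_time, end_time):
    event_type = kwargs.get("log_event_type")
    if event_type == "pre_api_call":
        print("prompt sent to the LLM API:", kwargs.get("input"))
    elif event_type == "post_api_call":
        print("raw http response:", kwargs.get("original_response"))
    elif event_type == "successful_api_call":
        print("call duration:", end_time - start_time)
    elif event_type == "failed_api_call":
        print("exception:", kwargs.get("exception"))
```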
## Get complete streaming response
LiteLLM will pass you the complete streaming response in the final streaming chunk as part of the kwargs for your custom callback function.
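
A minimal sketch of reading it inside the callback; per the list above, the key is only set for `stream=True` calls, and only on the final chunk:

```python
def custom_callback(kwargs, completion_response, start_time, end_time):
    # "complete_streaming_response" only appears on the final streaming chunk
    if kwargs.get("complete_streaming_response"):
        print("full streamed response:", kwargs["complete_streaming_response"])
```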
@@ -94,6 +94,7 @@ def is_port_in_use(port):
@click.option('--temperature', default=None, type=float, help='Set temperature for the model')
@click.option('--max_tokens', default=None, type=int, help='Set max tokens for the model')
@click.option('--drop_params', is_flag=True, help='Drop any unmapped params')
@click.option('--save', is_flag=True, help='Save params to config, to persist across restarts')
@click.option('--create_proxy', is_flag=True, help='Creates a local OpenAI-compatible server template')
@click.option('--add_function_to_prompt', is_flag=True, help='If function passed but unsupported, pass it as prompt')
@click.option('--config', '-c', is_flag=True, help='Configure Litellm')
@@ -260,7 +260,7 @@ class Logging:
self.model_call_details["input"] = input
|
||||
self.model_call_details["api_key"] = api_key
|
||||
self.model_call_details["additional_args"] = additional_args
|
||||
|
||||
self.model_call_details["log_event_type"] = "pre_api_call"
|
||||
if (
|
||||
model
|
||||
): # if model name was changes pre-call, overwrite the initial model call name with the new one
|
||||
|
@@ -358,6 +358,7 @@ class Logging:
self.model_call_details["api_key"] = api_key
|
||||
self.model_call_details["original_response"] = original_response
|
||||
self.model_call_details["additional_args"] = additional_args
|
||||
self.model_call_details["log_event_type"] = "post_api_call"
|
||||
|
||||
# User Logging -> if you pass in a custom logging function
|
||||
print_verbose(f"model call details: {self.model_call_details}")
|
||||
|
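
The `print_verbose` call above is where a user-supplied logging function receives these details. As a hedged sketch, assuming the `logger_fn` parameter of `completion` is passed the `model_call_details` dict described in the docs above:

```python
from litellm import completion

def my_logger(model_call_details):
    # receives the model_call_details dict at each log event
    print("log_event_type:", model_call_details.get("log_event_type"))

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
    logger_fn=my_logger,
)
```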
@@ -419,7 +420,8 @@
start_time = self.start_time
if end_time is None:
    end_time = datetime.datetime.now()

self.model_call_details["log_event_type"] = "successful_api_call"
self.model_call_details["end_time"] = end_time
complete_streaming_response = None

## BUILD COMPLETE STREAMED RESPONSE
@@ -547,6 +549,11 @@ class Logging:
if end_time is None:
    end_time = datetime.datetime.now()

self.model_call_details["log_event_type"] = "failed_api_call"
self.model_call_details["exception"] = exception
self.model_call_details["traceback_exception"] = traceback_exception
self.model_call_details["end_time"] = end_time

for callback in litellm.failure_callback:
    try:
        if callback == "lite_debugger":
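
For reference, a failure handler consuming these fields might look like the sketch below; `custom_failure_callback` is a hypothetical name, and registering callables on `litellm.failure_callback` is assumed to work the same way as for `litellm.success_callback` (the loop above also accepts string names for built-in integrations like `lite_debugger`):

```python
import litellm

def custom_failure_callback(kwargs, completion_response, start_time, end_time):
    # assumption: on failure, kwargs carries the fields set above
    print("exception:", kwargs.get("exception"))
    print("traceback:", kwargs.get("traceback_exception"))
    print("failed at:", kwargs.get("end_time"))

litellm.failure_callback = [custom_failure_callback]
```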