docs(custom_callback.md): add async failure + streaming logging events to docs
https://github.com/BerriAI/litellm/issues/1125
parent a6e78497b5
commit 0f14fb3797
2 changed files with 64 additions and 4 deletions
custom_callback.md
@@ -4,7 +4,9 @@
 You can create a custom callback class to precisely log events as they occur in litellm.
 
 ```python
+import litellm
 from litellm.integrations.custom_logger import CustomLogger
+from litellm import completion, acompletion
 
 class MyCustomHandler(CustomLogger):
     def log_pre_api_call(self, model, messages, kwargs):
@@ -21,14 +23,38 @@ class MyCustomHandler(CustomLogger):
     def log_failure_event(self, kwargs, response_obj, start_time, end_time):
         print(f"On Failure")
 
+    #### ASYNC #### - for acompletion/aembeddings
+
+    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Streaming")
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Success")
+
+    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Failure")
 
 customHandler = MyCustomHandler()
 
 litellm.callbacks = [customHandler]
 
+## sync
 response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
                       stream=True)
 for chunk in response:
     continue
 
+## async
+import asyncio
+
+async def completion():
+    response = await acompletion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
+                                 stream=True)
+    async for chunk in response:
+        continue
+asyncio.run(completion())
 ```
 
 ## Callback Functions
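A hedged aside on the example in the hunk above: it only exercises the success and streaming paths. A minimal sketch of making the new `async_log_failure_event` fire, assuming a bogus `api_key` is enough to force a provider error:

```python
# Assumes the MyCustomHandler instance above is registered via litellm.callbacks.
import asyncio
from litellm import acompletion

async def trigger_failure():
    try:
        # A bogus api_key is assumed to be enough to make the call fail,
        # firing async_log_failure_event before the exception propagates.
        await acompletion(model="gpt-3.5-turbo",
                          messages=[{"role": "user", "content": "hi"}],
                          api_key="sk-bad-key")
    except Exception:
        pass  # swallow it here; the handler has already logged the failure

asyncio.run(trigger_failure())
```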
@@ -87,6 +113,41 @@ print(response)
 ## Async Callback Functions
 
+We recommend using the Custom Logger class for async.
+
+```python
+import litellm
+from litellm.integrations.custom_logger import CustomLogger
+from litellm import acompletion
+
+class MyCustomHandler(CustomLogger):
+    #### ASYNC ####
+
+    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Streaming")
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Success")
+
+    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
+        print(f"On Async Failure")
+
+import asyncio
+customHandler = MyCustomHandler()
+
+litellm.callbacks = [customHandler]
+
+async def completion():
+    response = await acompletion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}],
+                                 stream=True)
+    async for chunk in response:
+        continue
+asyncio.run(completion())
+```
+
+**Functions**
+
+Use this if you just want to pass in an async function for logging.
+
 LiteLLM currently supports just async success callback functions for async completion/embedding calls.
 
 ```python
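The hunk above stops at the opening ```python fence, so the function-based example itself isn't visible here. A minimal sketch of the shape it likely takes — the `test_chat_openai` name comes from the next hunk header; the callback function's name and body are assumptions:

```python
# Sketch: an async success callback *function* (not a class), registered
# via litellm.success_callback. async_test_logging_fn is an assumed name.
import asyncio
import litellm
from litellm import acompletion

async def async_test_logging_fn(kwargs, completion_obj, start_time, end_time):
    print("On Async Success!")

litellm.success_callback = [async_test_logging_fn]

async def test_chat_openai():
    response = await acompletion(model="gpt-3.5-turbo",
                                 messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
    print(response)

asyncio.run(test_chat_openai())
```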
@@ -117,9 +178,6 @@ asyncio.run(test_chat_openai())
 :::info
 
 We're actively trying to expand this to other event types. [Tell us if you need this!](https://github.com/BerriAI/litellm/issues/1007)
-
-
-
 :::
 
 ## What's in kwargs?
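The body of the `## What's in kwargs?` section falls outside these hunks. Purely as an illustrative sketch, assuming `kwargs` mirrors the call arguments (e.g. `model` and `messages`) and that a sync `log_success_event` hook exists on the base class:

```python
from litellm.integrations.custom_logger import CustomLogger

class InspectKwargsHandler(CustomLogger):
    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # Assumed fields: "model" and "messages" mirroring what was passed
        # to completion()/acompletion(); the cut-off section documents the
        # real list.
        print(kwargs.get("model"), kwargs.get("messages"))
```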
litellm/integrations/custom_logger.py
@@ -28,6 +28,8 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback
     def log_failure_event(self, kwargs, response_obj, start_time, end_time):
         pass
 
+    #### ASYNC ####
+
     async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
         pass
@@ -41,7 +43,7 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callback
     async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
         pass
 
-    #### CALL HOOKS ####
+    #### CALL HOOKS - proxy only ####
     """
     Control / modify incoming and outgoing data before calling the model
     """
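One consequence of the base class shown above: every event method defaults to a no-op `pass`, so a subclass only needs to override the hooks it cares about. A minimal sketch (the class name here is illustrative):

```python
from litellm.integrations.custom_logger import CustomLogger

class StreamOnlyLogger(CustomLogger):
    # Override only the async streaming hook; every other event falls
    # through to the base class's no-op `pass` implementations.
    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
        print(f"stream chunk at {end_time}: {response_obj}")
```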