forked from phoenix/litellm-mirror
docs(call_hooks.md): add docs on pre-call hooks
This commit is contained in:
parent
589a6ca863
commit
d7b49dbc41
2 changed files with 79 additions and 0 deletions
78
docs/my-website/docs/proxy/call_hooks.md
Normal file
78
docs/my-website/docs/proxy/call_hooks.md
Normal file
|
@ -0,0 +1,78 @@
# Call Hooks - Modify Data

Modify data just before making litellm completion calls on the proxy

See a complete example with our [parallel request rate limiter](https://github.com/BerriAI/litellm/blob/main/litellm/proxy/hooks/parallel_request_limiter.py)

## Quick Start

1. In your Custom Handler add a new `async_pre_call_hook` function

This function is called just before a litellm completion call is made, and allows you to modify the data going into the litellm call [**See Code**](https://github.com/BerriAI/litellm/blob/589a6ca863000ba8e92c897ba0f776796e7a5904/litellm/proxy/proxy_server.py#L1000)

```python
from litellm.integrations.custom_logger import CustomLogger
import litellm

# This file includes the custom callbacks for LiteLLM Proxy
# Once defined, these can be passed in proxy_config.yaml
class MyCustomHandler(CustomLogger): # https://docs.litellm.ai/docs/observability/custom_callback#callback-class
    # Class variables or attributes
    def __init__(self):
        pass

    #### ASYNC ####

    async def async_log_stream_event(self, kwargs, response_obj, start_time, end_time):
        pass

    async def async_log_pre_api_call(self, model, messages, kwargs):
        pass

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        pass

    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
        pass

    #### CALL HOOKS - proxy only ####

    async def async_pre_call_hook(self, user_api_key_dict: UserAPIKeyAuth, cache: DualCache, data: dict, call_type: Literal["completion", "embeddings"]):
        data["model"] = "my-new-model"
        return data

proxy_handler_instance = MyCustomHandler()
```

2. Add this file to your proxy config

```yaml
model_list:
  - model_name: gpt-3.5-turbo
    litellm_params:
      model: gpt-3.5-turbo

litellm_settings:
  callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
```

3. Start the server + test the request

```shell
$ litellm --config /path/to/config.yaml
```

```shell
curl --location 'http://0.0.0.0:8000/chat/completions' \
    --data ' {
    "model": "gpt-3.5-turbo",
    "messages": [
        {
        "role": "user",
        "content": "good morning good sir"
        }
    ],
    "user": "ishaan-app",
    "temperature": 0.2
    }'
```
|
@ -102,6 +102,7 @@ const sidebars = {
      "proxy/load_balancing",
      "proxy/virtual_keys",
      "proxy/model_management",
      "proxy/call_hooks",
      "proxy/caching",
      "proxy/logging",
      "proxy/cli",
|
Loading…
Add table
Add a link
Reference in a new issue