Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
return cache key in streaming responses
This commit is contained in:
parent c4cb0afa98
commit 7e1d5c81b4

1 changed file with 4 additions and 1 deletion
@@ -3440,7 +3440,10 @@ async def chat_completion(
         if (
             "stream" in data and data["stream"] == True
         ):  # use generate_responses to stream responses
-            custom_headers = {"x-litellm-model-id": model_id}
+            custom_headers = {
+                "x-litellm-model-id": model_id,
+                "x-litellm-cache-key": cache_key,
+            }
             selected_data_generator = select_data_generator(
                 response=response, user_api_key_dict=user_api_key_dict
             )
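
With this change, a streaming chat completion from the proxy carries the cache key in a response header alongside the model id. A minimal client-side sketch of reading both headers, assuming a local proxy and a placeholder API key (the URL, key, and model below are illustrative, not taken from the commit; httpx is used here only for demonstration):

# Sketch: inspect the x-litellm-cache-key header on a streaming response
# from a LiteLLM proxy. Base URL, key, and model are assumed placeholders.
import httpx

payload = {
    "model": "gpt-3.5-turbo",  # any model configured on the proxy
    "messages": [{"role": "user", "content": "hello"}],
    "stream": True,  # the streaming path is where this commit adds the header
}

with httpx.stream(
    "POST",
    "http://localhost:4000/chat/completions",  # assumed proxy address
    headers={"Authorization": "Bearer sk-1234"},  # assumed proxy key
    json=payload,
) as response:
    # Response headers arrive before the streamed body, so the cache key
    # can be read without consuming any chunks first.
    print("model id: ", response.headers.get("x-litellm-model-id"))
    print("cache key:", response.headers.get("x-litellm-cache-key"))
    for line in response.iter_lines():
        pass  # consume the SSE chunks as usual

Because headers are sent ahead of the body, a client can log or correlate the cache key immediately, without waiting for the first streamed chunk.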