Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 11:14:04 +00:00
(fix) proxy - streaming sagemaker
This commit is contained in:
parent 505544340b
commit 67dddc94d9
2 changed files with 19 additions and 9 deletions
@@ -1672,11 +1672,16 @@ async def completion(
             "stream" in data and data["stream"] == True
         ):  # use generate_responses to stream responses
             custom_headers = {"x-litellm-model-id": model_id}
-            return StreamingResponse(
-                async_data_generator(
-                    user_api_key_dict=user_api_key_dict,
+            stream_content = async_data_generator(
+                user_api_key_dict=user_api_key_dict,
+                response=response,
+            )
+            if response.custom_llm_provider == "sagemaker":
+                stream_content = data_generator(
                     response=response,
-                ),
+                )
+            return StreamingResponse(
+                stream_content,
                 media_type="text/event-stream",
                 headers=custom_headers,
             )
@@ -1834,11 +1839,16 @@ async def chat_completion(
             "stream" in data and data["stream"] == True
         ):  # use generate_responses to stream responses
             custom_headers = {"x-litellm-model-id": model_id}
-            return StreamingResponse(
-                async_data_generator(
-                    user_api_key_dict=user_api_key_dict,
+            stream_content = async_data_generator(
+                user_api_key_dict=user_api_key_dict,
+                response=response,
+            )
+            if response.custom_llm_provider == "sagemaker":
+                stream_content = data_generator(
                     response=response,
-                ),
+                )
+            return StreamingResponse(
+                stream_content,
                 media_type="text/event-stream",
                 headers=custom_headers,
             )
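In both routes the fix is the same: instead of passing async_data_generator(...) directly into StreamingResponse, the generator is chosen first, and when response.custom_llm_provider == "sagemaker" the synchronous data_generator is swapped in, since the sagemaker stream is a plain iterator rather than an async one. Starlette's StreamingResponse (re-exported by FastAPI) accepts both sync and async iterables, so both paths can share a single return. Below is a minimal, self-contained sketch of that pattern; the /stream route and the fake_* streams are illustrative stand-ins, not litellm's actual code:

import asyncio

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()


def fake_sagemaker_stream():
    # Stand-in for an SDK that yields chunks from a plain (sync) iterator.
    yield from ["Hello", " from", " sagemaker"]


async def fake_async_stream():
    # Stand-in for providers that expose an async iterator.
    for chunk in ["Hello", " from", " an async provider"]:
        await asyncio.sleep(0)
        yield chunk


async def async_data_generator(response):
    # Async path: wrap each chunk as a server-sent event.
    async for chunk in response:
        yield f"data: {chunk}\n\n"


def data_generator(response):
    # Sync twin of the above, for streams that are not async-iterable.
    for chunk in response:
        yield f"data: {chunk}\n\n"


@app.get("/stream")
async def stream(provider: str = "openai"):
    if provider == "sagemaker":
        stream_content = data_generator(fake_sagemaker_stream())
    else:
        stream_content = async_data_generator(fake_async_stream())
    # StreamingResponse iterates sync generators in a threadpool and async
    # generators directly, so both branches share this return.
    return StreamingResponse(stream_content, media_type="text/event-stream")

Running this under uvicorn and hitting it with, e.g., curl -N "http://127.0.0.1:8000/stream?provider=sagemaker" shows the sync path emitting the same text/event-stream framing as the async one.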