forked from phoenix/litellm-mirror
(fix) proxy - streaming sagemaker
parent 01a2514b98
commit bd37a9cb5e
2 changed files with 19 additions and 9 deletions
|
@ -1658,11 +1658,16 @@ async def completion(
|
|||
"stream" in data and data["stream"] == True
|
||||
): # use generate_responses to stream responses
|
||||
custom_headers = {"x-litellm-model-id": model_id}
|
||||
return StreamingResponse(
|
||||
async_data_generator(
|
||||
user_api_key_dict=user_api_key_dict,
|
||||
stream_content = async_data_generator(
|
||||
user_api_key_dict=user_api_key_dict,
|
||||
response=response,
|
||||
)
|
||||
if response.custom_llm_provider == "sagemaker":
|
||||
stream_content = data_generator(
|
||||
response=response,
|
||||
),
|
||||
)
|
||||
return StreamingResponse(
|
||||
stream_content,
|
||||
media_type="text/event-stream",
|
||||
headers=custom_headers,
|
||||
)
|
||||
|
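This hunk swaps the unconditional async_data_generator wrapper for a per-provider choice: SageMaker streams fall back to the synchronous data_generator, presumably because litellm's SageMaker stream object only supports plain `for` iteration; the next hunk repeats the same change in chat_completion. A minimal sketch of the pattern, assuming SSE-style chunk formatting — the generator names come from the diff, but their bodies and the build_streaming_response wrapper are illustrative, not the repo's code:

    import json
    from fastapi.responses import StreamingResponse

    def data_generator(response):
        # Sync SSE generator: usable when the provider stream
        # (e.g. SageMaker) is a plain iterator.
        for chunk in response:
            yield f"data: {json.dumps(chunk)}\n\n"

    async def async_data_generator(response, user_api_key_dict):
        # Async SSE generator: for provider streams that support `async for`.
        async for chunk in response:
            yield f"data: {json.dumps(chunk)}\n\n"

    def build_streaming_response(response, user_api_key_dict, model_id):
        # Hypothetical wrapper mirroring the diff's control flow: default to
        # the async generator, swap in the sync one for SageMaker.
        custom_headers = {"x-litellm-model-id": model_id}
        stream_content = async_data_generator(
            user_api_key_dict=user_api_key_dict,
            response=response,
        )
        if response.custom_llm_provider == "sagemaker":
            stream_content = data_generator(response=response)
        return StreamingResponse(
            stream_content,
            media_type="text/event-stream",
            headers=custom_headers,
        )

Either generator type is acceptable here: Starlette's StreamingResponse iterates async generators directly and runs sync ones in a threadpool, so the endpoint can return whichever the provider check selected.
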
@@ -1820,11 +1825,16 @@ async def chat_completion(
             "stream" in data and data["stream"] == True
         ):  # use generate_responses to stream responses
             custom_headers = {"x-litellm-model-id": model_id}
-            return StreamingResponse(
-                async_data_generator(
-                    user_api_key_dict=user_api_key_dict,
+            stream_content = async_data_generator(
+                user_api_key_dict=user_api_key_dict,
+                response=response,
+            )
+            if response.custom_llm_provider == "sagemaker":
+                stream_content = data_generator(
                     response=response,
-                ),
+                )
+            return StreamingResponse(
+                stream_content,
                 media_type="text/event-stream",
                 headers=custom_headers,
             )

@@ -4,7 +4,7 @@ const openai = require('openai');
 process.env.DEBUG=false;
 async function runOpenAI() {
   const client = new openai.OpenAI({
-    apiKey: 'sk-yPX56TDqBpr23W7ruFG3Yg',
+    apiKey: 'sk-JkKeNi6WpWDngBsghJ6B9g',
     baseURL: 'http://0.0.0.0:8000'
   });

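The Node script above points the openai client at the local proxy; this commit just swaps in a new API key. The streaming path touched by the proxy change can be exercised the same way from Python — a hedged sketch, assuming a proxy running locally on port 8000, a virtual key issued by it, and a SageMaker deployment registered under the placeholder alias 'sagemaker-model':

    import openai

    client = openai.OpenAI(
        api_key="sk-1234",               # placeholder: whatever key the proxy issued
        base_url="http://0.0.0.0:8000",  # litellm proxy address
    )

    # stream=True goes through the code path fixed above; for SageMaker
    # models the proxy now streams via the synchronous data_generator.
    stream = client.chat.completions.create(
        model="sagemaker-model",         # placeholder model alias
        messages=[{"role": "user", "content": "say hi"}],
        stream=True,
    )
    for chunk in stream:
        print(chunk.choices[0].delta.content or "", end="")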