mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
(fix) vertex ai streaming
parent 6b3671b593
commit 6c82abf5bf

1 changed file with 7 additions and 0 deletions
@@ -109,7 +109,12 @@ def completion(
     logging_obj.pre_call(input=prompt, api_key=None, additional_args={"complete_input_dict": optional_params})
 
     if "stream" in optional_params and optional_params["stream"] == True:
+        # NOTE: Vertex AI does not accept stream=True as a param and raises an error,
+        # so we handle this by removing 'stream' from optional_params and sending the request.
+        # After we get the response we set optional_params["stream"] = True again, since main.py needs to know it's a streaming response to then transform it for the OpenAI format.
+        optional_params.pop("stream", None)  # vertex ai raises an error when passing stream in optional params
         model_response = chat.send_message_streaming(prompt, **optional_params)
+        optional_params["stream"] = True
         return model_response
 
     completion_response = chat.send_message(prompt, **optional_params).text
@@ -118,7 +123,9 @@ def completion(
     logging_obj.pre_call(input=prompt, api_key=None)
 
     if "stream" in optional_params and optional_params["stream"] == True:
+        optional_params.pop("stream", None)  # see note above on handling streaming for vertex ai
         model_response = text_model.predict_streaming(prompt, **optional_params)
+        optional_params["stream"] = True
         return model_response
 
     completion_response = text_model.predict(prompt, **optional_params).text
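Both hunks apply the same pop-and-restore workaround: strip the 'stream' flag before calling the Vertex AI SDK (which rejects it as a keyword argument), then set it back so the rest of litellm still treats the call as streaming. Below is a minimal standalone sketch of that pattern; the helper name call_vertex_streaming and its signature are illustrative, not part of this commit.

# Sketch of the pop-and-restore pattern used in both hunks above.
# `send_streaming` stands in for chat.send_message_streaming or
# text_model.predict_streaming; the helper itself is hypothetical.
def call_vertex_streaming(send_streaming, prompt, optional_params):
    if optional_params.get("stream") == True:
        optional_params.pop("stream", None)  # Vertex AI errors on a 'stream' kwarg
        model_response = send_streaming(prompt, **optional_params)
        optional_params["stream"] = True  # restore so main.py formats it as an OpenAI-style stream
        return model_response

Because optional_params is mutated in place, the caller still sees stream=True after the call even though the SDK itself never received it.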