mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
fix(vertex_ai.py): add support for real async streaming + completion calls
This commit is contained in:
parent
07015843ac
commit
69c29f8f86
5 changed files with 134 additions and 49 deletions
|
@@ -1157,7 +1157,7 @@ def completion(
             acompletion=acompletion
         )
-        if "stream" in optional_params and optional_params["stream"] == True:
+        if "stream" in optional_params and optional_params["stream"] == True and acompletion == False:
             response = CustomStreamWrapper(
                 model_response, model, custom_llm_provider="vertex_ai", logging_obj=logging
             )
|
Loading…
Add table
Add a link
Reference in a new issue