Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
fix(vertex_ai.py): handle stream=false

Also adds unit tests for Vertex AI calls made via langchain.

parent cff83c720d
commit 5f8d88d363
2 changed files with 45 additions and 16 deletions
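The description mentions langchain-based unit tests for Vertex AI. A minimal sketch of such a call, using langchain's ChatLiteLLM wrapper (the model name and prompt here are illustrative assumptions, not the actual test from the commit):

# Hedged sketch of a langchain -> litellm -> Vertex AI call like the tests
# this commit describes; model name and prompt are illustrative assumptions.
from langchain.chat_models import ChatLiteLLM
from langchain.schema import HumanMessage

chat = ChatLiteLLM(model="vertex_ai/gemini-pro")
response = chat([HumanMessage(content="Write me a haiku about clouds.")])
print(response.content)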
vertex_ai.py

@@ -143,7 +143,9 @@ class VertexAIConfig:
                 optional_params["temperature"] = value
             if param == "top_p":
                 optional_params["top_p"] = value
-            if param == "stream":
+            if (
+                param == "stream" and value == True
+            ):  # sending stream = False, can cause it to get passed unchecked and raise issues
                 optional_params["stream"] = value
             if param == "n":
                 optional_params["candidate_count"] = value
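This hunk changes the param mapping so that `stream` is only propagated when it is True; a literal `stream=False` is dropped instead of being forwarded to the Vertex AI SDK. A minimal sketch of the guard's effect (an assumed simplification, not the library's full mapping code):

# Minimal sketch (assumed simplification) of the guard introduced above:
# only a truthy `stream` survives into optional_params.
def map_params(non_default_params: dict) -> dict:
    optional_params = {}
    for param, value in non_default_params.items():
        if param == "stream" and value == True:  # mirrors the diff's comparison
            optional_params["stream"] = value
    return optional_params

assert map_params({"stream": True}) == {"stream": True}
assert map_params({"stream": False}) == {}  # stream=False no longer leaks through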
@@ -541,8 +543,9 @@ def completion(
                 tools = optional_params.pop("tools", None)
                 prompt, images = _gemini_vision_convert_messages(messages=messages)
                 content = [prompt] + images
-                if "stream" in optional_params and optional_params["stream"] == True:
-                    stream = optional_params.pop("stream")
+                stream = optional_params.pop("stream", False)
+                if stream == True:
+
                     request_str += f"response = llm_model.generate_content({content}, generation_config=GenerationConfig(**{optional_params}), safety_settings={safety_settings}, stream={stream})\n"
                 logging_obj.pre_call(
                     input=prompt,
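In the synchronous `completion` path, `stream` is now popped unconditionally with a default of `False`, rather than only when truthy. That matters because, as the logged request string in this hunk shows, whatever remains in `optional_params` is splatted into `GenerationConfig(**optional_params)`, which has no `stream` field, so a leftover `stream=False` would raise. An assumed illustration:

# Assumed illustration of why pop-with-default matters in this hunk.
optional_params = {"temperature": 0.7, "stream": False}

stream = optional_params.pop("stream", False)  # always removed, False by default

assert stream is False
assert optional_params == {"temperature": 0.7}  # now safe to splat into GenerationConfig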
@@ -820,6 +823,7 @@ async def async_completion(
         print_verbose("\nMaking VertexAI Gemini Pro/Vision Call")
         print_verbose(f"\nProcessing input messages = {messages}")
         tools = optional_params.pop("tools", None)
+        stream = optional_params.pop("stream", False)
 
         prompt, images = _gemini_vision_convert_messages(messages=messages)
         content = [prompt] + images
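This final hunk applies the same pop-with-default to `async_completion`, so the async path mirrors the sync one. A hedged usage sketch of what the fix enables (model name and message are illustrative, not from the commit's tests):

# Hedged usage sketch: after this fix, passing stream=False down the async
# Vertex AI path no longer errors out.
import asyncio
import litellm

async def main():
    response = await litellm.acompletion(
        model="vertex_ai/gemini-pro",
        messages=[{"role": "user", "content": "hello"}],
        stream=False,
    )
    print(response.choices[0].message.content)

asyncio.run(main())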