fix(vertex_ai.py): check if message length > 0 before merging

Krrish Dholakia 2024-06-19 18:47:08 -07:00
parent df1a2ca5c2
commit 4c5f077bd3
2 changed files with 7 additions and 4 deletions


@@ -337,7 +337,7 @@ def _gemini_convert_messages_with_history(messages: list) -> List[ContentType]:
             _parts: List[PartType] = []
             for element in messages[msg_i]["content"]:
                 if isinstance(element, dict):
-                    if element["type"] == "text":
+                    if element["type"] == "text" and len(element["text"]) > 0:
                         _part = PartType(text=element["text"])
                         _parts.append(_part)
                     elif element["type"] == "image_url":
@@ -345,7 +345,10 @@ def _gemini_convert_messages_with_history(messages: list) -> List[ContentType]:
                         _part = _process_gemini_image(image_url=image_url)
                         _parts.append(_part)  # type: ignore
             user_content.extend(_parts)
-        else:
+        elif (
+            isinstance(messages[msg_i]["content"], str)
+            and len(messages[msg_i]["content"]) > 0
+        ):
             _part = PartType(text=messages[msg_i]["content"])
             user_content.append(_part)
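
Read in isolation, the new guards simply drop empty strings before they are merged into the request. Below is a minimal, self-contained sketch of that behavior; the `PartType` stub and the `_convert_user_content` helper are illustrative stand-ins, not litellm's actual definitions, and image handling is omitted:

```python
from typing import Any, List, TypedDict


class PartType(TypedDict, total=False):
    # Stub of litellm's PartType; only the text field is modeled here.
    text: str


def _convert_user_content(content: Any) -> List[PartType]:
    """Hypothetical helper mirroring the patched branches above."""
    parts: List[PartType] = []
    if isinstance(content, list):
        for element in content:
            if isinstance(element, dict):
                # Only non-empty text blocks become parts now
                # (image_url handling omitted for brevity).
                if element["type"] == "text" and len(element["text"]) > 0:
                    parts.append(PartType(text=element["text"]))
    elif isinstance(content, str) and len(content) > 0:
        parts.append(PartType(text=content))
    return parts


# Empty strings never become parts, so no empty text block is merged
# into the Gemini request.
assert _convert_user_content("") == []
assert _convert_user_content([{"type": "text", "text": ""}]) == []
assert _convert_user_content("hi") == [{"text": "hi"}]
```

Filtering at conversion time means callers (including the regression test below) can pass messages with empty content without the request to Vertex AI failing.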


@@ -570,7 +570,6 @@ async def test_gemini_pro_vision(provider, sync_mode):
         # Google counts the prompt tokens for us, we should ensure we use the tokens from the orignal response
         assert prompt_tokens == 263  # the gemini api returns 263 to us
-        assert False
     except litellm.RateLimitError as e:
         pass
     except Exception as e:
@@ -1164,6 +1163,7 @@ def test_gemini_pro_vision_async():
             resp = await litellm.acompletion(
                 model="vertex_ai/gemini-pro-vision",
                 messages=[
+                    {"role": "system", "content": ""},
                     {
                         "role": "user",
                         "content": [
@@ -1175,7 +1175,7 @@ def test_gemini_pro_vision_async():
                                 },
                             },
                         ],
-                    }
+                    },
                 ],
             )
             print("async response gemini pro vision")