fix(vertex_ai.py): check if message length > 0 before merging

parent df1a2ca5c2, commit 4c5f077bd3
2 changed files with 7 additions and 4 deletions
vertex_ai.py:

@@ -337,7 +337,7 @@ def _gemini_convert_messages_with_history(messages: list) -> List[ContentType]:
                 _parts: List[PartType] = []
                 for element in messages[msg_i]["content"]:
                     if isinstance(element, dict):
-                        if element["type"] == "text":
+                        if element["type"] == "text" and len(element["text"]) > 0:
                             _part = PartType(text=element["text"])
                             _parts.append(_part)
                         elif element["type"] == "image_url":
@@ -345,7 +345,10 @@ def _gemini_convert_messages_with_history(messages: list) -> List[ContentType]:
                             _part = _process_gemini_image(image_url=image_url)
                             _parts.append(_part)  # type: ignore
                 user_content.extend(_parts)
-            else:
+            elif (
+                isinstance(messages[msg_i]["content"], str)
+                and len(messages[msg_i]["content"]) > 0
+            ):
                 _part = PartType(text=messages[msg_i]["content"])
                 user_content.append(_part)
 
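The substance of the patch: when converting OpenAI-style messages into Gemini parts, empty strings are now skipped, since the Vertex AI / Gemini API can reject a request containing a part with empty text. Below is a minimal standalone sketch of the patched filtering logic, under two simplifying assumptions: PartType is reduced to a plain dict, and the image_url branch is omitted.

from typing import Any, List

# Stand-in for litellm's PartType (a TypedDict in the real code) -- an
# assumption made so this sketch runs on its own.
PartType = dict

def convert_user_content(content: Any) -> List[PartType]:
    """Sketch of the patched merge logic: empty text never becomes a part."""
    parts: List[PartType] = []
    if isinstance(content, list):
        for element in content:
            if isinstance(element, dict):
                # The new length check: drop empty text blocks.
                if element["type"] == "text" and len(element["text"]) > 0:
                    parts.append(PartType(text=element["text"]))
                # (the image_url branch is omitted in this sketch)
    elif isinstance(content, str) and len(content) > 0:
        # The new elif: a plain-string message is kept only when non-empty;
        # the old bare else: also let "" through.
        parts.append(PartType(text=content))
    return parts

assert convert_user_content("") == []                              # empty system prompt -> no part
assert convert_user_content("hi") == [{"text": "hi"}]
assert convert_user_content([{"type": "text", "text": ""}]) == []  # empty text block -> no part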
tests:

@@ -570,7 +570,6 @@ async def test_gemini_pro_vision(provider, sync_mode):
         # Google counts the prompt tokens for us, we should ensure we use the tokens from the orignal response
         assert prompt_tokens == 263  # the gemini api returns 263 to us
 
-        assert False
     except litellm.RateLimitError as e:
         pass
     except Exception as e:
@@ -1164,6 +1163,7 @@ def test_gemini_pro_vision_async():
             resp = await litellm.acompletion(
                 model="vertex_ai/gemini-pro-vision",
                 messages=[
+                    {"role": "system", "content": ""},
                     {
                         "role": "user",
                         "content": [
@@ -1175,7 +1175,7 @@ def test_gemini_pro_vision_async():
                             },
                         },
                     ],
-                }
+                },
             ],
         )
         print("async response gemini pro vision")
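The line added to the async test is the regression input: an empty system prompt ahead of a normal multimodal message. A hedged usage sketch of the same call shape through litellm's public API follows; the model name comes from the test, while the user text and image URL are placeholders not taken from the diff, and actually running it requires Vertex AI credentials.

import litellm

# Before this fix, the empty system content below could be merged into the
# Gemini request as an empty part; after it, the empty part is simply dropped.
resp = litellm.completion(
    model="vertex_ai/gemini-pro-vision",
    messages=[
        {"role": "system", "content": ""},  # the added regression input
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},  # placeholder text
                {
                    "type": "image_url",
                    # placeholder URL, not taken from the diff
                    "image_url": {"url": "https://example.com/boats.jpeg"},
                },
            ],
        },
    ],
)
print(resp)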