Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-28 04:04:31 +00:00)
fix(vertex_httpx.py): fix supports system message check for vertex_ai_beta
Fixes https://github.com/BerriAI/litellm/issues/4283
parent 7966266d4f
commit 0a96865c48
2 changed files with 8 additions and 2 deletions
litellm/llms/vertex_httpx.py

@@ -709,8 +709,11 @@ class VertexLLM(BaseLLM):
 
         ## TRANSFORMATION ##
         try:
+            _custom_llm_provider = custom_llm_provider
+            if custom_llm_provider == "vertex_ai_beta":
+                _custom_llm_provider = "vertex_ai"
             supports_system_message = litellm.supports_system_messages(
-                model=model, custom_llm_provider=custom_llm_provider
+                model=model, custom_llm_provider=_custom_llm_provider
             )
         except Exception as e:
             verbose_logger.error(
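The change aliases the provider before the capability lookup: litellm's capability map keys Gemini models under "vertex_ai", so querying litellm.supports_system_messages with custom_llm_provider="vertex_ai_beta" raised, and the except branch treated system messages as unsupported. Below is a minimal standalone sketch of the same normalization; the helper name and the default-to-False fallback are illustrative (only litellm.supports_system_messages itself appears in the diff):

import litellm


def provider_supports_system_messages(model: str, custom_llm_provider: str) -> bool:
    # vertex_ai_beta serves the same Gemini models as vertex_ai, but the
    # capability map is keyed on "vertex_ai", so normalize the alias first.
    provider = (
        "vertex_ai" if custom_llm_provider == "vertex_ai_beta" else custom_llm_provider
    )
    try:
        return litellm.supports_system_messages(
            model=model, custom_llm_provider=provider
        )
    except Exception:
        # Assumption: fall back to False when the lookup fails, which is what
        # the except/verbose_logger.error branch in the diff suggests.
        return False

With the alias in place, the check returns True for Gemini 1.5 models on the vertex_ai_beta route, so the transformation step can pass the system message through as Gemini's system instruction rather than taking the unsupported fallback path.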
litellm/tests/test_amazing_vertex_completion.py

@@ -529,6 +529,7 @@ async def test_gemini_pro_vision(provider, sync_mode):
             resp = litellm.completion(
                 model="{}/gemini-1.5-flash-preview-0514".format(provider),
                 messages=[
+                    {"role": "system", "content": "Be a good bot"},
                     {
                         "role": "user",
                         "content": [
@@ -540,7 +541,7 @@ async def test_gemini_pro_vision(provider, sync_mode):
                                 },
                             },
                         ],
-                    }
+                    },
                 ],
             )
         else:
@@ -568,6 +569,8 @@ async def test_gemini_pro_vision(provider, sync_mode):
         # DO Not DELETE this ASSERT
         # Google counts the prompt tokens for us, we should ensure we use the tokens from the orignal response
         assert prompt_tokens == 263 # the gemini api returns 263 to us
+
+        assert False
     except litellm.RateLimitError as e:
         pass
     except Exception as e:
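End to end, this is roughly what the updated test exercises; a hedged sketch, assuming valid Vertex AI credentials, with the image content of the real test omitted for brevity:

import litellm

# Sketch only: makes a real network call and needs Vertex AI auth
# (e.g. GOOGLE_APPLICATION_CREDENTIALS plus project/location settings).
resp = litellm.completion(
    model="vertex_ai_beta/gemini-1.5-flash-preview-0514",
    messages=[
        # Before this fix, the vertex_ai_beta route failed the
        # supports-system-message check for this message.
        {"role": "system", "content": "Be a good bot"},
        {"role": "user", "content": "Say hello."},
    ],
)
print(resp.choices[0].message.content)
print(resp.usage.prompt_tokens)  # the test pins 263 for its fixed image prompt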