Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 19:24:27 +00:00

fix(vertex_ai.py): raise explicit error when image url fails to download - prevents silent failure

This commit is contained in:
parent f800ebc3c1
commit b918f58262

2 changed files with 6 additions and 4 deletions
@@ -225,8 +225,7 @@ def _get_image_bytes_from_url(image_url: str) -> bytes:
         image_bytes = response.content
         return image_bytes
     except requests.exceptions.RequestException as e:
-        # Handle any request exceptions (e.g., connection error, timeout)
-        return b""  # Return an empty bytes object or handle the error as needed
+        raise Exception(f"An exception occurs with this image - {str(e)}")


 def _load_image_from_url(image_url: str):
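For context, a minimal sketch of the helper after this change. The body above the try is elided in the diff, so the requests.get call and status check here are assumptions; only the except branch is taken verbatim from the commit. Before the fix, a failed download returned b"" and the failure only surfaced later as an opaque error from the SDK.

import requests


def _get_image_bytes_from_url(image_url: str) -> bytes:
    try:
        # Assumed shape of the elided body: fetch the URL and fail on
        # non-2xx responses so errors land in the except branch below.
        response = requests.get(image_url)
        response.raise_for_status()
        image_bytes = response.content
        return image_bytes
    except requests.exceptions.RequestException as e:
        # New behavior from this commit: re-raise with context instead of
        # returning b"", which previously hid the root cause from the caller.
        raise Exception(f"An exception occurs with this image - {str(e)}")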
@@ -247,7 +246,8 @@ def _load_image_from_url(image_url: str):
     )

     image_bytes = _get_image_bytes_from_url(image_url)
-    return Image.from_bytes(image_bytes)
+
+    return Image.from_bytes(data=image_bytes)


 def _gemini_vision_convert_messages(messages: list):
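The switch to the keyword form matches the Vertex AI SDK, where Image.from_bytes takes a data parameter. A hedged usage sketch; the import path is an assumption about what the module already brings in, and the URL is illustrative:

from vertexai.preview.generative_models import Image

# Hypothetical URL; any downloadable image works here.
image_bytes = _get_image_bytes_from_url("https://example.com/image.png")
image = Image.from_bytes(data=image_bytes)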
@@ -817,7 +817,7 @@ async def async_completion(
     """
     try:
         if mode == "vision":
-            print_verbose("\nMaking VertexAI Gemini Pro Vision Call")
+            print_verbose("\nMaking VertexAI Gemini Pro/Vision Call")
             print_verbose(f"\nProcessing input messages = {messages}")
             tools = optional_params.pop("tools", None)

@@ -836,6 +836,7 @@ async def async_completion(
             )

             ## LLM Call
+            # print(f"final content: {content}")
             response = await llm_model._generate_content_async(
                 contents=content,
                 generation_config=optional_params,
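The hunk above shows the vision call site. A hedged sketch of the same pattern in isolation: assuming llm_model is a GenerativeModel from the Vertex AI SDK, as the Gemini Pro/Vision naming suggests, and that _generate_content_async (a private SDK method, used here only because the diff does) accepts the keyword arguments shown.

from vertexai.preview.generative_models import GenerativeModel


async def vision_call(content: list, optional_params: dict):
    # Assumption: the model name mirrors the Gemini Pro/Vision path above.
    llm_model = GenerativeModel("gemini-pro-vision")
    response = await llm_model._generate_content_async(
        contents=content,
        generation_config=optional_params,
    )
    return response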
@@ -5930,6 +5930,7 @@ def get_llm_provider(
             or model in litellm.vertex_code_text_models
             or model in litellm.vertex_language_models
             or model in litellm.vertex_embedding_models
+            or model in litellm.vertex_vision_models
         ):
             custom_llm_provider = "vertex_ai"
         ## ai21

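The routing change in get_llm_provider: provider inference is list membership, and with this commit vision models map to "vertex_ai" as well. A minimal sketch of the idea; infer_provider is a hypothetical stand-in for the real function, which covers many more providers and lists.

from typing import Optional

import litellm


def infer_provider(model: str) -> Optional[str]:
    # Mirrors the membership checks in the hunk above.
    if (
        model in litellm.vertex_code_text_models
        or model in litellm.vertex_language_models
        or model in litellm.vertex_embedding_models
        or model in litellm.vertex_vision_models  # added by this commit
    ):
        return "vertex_ai"
    return None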