fix again vllm

Xi Yan 2025-01-17 17:46:42 -08:00
parent 97053ba9d6
commit f2ddf02fb6
2 changed files with 3 additions and 2 deletions

@@ -177,7 +177,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         if isinstance(request, ChatCompletionRequest):
             if media_present:
                 input_dict["messages"] = [
-                    await convert_message_to_openai_dict(m) for m in request.messages
+                    await convert_message_to_openai_dict(m, download=True)
+                    for m in request.messages
                 ]
             else:
                 input_dict["prompt"] = await chat_completion_request_to_prompt(

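The hunk above passes download=True so that any image referenced by URL in the request is fetched and inlined before the messages reach vLLM's OpenAI-compatible endpoint (the comprehension is also split across two lines for readability). A minimal, self-contained sketch of the pattern; convert_message_to_openai_dict and fetch_bytes here are hypothetical stubs standing in for the real helpers:

import asyncio
import base64


async def fetch_bytes(uri: str) -> bytes:
    # Stand-in for an HTTP fetch; a real version would use e.g. httpx.
    return b"\x89PNG..."


async def convert_message_to_openai_dict(message: dict, download: bool = False) -> dict:
    # Stub: when download=True, rewrite a remote image URL into an inline
    # base64 data URL so the serving endpoint never fetches it itself.
    converted = dict(message)
    url = converted.get("image_url")
    if download and url and url.startswith("http"):
        encoded = base64.b64encode(await fetch_bytes(url)).decode("utf-8")
        converted["image_url"] = f"data:image/png;base64,{encoded}"
    return converted


async def main() -> None:
    messages = [{"role": "user", "image_url": "http://example.com/cat.png"}]
    input_dict = {
        "messages": [
            await convert_message_to_openai_dict(m, download=True)
            for m in messages
        ]
    }
    print(input_dict["messages"][0]["image_url"][:30])


asyncio.run(main())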

@@ -188,7 +188,7 @@ async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
 async def convert_image_content_to_url(
     media: ImageContentItem, download: bool = False, include_format: bool = True
 ) -> str:
-    if media.url and not download:
+    if media.url and (not download or media.url.uri.startswith("data")):
         return media.url.uri
     content, format = await localize_image_content(media)
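
This second hunk is the actual fix: a data: URI already carries its bytes inline, so there is nothing to download even when download=True; without the extra check, the helper would fall through and try to localize a data URI. A self-contained sketch of the fixed guard, with hypothetical dataclasses standing in for the real content types and a placeholder localizer:

import asyncio
import base64
from dataclasses import dataclass
from typing import Optional, Tuple


@dataclass
class URL:
    uri: str


@dataclass
class ImageContentItem:
    url: Optional[URL] = None
    data: Optional[bytes] = None


async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
    # Placeholder: a real version would fetch media.url.uri over HTTP or
    # read inline bytes; here we just return whatever bytes we have as PNG.
    return media.data or b"", "png"


async def convert_image_content_to_url(
    media: ImageContentItem, download: bool = False, include_format: bool = True
) -> str:
    # Pass the URI through when no download was requested, or when it is
    # already an inline data URL (there is nothing left to download).
    if media.url and (not download or media.url.uri.startswith("data")):
        return media.url.uri
    content, format = await localize_image_content(media)
    encoded = base64.b64encode(content).decode("utf-8")
    if include_format:
        return f"data:image/{format};base64,{encoded}"
    return encoded


data_url = "data:image/png;base64,iVBORw0KGgo="
item = ImageContentItem(url=URL(uri=data_url))
# Before this commit, download=True sent the data URL through
# localize_image_content; now it is returned unchanged.
assert asyncio.run(convert_image_content_to_url(item, download=True)) == data_url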