From f2ddf02fb6ae53551c8448371df9f0aed4948117 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Fri, 17 Jan 2025 17:46:42 -0800
Subject: [PATCH] fix again vllm

---
 llama_stack/providers/remote/inference/vllm/vllm.py     | 3 ++-
 llama_stack/providers/utils/inference/prompt_adapter.py | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index 81c746cce..1dbb4ecfa 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -177,7 +177,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         if isinstance(request, ChatCompletionRequest):
             if media_present:
                 input_dict["messages"] = [
-                    await convert_message_to_openai_dict(m) for m in request.messages
+                    await convert_message_to_openai_dict(m, download=True)
+                    for m in request.messages
                 ]
             else:
                 input_dict["prompt"] = await chat_completion_request_to_prompt(
diff --git a/llama_stack/providers/utils/inference/prompt_adapter.py b/llama_stack/providers/utils/inference/prompt_adapter.py
index 7ee19fd7b..701b2ca3b 100644
--- a/llama_stack/providers/utils/inference/prompt_adapter.py
+++ b/llama_stack/providers/utils/inference/prompt_adapter.py
@@ -188,7 +188,7 @@ async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
 async def convert_image_content_to_url(
     media: ImageContentItem, download: bool = False, include_format: bool = True
 ) -> str:
-    if media.url and not download:
+    if media.url and (not download or media.url.uri.startswith("data")):
         return media.url.uri
 
     content, format = await localize_image_content(media)
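
For context, below is a minimal, self-contained sketch of the patched behavior. The URL and ImageContentItem dataclasses and the localize_image_content stub are simplified stand-ins (assumptions, not the real llama_stack types); only the guard condition in convert_image_content_to_url mirrors the diff. The point of the change: a "data:" URI is already inline, so it is returned as-is even when the caller asks for the image to be localized (as the vLLM adapter now does by passing download=True).

"""
Sketch of the patched convert_image_content_to_url logic.
Types and the localize_image_content stub are simplified stand-ins.
"""
import asyncio
import base64
from dataclasses import dataclass
from typing import Optional, Tuple


@dataclass
class URL:
    uri: str


@dataclass
class ImageContentItem:
    url: Optional[URL] = None
    data: Optional[bytes] = None


async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
    # Simplified stand-in: the real helper also fetches http(s) URLs.
    if media.url and media.url.uri.startswith("data"):
        # e.g. "data:image/png;base64,<payload>"
        header, payload = media.url.uri.split(",", 1)
        fmt = header.split("/", 1)[1].split(";", 1)[0]
        return base64.b64decode(payload), fmt
    return media.data or b"", "png"


async def convert_image_content_to_url(
    media: ImageContentItem, download: bool = False, include_format: bool = True
) -> str:
    # The fix: a data: URI is already inline, so return it untouched even when
    # download=True; only non-data URLs (or raw bytes) need localizing.
    if media.url and (not download or media.url.uri.startswith("data")):
        return media.url.uri

    content, fmt = await localize_image_content(media)
    encoded = base64.b64encode(content).decode("utf-8")
    if include_format:
        return f"data:image/{fmt};base64,{encoded}"
    return encoded


if __name__ == "__main__":
    item = ImageContentItem(url=URL(uri="data:image/png;base64,aGVsbG8="))
    # With the patch, download=True no longer tries to re-localize an
    # already-inlined image; the original data URI comes back unchanged.
    print(asyncio.run(convert_image_content_to_url(item, download=True)))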