From ff73f0a17b0ba49e23f900c85bc9a8d36a6f045e Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Thu, 30 Jan 2025 14:01:18 -0800
Subject: [PATCH] prompt adapter

---
 llama_stack/providers/utils/inference/prompt_adapter.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/llama_stack/providers/utils/inference/prompt_adapter.py b/llama_stack/providers/utils/inference/prompt_adapter.py
index babfb736a..e49771980 100644
--- a/llama_stack/providers/utils/inference/prompt_adapter.py
+++ b/llama_stack/providers/utils/inference/prompt_adapter.py
@@ -185,8 +185,10 @@ async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
 
         return content, format
     else:
-        pil_image = PIL_Image.open(io.BytesIO(image.data))
-        return image.data, pil_image.format
+        # data is a base64 encoded string, decode it to bytes first
+        data_bytes = base64.b64decode(image.data)
+        pil_image = PIL_Image.open(io.BytesIO(data_bytes))
+        return data_bytes, pil_image.format
 
 
 async def convert_image_content_to_url(
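
Note (not part of the patch): a minimal sketch of why the decode step matters, assuming image.data carries a base64-encoded string rather than raw image bytes. The tiny PNG built here is only a stand-in for real payload data.

    import base64
    import io

    from PIL import Image as PIL_Image

    # Build a tiny in-memory PNG and base64-encode it, mimicking how image data
    # is assumed to arrive on ImageContentItem.image.data (base64 text, not raw bytes).
    buf = io.BytesIO()
    PIL_Image.new("RGB", (1, 1)).save(buf, format="PNG")
    b64_data = base64.b64encode(buf.getvalue()).decode("ascii")

    # Before the patch, the base64 text was handed straight to PIL, which cannot
    # identify it as an image. Decoding first yields bytes PIL can open:
    data_bytes = base64.b64decode(b64_data)
    pil_image = PIL_Image.open(io.BytesIO(data_bytes))
    print(pil_image.format)  # "PNG"

Returning data_bytes (instead of image.data) also keeps the function's Tuple[bytes, str] contract: callers receive decoded image bytes plus the format PIL detected.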