Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-06 10:42:39 +00:00
fix again vllm

parent 97053ba9d6
commit f2ddf02fb6

2 changed files with 3 additions and 2 deletions
@@ -177,7 +177,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         if isinstance(request, ChatCompletionRequest):
             if media_present:
                 input_dict["messages"] = [
-                    await convert_message_to_openai_dict(m) for m in request.messages
+                    await convert_message_to_openai_dict(m, download=True)
+                    for m in request.messages
                 ]
             else:
                 input_dict["prompt"] = await chat_completion_request_to_prompt(
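The first hunk passes download=True when converting messages for vLLM, so image content referenced by URL is fetched and inlined client-side instead of leaving the remote URL for the inference server to resolve. A rough, illustrative sketch of that inlining step (hypothetical helper, not the llama-stack implementation; assumes httpx is available):

import base64
import httpx

async def inline_image_as_data_uri(url: str) -> str:
    # Fetch the image ourselves and embed it as a base64 data: URI,
    # so the inference backend never needs network access to the original URL.
    async with httpx.AsyncClient() as client:
        resp = await client.get(url)
        resp.raise_for_status()
    mime = resp.headers.get("content-type", "image/png")
    encoded = base64.b64encode(resp.content).decode("ascii")
    return f"data:{mime};base64,{encoded}"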
@@ -188,7 +188,7 @@ async def localize_image_content(media: ImageContentItem) -> Tuple[bytes, str]:
 async def convert_image_content_to_url(
     media: ImageContentItem, download: bool = False, include_format: bool = True
 ) -> str:
-    if media.url and not download:
+    if media.url and (not download or media.url.uri.startswith("data")):
         return media.url.uri
 
     content, format = await localize_image_content(media)
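The second hunk widens the early return in convert_image_content_to_url: a URL is now returned unchanged either when no download was requested or when it is already a data: URI, which avoids re-downloading images that are already inlined as base64. A minimal standalone sketch of just that guard condition, for illustration only:

def returns_uri_unchanged(uri: str, download: bool) -> bool:
    # Mirrors the new condition: skip localization for data: URIs
    # even when a download was requested.
    return not download or uri.startswith("data")

assert returns_uri_unchanged("data:image/png;base64,AAAA", download=True)
assert returns_uri_unchanged("https://example.com/cat.png", download=False)
assert not returns_uri_unchanged("https://example.com/cat.png", download=True)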