Mirror of https://github.com/meta-llama/llama-stack.git (synced 2026-01-01 19:40:01 +00:00)
fix vllm base64
parent 9d005154d7
commit 6d21da6e48

2 changed files with 43 additions and 4 deletions
@@ -176,10 +176,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         media_present = request_has_media(request)
         if isinstance(request, ChatCompletionRequest):
             if media_present:
+                # vllm does not seem to work well with image urls, so we download the images
                 input_dict["messages"] = [
-                    await convert_message_to_openai_dict(m) for m in request.messages
+                    await convert_message_to_openai_dict(m, download=True)
+                    for m in request.messages
                 ]
             else:
                 input_dict["prompt"] = await chat_completion_request_to_prompt(
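The substance of the fix is the download=True argument: instead of forwarding image URLs to vLLM, the adapter fetches the images itself and passes them inline. Below is a minimal, hypothetical sketch of that pattern, not the repository's actual convert_message_to_openai_dict; the helper names, the use of httpx, and the message shape are illustrative assumptions.

import base64

import httpx  # assumed HTTP client; the real helper may use something else


async def image_url_to_data_url(url: str) -> str:
    """Download an image and re-encode it as a base64 data URL.

    Mirrors the intent of the diff: the vLLM OpenAI-compatible endpoint
    receives inline image bytes instead of a remote URL it would have to fetch.
    """
    async with httpx.AsyncClient() as client:
        resp = await client.get(url)
        resp.raise_for_status()
    mime = resp.headers.get("content-type", "image/png")
    encoded = base64.b64encode(resp.content).decode("utf-8")
    return f"data:{mime};base64,{encoded}"


async def user_message_with_image(text: str, image_url: str) -> dict:
    """Build an OpenAI-style chat message whose image content is inlined."""
    return {
        "role": "user",
        "content": [
            {"type": "text", "text": text},
            {
                "type": "image_url",
                "image_url": {"url": await image_url_to_data_url(image_url)},
            },
        ],
    }

The trade-off is larger request payloads, since every image is base64-encoded into the message body, but the request no longer depends on the vLLM server being able to fetch remote URLs on its own.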