fix vllm base64

Xi Yan 2025-01-17 16:17:15 -08:00
parent 9d005154d7
commit 6d21da6e48
2 changed files with 43 additions and 4 deletions


@@ -176,10 +176,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         media_present = request_has_media(request)
         if isinstance(request, ChatCompletionRequest):
             if media_present:
-                # vllm does not seem to work well with image urls, so we download the images
                 input_dict["messages"] = [
-                    await convert_message_to_openai_dict(m, download=True)
-                    for m in request.messages
+                    await convert_message_to_openai_dict(m) for m in request.messages
                 ]
             else:
                 input_dict["prompt"] = await chat_completion_request_to_prompt(