From 3537a1401fb5cdb694438b0402781de84c9a31ae Mon Sep 17 00:00:00 2001
From: Yuan Tang
Date: Thu, 2 Jan 2025 18:29:21 -0500
Subject: [PATCH] Fix assert message and call to completion_request_to_prompt
 in remote:vllm

Signed-off-by: Yuan Tang
---
 llama_stack/providers/remote/inference/vllm/vllm.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index f62ccaa58..9f9072922 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -193,10 +193,9 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         else:
             assert (
                 not media_present
-            ), "Together does not support media for Completion requests"
+            ), "vLLM does not support media for Completion requests"
             input_dict["prompt"] = await completion_request_to_prompt(
                 request,
-                self.register_helper.get_llama_model(request.model),
                 self.formatter,
             )
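
Note (not part of the patch, ignored by git am): a minimal sketch of how the corrected completion branch could look inside the adapter's request-to-params path. The method name _get_params and the request_has_media() helper are assumptions for illustration; only the assert message and the two-argument completion_request_to_prompt() call come from this change.

    # Hypothetical fragment of VLLMInferenceAdapter; structure assumed for illustration.
    async def _get_params(self, request) -> dict:
        input_dict = {}
        media_present = request_has_media(request)  # assumed helper

        if isinstance(request, ChatCompletionRequest):
            # chat path unchanged by this patch
            ...
        else:
            # Completion requests cannot carry media; the message now names
            # vLLM instead of Together, matching this provider.
            assert (
                not media_present
            ), "vLLM does not support media for Completion requests"
            # The extra self.register_helper.get_llama_model(request.model)
            # argument is dropped; the helper is called with the request and
            # the formatter only.
            input_dict["prompt"] = await completion_request_to_prompt(
                request,
                self.formatter,
            )
        return input_dict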