From 6f77ca17554959721267cdd436b37b500c55603e Mon Sep 17 00:00:00 2001
From: Ilya Kolchinsky <58424190+ilya-kolchinsky@users.noreply.github.com>
Date: Thu, 17 Apr 2025 11:35:41 +0200
Subject: [PATCH] Update llama_stack/providers/remote/inference/vllm/vllm.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Sébastien Han
---
 llama_stack/providers/remote/inference/vllm/vllm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index c177a438c..8cfef2ee0 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -248,7 +248,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         if self.client is not None:
             return
 
-        log.info(f"Initializing VLLM client with base_url={self.config.url}")
+        log.info(f"Initializing vLLM client with base_url={self.config.url}")
         self.client = self._create_client()
 
     def _create_client(self):
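
For context on the hunk above: the surrounding lines show the adapter creating its
client lazily, only on the first initialize() call. Below is a minimal,
self-contained sketch of that pattern, assuming vLLM's OpenAI-compatible HTTP
endpoint and the openai SDK's AsyncOpenAI client. Aside from the names visible in
the hunk (initialize, self.client, self.config.url, _create_client), everything
here is illustrative, not the adapter's actual implementation.

    import logging

    from openai import AsyncOpenAI  # vLLM serves an OpenAI-compatible API

    log = logging.getLogger(__name__)


    class LazyVLLMClientSketch:
        """Illustrative stand-in for VLLMInferenceAdapter's lazy client setup."""

        def __init__(self, config):
            self.config = config  # assumed to carry a .url for the vLLM server
            self.client = None

        async def initialize(self) -> None:
            # Idempotent: a second initialize() call is a no-op.
            if self.client is not None:
                return

            log.info(f"Initializing vLLM client with base_url={self.config.url}")
            self.client = self._create_client()

        def _create_client(self):
            # Point the OpenAI SDK at the vLLM server's endpoint. vLLM ignores
            # the API key unless one was configured, so a placeholder suffices
            # (assumption; adjust if the server enforces authentication).
            return AsyncOpenAI(base_url=self.config.url, api_key="not-needed")

Deferring client construction like this keeps the adapter cheap to instantiate
and avoids opening connections before the server URL is actually needed.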