From 3ea30c0a9c002584d3f61986598f5a537b16c641 Mon Sep 17 00:00:00 2001
From: Matthew Farrellee
Date: Tue, 23 Sep 2025 10:33:55 -0400
Subject: [PATCH] fix(dev): fix vllm inference recording (await models.list)

---
 llama_stack/providers/remote/inference/vllm/vllm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index b4079c39f..15f807846 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -504,7 +504,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
         except ValueError:
             pass  # Ignore statically unknown model, will check live listing
         try:
-            res = await self.client.models.list()
+            res = self.client.models.list()
         except APIConnectionError as e:
             raise ValueError(
                 f"Failed to connect to vLLM at {self.config.url}. Please check if vLLM is running and accessible at that URL."
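
Note (not part of the patch): with the openai AsyncOpenAI client, models.list()
returns an AsyncPaginator that can be consumed with "async for" without being
awaited, whereas the inference recording layer is assumed here to substitute a
plain async generator for models.list, which is async-iterable but not
awaitable. The minimal sketch below, using a hypothetical fake_models_list as a
stand-in for the patched method, shows why dropping the await lets both the
live and the recorded paths work:

import asyncio


# Hypothetical stand-in for models.list under the recording layer: calling an
# async generator function returns an async-iterable object that is NOT
# awaitable (the live client's AsyncPaginator supports both forms).
async def fake_models_list():
    for model_id in ["meta-llama/Llama-3.1-8B-Instruct"]:  # illustrative id
        yield model_id


async def main():
    # Post-patch pattern: no await, then iterate. This works for an async
    # generator and for a real AsyncPaginator alike.
    res = fake_models_list()
    async for m in res:
        print("available:", m)

    # Pre-patch pattern: awaiting an async generator raises TypeError, which
    # is the failure mode the commit message points at.
    try:
        await fake_models_list()
    except TypeError as e:
        print("await fails:", e)


asyncio.run(main())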