From fbd4ae8dae672c7a0746b52ecea3a601d38106ee Mon Sep 17 00:00:00 2001
From: Akram Ben Aissi
Date: Wed, 8 Oct 2025 23:11:25 +0200
Subject: [PATCH] review: add warning when not checking model availability for vllm

---
 llama_stack/providers/remote/inference/vllm/vllm.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index bfa1d6031..5974ca176 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -88,6 +88,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
                 return True
             model_ids.append(m.id)
         raise ValueError(f"Model '{model}' not found. Available models: {model_ids}")
+        log.warning(f"Not checking model availability for {model} as API token may trigger OAuth workflow")
         return True
 
     async def openai_chat_completion(
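
Note: for context, below is a minimal sketch of how the patched check_model_availability method likely reads after this change. The surrounding "if self.config.api_token:" block, the config/client attributes, and the stand-in class name are assumptions reconstructed from the hunk context; only the log.warning line is actually introduced by this patch.

import logging

log = logging.getLogger(__name__)


class VLLMInferenceAdapterSketch:
    """Standalone stand-in for VLLMInferenceAdapter; config and client shapes are assumptions."""

    def __init__(self, config, client):
        self.config = config  # assumed to expose .api_token
        self.client = client  # assumed to expose an OpenAI-compatible async .models.list()

    async def check_model_availability(self, model: str) -> bool:
        if self.config.api_token:
            # With a token configured, verify the model against the server's model list.
            model_ids = []
            async for m in self.client.models.list():
                if m.id == model:
                    return True  # exact match found on the vLLM server
                model_ids.append(m.id)
            raise ValueError(f"Model '{model}' not found. Available models: {model_ids}")
        # No API token configured: skip the remote check (it could trigger an
        # OAuth workflow) and emit the warning added by this patch instead.
        log.warning(f"Not checking model availability for {model} as API token may trigger OAuth workflow")
        return True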