review: add warning when not checking model availability for vllm

This commit is contained in:
Akram Ben Aissi 2025-10-08 23:11:25 +02:00
parent 0ecfdc6dff
commit fbd4ae8dae

View file

@@ -88,6 +88,7 @@ class VLLMInferenceAdapter(OpenAIMixin):
return True
model_ids.append(m.id)
raise ValueError(f"Model '{model}' not found. Available models: {model_ids}")
log.warning(f"Not checking model availability for {model} as API token may trigger OAuth workflow")
return True
async def openai_chat_completion(