From b413c7562b2d0101edaf5dfbe933d2e3667af29f Mon Sep 17 00:00:00 2001
From: Sumit Jaiswal
Date: Mon, 2 Jun 2025 12:45:17 +0530
Subject: [PATCH] fix review cosmetic comment

---
 llama_stack/providers/remote/inference/vllm/vllm.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py
index 9e084ea30..b703c07fc 100644
--- a/llama_stack/providers/remote/inference/vllm/vllm.py
+++ b/llama_stack/providers/remote/inference/vllm/vllm.py
@@ -304,10 +304,11 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
 
     async def health(self) -> HealthResponse:
         """
-        Performs a health check by verifying connectivity to the remote VLLM server.
-        This method is used by initialize() and the Provider API to verify
+        Performs a health check by verifying connectivity to the remote vLLM server.
+        This method is used by the Provider API to verify
         that the service is running correctly.
 
         Returns:
+            HealthResponse: A dictionary containing the health status.
         """
         try:
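
For reference, below is a minimal, self-contained sketch of the health-check pattern the updated docstring describes: probe the remote vLLM server and report the result as a dictionary keyed by status. The httpx probe of vLLM's OpenAI-compatible /v1/models endpoint and the HealthStatus / HealthResponse stand-ins are illustrative assumptions, not code from the patched adapter.

    # Minimal sketch of the health-check pattern the updated docstring describes.
    # Assumptions (not from the patched file): the probe targets vLLM's
    # OpenAI-compatible /v1/models endpoint via httpx, and HealthStatus /
    # HealthResponse are stand-ins for llama_stack's provider datatypes.
    import asyncio
    from enum import Enum
    from typing import Any

    import httpx


    class HealthStatus(str, Enum):
        OK = "OK"
        ERROR = "Error"


    HealthResponse = dict[str, Any]


    async def health(base_url: str) -> HealthResponse:
        """Probe the remote vLLM server and return a dict with the health status."""
        try:
            async with httpx.AsyncClient(timeout=5.0) as client:
                resp = await client.get(f"{base_url}/v1/models")
                resp.raise_for_status()  # any non-2xx status counts as unhealthy
            return {"status": HealthStatus.OK}
        except Exception as e:
            return {"status": HealthStatus.ERROR, "message": f"Health check failed: {e}"}


    if __name__ == "__main__":
        # Example run against a vLLM server assumed to be listening locally.
        print(asyncio.run(health("http://localhost:8000")))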