Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-29 03:14:19 +00:00)
update the code with async iterator as suggested by Ben
This commit is contained in:
parent b413c7562b
commit 3840ef7a98

3 changed files with 22 additions and 7 deletions
@@ -313,10 +313,10 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         """
         try:
             client = self._create_client() if self.client is None else self.client
-            client.models.list()  # Ensure the client is initialized
+            _ = [m async for m in client.models.list()]  # Ensure the client is initialized
             return HealthResponse(status=HealthStatus.OK)
-        except Exception as ex:
-            return HealthResponse(status=HealthStatus.ERROR, message=f"Health check failed: {str(ex)}")
+        except Exception as e:
+            return HealthResponse(status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}")
 
     async def _get_model(self, model_id: str) -> Model:
         if not self.model_store:
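For context, a minimal standalone sketch of the pattern this commit adopts, assuming an openai.AsyncOpenAI-compatible client pointed at a vLLM OpenAI-compatible server (the check_health helper, base_url, and api_key below are illustrative, not part of the commit): with the async client, client.models.list() returns an async paginator that sends no HTTP request until it is awaited or iterated, so merely calling it, as the removed line did, never actually contacts the server. Draining the iterator forces the request, which is what makes the health check meaningful.

import asyncio

from openai import AsyncOpenAI


async def check_health(base_url: str) -> bool:
    # Hypothetical helper, not from the commit: probes an
    # OpenAI-compatible endpoint the same way the adapter's
    # health() method does after this change.
    client = AsyncOpenAI(base_url=base_url, api_key="not-needed")
    try:
        # client.models.list() returns an async paginator; no request
        # is issued until it is awaited or iterated. Draining it mirrors
        # `_ = [m async for m in client.models.list()]` from the diff
        # and forces the request to actually hit the server.
        _ = [m async for m in client.models.list()]
        return True
    except Exception:
        return False


if __name__ == "__main__":
    # Assumes a local vLLM server on its default port; adjust the URL.
    print(asyncio.run(check_health("http://localhost:8000/v1")))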