(fix) proxy - health checks support cli model

ishaan-jaff 2023-12-19 12:55:20 +05:30
parent b82fcd51d7
commit ce1b0b89ba
2 changed files with 15 additions and 3 deletions

litellm/proxy/health_check.py

@@ -96,7 +96,7 @@ async def _perform_health_check(model_list: list):
-async def perform_health_check(model_list: list, model: Optional[str] = None):
+async def perform_health_check(model_list: list, model: Optional[str] = None, cli_model: Optional[str] = None):
     """
     Perform a health check on the system.
@@ -104,7 +104,10 @@ async def perform_health_check(model_list: list, model: Optional[str] = None):
         (bool): True if the health check passes, False otherwise.
     """
     if not model_list:
-        return [], []
+        if cli_model:
+            model_list = [{"model_name": cli_model, "litellm_params": {"model": cli_model}}]
+        else:
+            return [], []
     if model is not None:
         model_list = [x for x in model_list if x["litellm_params"]["model"] == model]
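The fallback above normalizes a bare CLI model string into the same entry shape a config-driven model list uses, so the rest of the health-check code can treat both sources uniformly. A minimal standalone sketch of that shaping (`build_model_list` is a hypothetical helper for illustration, not part of the commit):

```python
from typing import Optional

def build_model_list(cli_model: Optional[str]) -> list:
    # Hypothetical helper (not in the commit) mirroring the fallback above:
    # a bare CLI model string becomes a single-entry model list whose
    # litellm_params point at the same model.
    if cli_model:
        return [{"model_name": cli_model, "litellm_params": {"model": cli_model}}]
    return []

print(build_model_list("ollama/llama2"))
# [{'model_name': 'ollama/llama2', 'litellm_params': {'model': 'ollama/llama2'}}]
```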

litellm/proxy/proxy_server.py

@@ -1514,9 +1514,18 @@ async def health_endpoint(request: Request, model: Optional[str] = fastapi.Query
     ```
     else, the health checks will be run on models when /health is called.
     """
-    global health_check_results, use_background_health_checks
+    global health_check_results, use_background_health_checks, user_model
     if llm_model_list is None:
+        # if no router set, check if user set a model using litellm --model ollama/llama2
+        if user_model is not None:
+            healthy_endpoints, unhealthy_endpoints = await perform_health_check(model_list=[], cli_model=user_model)
+            return {
+                "healthy_endpoints": healthy_endpoints,
+                "unhealthy_endpoints": unhealthy_endpoints,
+                "healthy_count": len(healthy_endpoints),
+                "unhealthy_count": len(unhealthy_endpoints),
+            }
         raise HTTPException(
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             detail={"error": "Model list not initialized"},