LiteLLM Minor Fixes and Improvements (09/14/2024) (#5697)

* fix(health_check.py): hide sensitive keys from health check debug information (see the masking sketch after this list)

* fix(route_llm_request.py): fix proxy model not found error message to indicate how to resolve the issue (see the diff and usage check below)

* fix(vertex_llm_base.py): fix exception message to not log credentials (see the sketch after this list)
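The health check change is not in the diff shown below, but the idea behind it is straightforward: scrub credential-like fields before they are surfaced in debug output. A minimal sketch, assuming a flat settings dict; the helper name and the field list are illustrative, not LiteLLM's actual implementation:

    # Illustrative only: mask values of credential-like keys before they
    # appear in health check debug information.
    SENSITIVE_KEYS = {"api_key", "aws_secret_access_key", "vertex_credentials"}

    def mask_sensitive_fields(debug_info: dict) -> dict:
        masked = {}
        for key, value in debug_info.items():
            if key.lower() in SENSITIVE_KEYS and isinstance(value, str):
                masked[key] = value[:4] + "****"  # keep a short prefix for debugging
            else:
                masked[key] = value
        return masked

    print(mask_sensitive_fields({"model": "gpt-4o", "api_key": "sk-12345678"}))
    # -> {'model': 'gpt-4o', 'api_key': 'sk-1****'}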
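The Vertex fix follows the same principle: when credentials fail to load, the raised exception should describe the failure, never echo the credential blob. A hedged sketch of the pattern; load_vertex_credentials is a hypothetical stand-in, not the actual function in vertex_llm_base.py:

    import json

    def load_vertex_credentials(raw_json: str) -> dict:
        try:
            return json.loads(raw_json)
        except Exception as e:
            # Interpolating raw_json into the message would leak the
            # service-account key into logs and error responses; report
            # only the failure type and reason instead.
            raise ValueError(
                f"Unable to load vertex credentials: {type(e).__name__}: {e}"
            ) from None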
Krish Dholakia 2024-09-14 10:32:39 -07:00 committed by GitHub
parent 60709a0753
commit dad1ad2077
5 changed files with 43 additions and 14 deletions

litellm/proxy/route_llm_request.py

@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Literal, Optional, Union
+from typing import TYPE_CHECKING, Any, List, Literal, Optional, Union
 
 from fastapi import (
     Depends,
@@ -37,6 +37,14 @@ ROUTE_ENDPOINT_MAPPING = {
 }
 
 
+class ProxyModelNotFoundError(HTTPException):
+    def __init__(self, route: str, model_name: str):
+        detail = {
+            "error": f"{route}: Invalid model name passed in model={model_name}. Call `/v1/models` to view available models for your key."
+        }
+        super().__init__(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)
+
+
 async def route_request(
     data: dict,
     llm_router: Optional[LitellmRouter],
@@ -110,10 +118,7 @@ async def route_request(
 
     # if no route found then it's a bad request
     route_name = ROUTE_ENDPOINT_MAPPING.get(route_type, route_type)
-    raise HTTPException(
-        status_code=status.HTTP_400_BAD_REQUEST,
-        detail={
-            "error": f"{route_name}: Invalid model name passed in model="
-            + data.get("model", "")
-        },
+    raise ProxyModelNotFoundError(
+        route=route_name,
+        model_name=data.get("model", ""),
     )