LiteLLM Minor Fixes and Improvements (09/14/2024) (#5697)

* fix(health_check.py): hide sensitive keys from health check debug information

* fix(route_llm_request.py): fix proxy model not found error message to indicate how to resolve the issue

* fix(vertex_llm_base.py): fix exception message to not log credentials
Krish Dholakia 2024-09-14 10:32:39 -07:00 committed by GitHub
parent 60709a0753
commit dad1ad2077
5 changed files with 43 additions and 14 deletions


@@ -255,9 +255,15 @@ class VertexBase(BaseLLM):
             return self.access_token, self.project_id
 
         if not self._credentials:
-            self._credentials, cred_project_id = await asyncify(self.load_auth)(
-                credentials=credentials, project_id=project_id
-            )
+            try:
+                self._credentials, cred_project_id = await asyncify(self.load_auth)(
+                    credentials=credentials, project_id=project_id
+                )
+            except Exception:
+                verbose_logger.exception(
+                    "Failed to load vertex credentials. Check to see if credentials containing partial/invalid information."
+                )
+                raise
             if not self.project_id:
                 self.project_id = project_id or cred_project_id
         else:
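
The pattern above is simple: if credential loading fails, log a fixed, generic message and re-raise, rather than formatting the credentials object into the error. A minimal sketch of that pattern, not part of this commit; the names load_credentials and creds_blob are illustrative only:

import logging

logger = logging.getLogger(__name__)

def load_credentials(creds_blob: str) -> dict:
    # Stand-in for the real loader; assume it raises on malformed input.
    raise ValueError("could not parse service account json")

def get_access_token(creds_blob: str) -> dict:
    try:
        return load_credentials(creds_blob)
    except Exception:
        # Log only a generic hint; never interpolate creds_blob into the message.
        logger.exception("Failed to load vertex credentials. Check for partial/invalid credentials.")
        raise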


@@ -29,3 +29,15 @@ print(response)
 ```
 
 [**See how to call Huggingface, Bedrock, TogetherAI, Anthropic, etc.**](https://docs.litellm.ai/docs/simple_proxy)
+
+---
+
+### Folder Structure
+
+**Routes**
+- `proxy_server.py` - all openai-compatible routes - `/v1/chat/completion`, `/v1/embedding` + model info routes - `/v1/models`, `/v1/model/info`, `/v1/model_group_info`
+- `health_endpoints/` - `/health`, `/health/liveliness`, `/health/readiness`
+- `management_endpoints/key_management_endpoints.py` - all `/key/*` routes
+- `management_endpoints/team_endpoints.py` - all `/team/*` routes
+- `management_endpoints/internal_user_endpoints.py` - all `/user/*` routes
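
To sanity-check the routes listed above against a running proxy, something like the following works; the base URL and key are assumptions for a default local setup, not values from this commit:

import requests

BASE_URL = "http://localhost:4000"             # assumed default local proxy address
HEADERS = {"Authorization": "Bearer sk-1234"}  # assumed example key

# Model info route served by proxy_server.py
print(requests.get(f"{BASE_URL}/v1/models", headers=HEADERS).json())

# Health route served by health_endpoints/
print(requests.get(f"{BASE_URL}/health", headers=HEADERS).json())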


@@ -11,7 +11,15 @@ from litellm._logging import print_verbose
 
 logger = logging.getLogger(__name__)
 
-ILLEGAL_DISPLAY_PARAMS = ["messages", "api_key", "prompt", "input"]
+ILLEGAL_DISPLAY_PARAMS = [
+    "messages",
+    "api_key",
+    "prompt",
+    "input",
+    "vertex_credentials",
+    "aws_access_key_id",
+    "aws_secret_access_key",
+]
 
 MINIMAL_DISPLAY_PARAMS = ["model", "mode_error"]
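
The deny-list only helps if it is applied before params are echoed in health-check debug output. A rough sketch of that filtering step, separate from this commit; scrub_for_display is a hypothetical helper, not the proxy's actual function:

ILLEGAL_DISPLAY_PARAMS = [
    "messages",
    "api_key",
    "prompt",
    "input",
    "vertex_credentials",
    "aws_access_key_id",
    "aws_secret_access_key",
]

def scrub_for_display(params: dict) -> dict:
    """Drop any key that must never show up in debug output."""
    return {k: v for k, v in params.items() if k not in ILLEGAL_DISPLAY_PARAMS}

print(scrub_for_display({"model": "gpt-4o", "api_key": "sk-secret", "mode": "chat"}))
# -> {'model': 'gpt-4o', 'mode': 'chat'}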


@@ -608,9 +608,7 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
             global_max_parallel_requests = _metadata.get(
                 "global_max_parallel_requests", None
             )
-            user_api_key = (
-                kwargs["litellm_params"].get("metadata", {}).get("user_api_key", None)
-            )
+            user_api_key = _metadata.get("user_api_key", None)
             self.print_verbose(f"user_api_key: {user_api_key}")
             if user_api_key is None:
                 return
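
The replacement line is a pure simplification: _metadata is already the nested metadata dict, so reading user_api_key from it is equivalent to the old double lookup. A tiny check, with a kwargs shape assumed purely for illustration:

kwargs = {
    "litellm_params": {
        "metadata": {"user_api_key": "hashed-key", "global_max_parallel_requests": 10}
    }
}

_metadata = kwargs["litellm_params"].get("metadata", {})

old_style = kwargs["litellm_params"].get("metadata", {}).get("user_api_key", None)
new_style = _metadata.get("user_api_key", None)
assert old_style == new_style == "hashed-key"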


@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Literal, Optional, Union
+from typing import TYPE_CHECKING, Any, List, Literal, Optional, Union
 
 from fastapi import (
     Depends,
@@ -37,6 +37,14 @@ ROUTE_ENDPOINT_MAPPING = {
 }
 
+
+class ProxyModelNotFoundError(HTTPException):
+    def __init__(self, route: str, model_name: str):
+        detail = {
+            "error": f"{route}: Invalid model name passed in model={model_name}. Call `/v1/models` to view available models for your key."
+        }
+        super().__init__(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)
+
+
 async def route_request(
     data: dict,
     llm_router: Optional[LitellmRouter],
@@ -110,10 +118,7 @@ async def route_request(
 
     # if no route found then it's a bad request
     route_name = ROUTE_ENDPOINT_MAPPING.get(route_type, route_type)
-    raise HTTPException(
-        status_code=status.HTTP_400_BAD_REQUEST,
-        detail={
-            "error": f"{route_name}: Invalid model name passed in model="
-            + data.get("model", "")
-        },
+    raise ProxyModelNotFoundError(
+        route=route_name,
+        model_name=data.get("model", ""),
     )
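
Since ProxyModelNotFoundError subclasses HTTPException, FastAPI's default exception handling turns it into a 400 response carrying the structured detail payload. A self-contained sketch of that behavior; the toy app and route below are illustrative, not the proxy's real wiring:

from fastapi import FastAPI, HTTPException, status
from fastapi.testclient import TestClient

class ProxyModelNotFoundError(HTTPException):
    def __init__(self, route: str, model_name: str):
        detail = {
            "error": f"{route}: Invalid model name passed in model={model_name}. Call `/v1/models` to view available models for your key."
        }
        super().__init__(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)

app = FastAPI()

@app.post("/v1/chat/completions")
async def chat(body: dict):
    # Always raise, just to show the error shape the client sees.
    raise ProxyModelNotFoundError(route="/chat/completions", model_name=body.get("model", ""))

client = TestClient(app)
resp = client.post("/v1/chat/completions", json={"model": "not-a-real-model"})
print(resp.status_code, resp.json())  # 400 {'detail': {'error': '/chat/completions: Invalid model name ...'}}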