show all error types on swagger

Ishaan Jaff 2024-08-29 18:50:41 -07:00
parent ef47b2bc87
commit ad88c7d0a8
2 changed files with 50 additions and 0 deletions

@@ -0,0 +1,48 @@
from typing import Any, Dict

from pydantic import BaseModel, Field

from litellm.exceptions import LITELLM_EXCEPTION_TYPES


class ErrorResponse(BaseModel):
    detail: Dict[str, Any] = Field(
        ...,
        example={  # type: ignore
            "error": {
                "message": "Error message",
                "type": "error_type",
                "param": "error_param",
                "code": "error_code",
            }
        },
    )


# Define a function to get the status code
def get_status_code(exception):
    if hasattr(exception, "status_code"):
        return exception.status_code
    # Default status codes for exceptions without a status_code attribute
    if exception.__name__ == "Timeout":
        return 408  # Request Timeout
    if exception.__name__ == "APIConnectionError":
        return 503  # Service Unavailable
    return 500  # Internal Server Error as default


# Create error responses
ERROR_RESPONSES = {
    get_status_code(exception): {
        "model": ErrorResponse,
        "description": exception.__doc__ or exception.__name__,
    }
    for exception in LITELLM_EXCEPTION_TYPES
}

# Ensure we have a 500 error response
if 500 not in ERROR_RESPONSES:
    ERROR_RESPONSES[500] = {
        "model": ErrorResponse,
        "description": "Internal Server Error",
    }
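Taken together, the comprehension above builds a FastAPI-style `responses` mapping keyed by HTTP status code, with one entry per exception class in `LITELLM_EXCEPTION_TYPES` (falling back to 408, 503, or 500 when a class carries no `status_code`). A minimal sketch for inspecting the result, assuming litellm is installed and the new module lives at the path imported in the proxy server diff below:

# Minimal sketch: print the status codes and descriptions that will be
# advertised on Swagger. Assumes the module path used by the import below.
from litellm.proxy.common_utils.swagger_utils import ERROR_RESPONSES

for status_code, spec in sorted(ERROR_RESPONSES.items()):
    # Each value is a FastAPI "additional responses" entry: a response
    # model plus a human-readable description taken from the exception.
    print(status_code, spec["description"])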

@@ -167,6 +167,7 @@ from litellm.proxy.common_utils.load_config_utils import get_file_contents_from_
from litellm.proxy.common_utils.openai_endpoint_utils import (
    remove_sensitive_info_from_deployment,
)
from litellm.proxy.common_utils.swagger_utils import ERROR_RESPONSES
from litellm.proxy.fine_tuning_endpoints.endpoints import router as fine_tuning_router
from litellm.proxy.fine_tuning_endpoints.endpoints import set_fine_tuning_config
from litellm.proxy.guardrails.init_guardrails import (
@@ -3036,6 +3037,7 @@ def model_list(
    "/openai/deployments/{model:path}/chat/completions",
    dependencies=[Depends(user_api_key_auth)],
    tags=["chat/completions"],
    responses={200: {"description": "Successful response"}, **ERROR_RESPONSES},
)  # azure compatible endpoint
@backoff.on_exception(
    backoff.expo,
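For reference, this relies on FastAPI's standard mechanism for documenting additional responses: anything passed via the decorator's `responses=` argument is merged into the generated OpenAPI schema and shown on the Swagger page, without changing the handler's behavior. A self-contained sketch of the pattern (an illustrative toy endpoint, not the proxy's actual route):

# Illustrative only -- a toy endpoint showing how ERROR_RESPONSES feeds the
# OpenAPI schema; the real proxy attaches it to its own routes as in the diff.
from fastapi import FastAPI

from litellm.proxy.common_utils.swagger_utils import ERROR_RESPONSES

app = FastAPI()


@app.post(
    "/chat/completions",
    responses={200: {"description": "Successful response"}, **ERROR_RESPONSES},
)
async def chat_completions(payload: dict):
    # The `responses` mapping only affects documentation; the handler is unchanged.
    return {"ok": True}


# Every status code in ERROR_RESPONSES now appears for this path in the Swagger UI,
# e.g. under app.openapi()["paths"]["/chat/completions"]["post"]["responses"].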