Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-28 04:04:31 +00:00)
(feat) proxy - introduce OpenAIException class
parent ba4253ee27
commit 04db5b7bbf
1 changed file with 46 additions and 6 deletions
@@ -94,7 +94,12 @@ from fastapi import (
 from fastapi.routing import APIRouter
 from fastapi.security import OAuth2PasswordBearer
 from fastapi.encoders import jsonable_encoder
-from fastapi.responses import StreamingResponse, FileResponse, ORJSONResponse
+from fastapi.responses import (
+    StreamingResponse,
+    FileResponse,
+    ORJSONResponse,
+    JSONResponse,
+)
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.security.api_key import APIKeyHeader
 import json
@@ -106,6 +111,39 @@ app = FastAPI(
     title="LiteLLM API",
     description="Proxy Server to call 100+ LLMs in the OpenAI format\n\nAdmin Panel on [https://dashboard.litellm.ai/admin](https://dashboard.litellm.ai/admin)",
 )
+
+
+class OpenAIException(Exception):
+    def __init__(
+        self,
+        message: str,
+        type: str,
+        param: Optional[str],
+        code: Optional[int],
+    ):
+        self.message = message
+        self.type = type
+        self.param = param
+        self.code = code
+
+
+@app.exception_handler(OpenAIException)
+async def openai_exception_handler(request: Request, exc: OpenAIException):
+    return JSONResponse(
+        status_code=int(exc.code)
+        if exc.code
+        else status.HTTP_500_INTERNAL_SERVER_ERROR,
+        content={
+            "error": {
+                "message": exc.message,
+                "type": exc.type,
+                "param": exc.param,
+                "code": exc.code,
+            }
+        },
+    )
+
+
 router = APIRouter()
 origins = ["*"]
@@ -1611,11 +1649,13 @@ async def chat_completion(
         else:
             error_traceback = traceback.format_exc()
             error_msg = f"{str(e)}\n\n{error_traceback}"
-            try:
-                status = e.status_code  # type: ignore
-            except:
-                status = 500
-            raise HTTPException(status_code=status, detail=error_msg)
+            raise OpenAIException(
+                message=getattr(e, "message", error_msg),
+                type=getattr(e, "type", "None"),
+                param=getattr(e, "param", "None"),
+                code=getattr(e, "status_code", 500),
+            )
+

 @router.post(
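For context on the mechanism used here: FastAPI's @app.exception_handler hook converts any OpenAIException raised inside a route into an HTTP response, so chat_completion no longer builds the error response itself. Below is a minimal, self-contained sketch of the same pattern; the /boom endpoint and its error message are illustrative only and are not part of this commit.

from typing import Optional

from fastapi import FastAPI, Request, status
from fastapi.responses import JSONResponse

app = FastAPI()


class OpenAIException(Exception):
    # Carries OpenAI-style error fields so the handler can serialize them.
    def __init__(self, message: str, type: str, param: Optional[str], code: Optional[int]):
        self.message = message
        self.type = type
        self.param = param
        self.code = code


@app.exception_handler(OpenAIException)
async def openai_exception_handler(request: Request, exc: OpenAIException):
    # Fall back to 500 when the exception carries no status code.
    return JSONResponse(
        status_code=int(exc.code) if exc.code else status.HTTP_500_INTERNAL_SERVER_ERROR,
        content={
            "error": {
                "message": exc.message,
                "type": exc.type,
                "param": exc.param,
                "code": exc.code,
            }
        },
    )


# Hypothetical endpoint, only to show the handler in action.
@app.get("/boom")
async def boom():
    raise OpenAIException(
        message="model not found", type="invalid_request_error", param="model", code=404
    )

A request to /boom would then return HTTP 404 with a body of {"error": {"message": "model not found", "type": "invalid_request_error", "param": "model", "code": 404}}, the OpenAI-style error shape that clients of the proxy expect.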