forked from phoenix/litellm-mirror
(linting) fix
parent 73eb5a7718
commit 455c915b70
1 changed file with 4 additions and 4 deletions
@@ -99,6 +99,7 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi.security.api_key import APIKeyHeader
 import json
 import logging
+from typing import Union
 # from litellm.proxy.queue import start_rq_worker_in_background
 
 app = FastAPI(docs_url="/", title="LiteLLM API")
@@ -115,9 +116,9 @@ app.add_middleware(
 
 
 from typing import Dict
-from pydantic import BaseModel, Extra
+from pydantic import BaseModel
 ######### Request Class Definition ######
-class ChatCompletionRequest(BaseModel):
+class ProxyChatCompletionRequest(BaseModel):
     model: str
     messages: List[Dict[str, str]]
     temperature: Optional[float] = None
@@ -151,7 +152,6 @@ class ChatCompletionRequest(BaseModel):
 
     class Config:
         extra='allow' # allow params not defined here, these fall in litellm.completion(**kwargs)
 
-
 user_api_base = None
 user_model = None
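For context (not part of this commit): the renamed ProxyChatCompletionRequest keeps the extra='allow' Config shown above, which is what lets callers send parameters the model does not declare so they can be forwarded to litellm.completion(**kwargs). A minimal sketch of that behavior, with illustrative values and a pydantic v1-style Config:

# Sketch only: undeclared params are retained because of extra='allow'.
from typing import Dict, List, Optional
from pydantic import BaseModel

class ProxyChatCompletionRequest(BaseModel):
    model: str
    messages: List[Dict[str, str]]
    temperature: Optional[float] = None

    class Config:
        extra = 'allow'  # accept fields not defined on the model

req = ProxyChatCompletionRequest(
    model="gpt-3.5-turbo",                          # illustrative values
    messages=[{"role": "user", "content": "hi"}],
    top_p=0.9,                                      # not declared above, still kept
)
print(req.dict())  # includes top_p alongside the declared fields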
@@ -752,7 +752,7 @@ async def completion(request: Request, model: Optional[str] = None, user_api_key
 @router.post("/v1/chat/completions", dependencies=[Depends(user_api_key_auth)], tags=["chat/completions"])
 @router.post("/chat/completions", dependencies=[Depends(user_api_key_auth)], tags=["chat/completions"])
 @router.post("/openai/deployments/{model:path}/chat/completions", dependencies=[Depends(user_api_key_auth)], tags=["chat/completions"]) # azure compatible endpoint
-async def chat_completion(request: ChatCompletionRequest, model: Optional[str] = None, user_api_key_dict: dict = Depends(user_api_key_auth)) -> litellm.ModelResponse:
+async def chat_completion(request: ProxyChatCompletionRequest, model: Optional[str] = None, user_api_key_dict: dict = Depends(user_api_key_auth)) -> Union[litellm.ModelResponse, StreamingResponse]:
     global general_settings, user_debug
     try:
         data = {}
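For context (not part of this commit): the widened return annotation reflects that the endpoint can hand back either a plain litellm.ModelResponse or a fastapi StreamingResponse when the caller sets stream=True. A simplified, assumed sketch of that pattern, not the proxy's actual handler body:

# Simplified sketch of the Union return pattern; names and framing are illustrative.
from typing import Union
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
import litellm

app = FastAPI()

# response_model=None keeps FastAPI from trying to build a response schema from the Union
@app.post("/chat/completions", response_model=None)
async def chat_completion(data: dict) -> Union[litellm.ModelResponse, StreamingResponse]:
    response = litellm.completion(**data)
    if data.get("stream"):
        # stream chunks back to the client; real code serializes each chunk to JSON/SSE
        return StreamingResponse(
            (f"data: {chunk}\n\n" for chunk in response),
            media_type="text/event-stream",
        )
    return response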