Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
(fix) proxy_server linting errors
This commit is contained in: parent a1c9c80031, commit c3101967b6
1 changed file with 2 additions and 2 deletions
@@ -381,12 +381,12 @@ async def chat_completion(request: Request):

 # V1 Endpoints - some apps expect a v1 endpoint - these call the regular function
 @router.post("/v1/completions")
-async def completion(request: Request):
+async def v1_completion(request: Request):
     data = await request.json()
     return litellm_completion(data=data, type="completion")

 @router.post("/v1/chat/completions")
-async def chat_completion(request: Request):
+async def v1_chat_completion(request: Request):
     data = await request.json()
     print_verbose(f"data passed in: {data}")
     response = litellm_completion(data, type="chat_completion")
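
The fix itself is a pure rename: the /v1/* routes delegate to the same litellm_completion helper, but their handlers duplicated function names already used by the non-v1 endpoints (the hunk header shows an earlier async def chat_completion), which linters flag as redefinitions. Below is a minimal, self-contained sketch of the resulting pattern, assuming FastAPI's APIRouter as used in proxy_server.py; the stubbed print_verbose and litellm_completion helpers and the trailing return response are illustrative placeholders, not the proxy's real implementations.

# Minimal sketch of the routing pattern after this commit (not the full proxy_server.py).
# Assumes FastAPI's APIRouter; the stub helpers below are placeholders so the
# example runs standalone.
from fastapi import APIRouter, Request

router = APIRouter()


def print_verbose(msg: str) -> None:
    # Stand-in for the proxy's verbose logger.
    print(msg)


def litellm_completion(data: dict, type: str) -> dict:
    # Stand-in for the proxy's shared completion helper.
    return {"type": type, "echo": data}


@router.post("/chat/completions")
async def chat_completion(request: Request):
    data = await request.json()
    return litellm_completion(data, type="chat_completion")


# V1 Endpoints - some apps expect a v1 endpoint - these call the regular function.
# Each v1 handler now gets a unique name (v1_*), so the module no longer
# redefines chat_completion, which is what triggered the linting error.
@router.post("/v1/completions")
async def v1_completion(request: Request):
    data = await request.json()
    return litellm_completion(data=data, type="completion")


@router.post("/v1/chat/completions")
async def v1_chat_completion(request: Request):
    data = await request.json()
    print_verbose(f"data passed in: {data}")
    response = litellm_completion(data, type="chat_completion")
    return response  # assumption: the return falls outside the diff hunk shown above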