mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
fix(proxy_server.py): accept single quote json body
This commit is contained in:
parent
a12d0508c4
commit
cb20554c79
1 changed file with 6 additions and 3 deletions
|
@@ -1,5 +1,5 @@
|
||||||
import sys, os, platform, time, copy
|
import sys, os, platform, time, copy
|
||||||
import threading
|
import threading, ast
|
||||||
import shutil, random, traceback, requests
|
import shutil, random, traceback, requests
|
||||||
|
|
||||||
messages: list = []
|
messages: list = []
|
||||||
|
@@ -508,7 +508,9 @@ def model_list():
|
||||||
@router.post("/completions")
@router.post("/engines/{model:path}/completions")
async def completion(request: Request):
    """Handle a text-completion request and proxy it to litellm.

    Reads the raw request body and parses it leniently: strict JSON is
    tried first, with a fallback to Python-literal parsing so that
    single-quoted JSON-ish bodies (the motivation for this commit) are
    still accepted. The parsed dict is forwarded to litellm_completion
    together with the server-level user_* configuration.
    """
    import json  # local import: json is not visible in this module's top-level imports

    body = await request.body()
    body = body.decode()
    try:
        # Prefer strict JSON: ast.literal_eval alone rejects valid JSON
        # containing true/false/null (Python spells them True/False/None),
        # so routing everything through literal_eval breaks standard clients.
        data = json.loads(body)
    except json.JSONDecodeError:
        # Fallback for single-quoted / Python-literal style bodies.
        # literal_eval only evaluates literals (no code execution), but it
        # still accepts arbitrary nested structures from untrusted input.
        data = ast.literal_eval(body)
    return litellm_completion(data=data, type="completion", user_model=user_model, user_temperature=user_temperature,
                              user_max_tokens=user_max_tokens, user_api_base=user_api_base, user_headers=user_headers,
                              user_debug=user_debug, model_router=model_router, user_request_timeout=user_request_timeout)
|
@@ -517,7 +519,8 @@ async def completion(request: Request):
|
||||||
@router.post("/v1/chat/completions")
|
@router.post("/v1/chat/completions")
|
||||||
@router.post("/chat/completions")
|
@router.post("/chat/completions")
|
||||||
async def chat_completion(request: Request):
|
async def chat_completion(request: Request):
|
||||||
data = await request.json()
|
body = body.decode()
|
||||||
|
data = ast.literal_eval(body)
|
||||||
print_verbose(f"data passed in: {data}")
|
print_verbose(f"data passed in: {data}")
|
||||||
return litellm_completion(data, type="chat_completion", user_model=user_model,
|
return litellm_completion(data, type="chat_completion", user_model=user_model,
|
||||||
user_temperature=user_temperature, user_max_tokens=user_max_tokens,
|
user_temperature=user_temperature, user_max_tokens=user_max_tokens,
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue