diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 603d5b6d2..b5a0f0e91 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -509,8 +509,15 @@ def model_list():
 @router.post("/engines/{model:path}/completions")
 async def completion(request: Request):
     body = await request.body()
-    body = body.decode()
-    data = ast.literal_eval(body)
+    body_str = body.decode()
+    # Bodies on this endpoint are JSON; ast.literal_eval rejects the JSON
+    # literals true/false/null, so parse as JSON first.
+    import json  # stdlib; local import — file-level imports are outside this diff
+    try:
+        data = json.loads(body_str)
+    except json.JSONDecodeError:
+        # Fallback for legacy clients that send Python-repr payloads.
+        data = ast.literal_eval(body_str)
     return litellm_completion(data=data, type="completion", user_model=user_model, user_temperature=user_temperature, user_max_tokens=user_max_tokens, user_api_base=user_api_base, user_headers=user_headers, user_debug=user_debug, model_router=model_router, user_request_timeout=user_request_timeout)
 
@@ -519,9 +526,15 @@ async def completion(request: Request):
 
 @router.post("/v1/chat/completions")
 @router.post("/chat/completions")
 async def chat_completion(request: Request):
-    body = body.decode()
-    data = ast.literal_eval(body)
-    print_verbose(f"data passed in: {data}")
+    body = await request.body()
+    body_str = body.decode()
+    import json  # stdlib; local import — file-level imports are outside this diff
+    try:
+        data = json.loads(body_str)
+    except json.JSONDecodeError:
+        # Fallback for legacy clients that send Python-repr payloads.
+        data = ast.literal_eval(body_str)
+    print_verbose(f"data passed in: {data}")
     return litellm_completion(data, type="chat_completion", user_model=user_model, user_temperature=user_temperature, user_max_tokens=user_max_tokens, user_api_base=user_api_base, user_headers=user_headers, user_debug=user_debug, model_router=model_router, user_request_timeout=user_request_timeout)
 