litellm-mirror/litellm/proxy_server/proxy_server.py

import litellm
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import json

app = FastAPI()
user_api_base = None
user_model = None

# record the model (and optional api_base) that every proxied request should use
def initialize(model, api_base):
    global user_model, user_api_base
    user_model = model
    user_api_base = api_base
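
# NOTE: the /chat/completions route below streams via data_generator, which was
# missing from this snippet; a minimal sketch is assumed here that emits each
# streamed chunk as a server-sent event.
def data_generator(response):
    for chunk in response:
        yield f"data: {json.dumps(chunk)}\n\n"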

@app.get("/models")  # if project requires model list
def model_list():
    return dict(
        data=[{"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}],
        object="list",
    )

@app.post("/chat/completions")
async def completion(request: Request):
    data = await request.json()
    if user_model is None:
        raise ValueError("Proxy model needs to be set")
    data["model"] = user_model
    if user_api_base:
        data["api_base"] = user_api_base
    response = litellm.completion(**data)
    if "stream" in data and data["stream"] == True:  # use data_generator to stream responses
        return StreamingResponse(data_generator(response), media_type="text/event-stream")
    return response
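
# Example usage (sketch; assumes this module is importable as proxy_server and
# that uvicorn is installed):
#   import uvicorn
#   from proxy_server import app, initialize
#   initialize(model="gpt-3.5-turbo", api_base=None)
#   uvicorn.run(app, host="0.0.0.0", port=8000)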