forked from phoenix/litellm-mirror
fix the litellm init issue
parent ad5dff66a9
commit 0acde1c72d
8 changed files with 54 additions and 59 deletions
dist/litellm-0.1.7701-py3-none-any.whl (BIN, vendored, Normal file)
Binary file not shown.
dist/litellm-0.1.7701.tar.gz (BIN, vendored, Normal file)
Binary file not shown.
litellm/__init__.py
@@ -322,4 +322,4 @@ from .exceptions import (
 )
 from .budget_manager import BudgetManager
-from .proxy import run_server
+from .proxy_server.proxy_cli import run_server
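The practical effect of this one-line change, and the apparent point of the "init issue" fix, is that importing litellm no longer pulls in FastAPI and uvicorn eagerly: the deleted proxy.py (below) imported them at module level and raised ImportError when they were missing, while the new proxy_cli.py only imports click and dotenv up front and defers the server imports until run_server is actually invoked. A minimal sketch of how downstream code reaches the re-exported command after the fix; the snippet is illustrative, not part of the commit:

import litellm  # at this commit, no longer requires fastapi/uvicorn at import time

if __name__ == "__main__":
    litellm.run_server()  # Click parses --port/--api_base/--model from sys.argv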
Binary file not shown.
litellm/proxy.py (deleted file, 57 lines)
@@ -1,57 +0,0 @@
import litellm
import click, json
from dotenv import load_dotenv
load_dotenv()
try:
    from fastapi import FastAPI, Request, status, HTTPException, Depends
    from fastapi.responses import StreamingResponse
except:
    raise ImportError("FastAPI needs to be imported. Run - `pip install fastapi`")

try:
    import uvicorn
except:
    raise ImportError("Uvicorn needs to be imported. Run - `pip install uvicorn`")

app = FastAPI()
user_api_base = None
user_model = None


# for streaming
def data_generator(response):
    for chunk in response:
        yield f"data: {json.dumps(chunk)}\n\n"

@app.get("/models") # if project requires model list
def model_list():
    return dict(
        data=[
            {"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}
        ],
        object="list",
    )

@app.post("/chat/completions")
async def completion(request: Request):
    data = await request.json()
    if (user_model is None):
        raise ValueError("Proxy model needs to be set")
    data["model"] = user_model
    if user_api_base:
        data["api_base"] = user_api_base
    response = litellm.completion(**data)
    if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
        return StreamingResponse(data_generator(response), media_type='text/event-stream')
    return response


@click.command()
@click.option('--port', default=8000, help='Port to bind the server to.')
@click.option('--api_base', default=None, help='API base URL.')
@click.option('--model', required=True, help='The model name to pass to litellm expects')
def run_server(port, api_base, model):
    global user_api_base, user_model
    user_api_base = api_base
    user_model = model
    uvicorn.run(app, host='0.0.0.0', port=port)
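The deleted module framed streaming completions as server-sent events. A self-contained sketch of the framing data_generator produced, using a made-up chunk purely for illustration:

import json

def data_generator(response):
    # one SSE frame per streamed chunk, as in the removed file
    for chunk in response:
        yield f"data: {json.dumps(chunk)}\n\n"

# hypothetical delta chunk, shaped like one piece of a streaming chat completion
frames = list(data_generator([{"choices": [{"delta": {"content": "Hello"}}]}]))
print(frames[0])  # -> 'data: {"choices": [{"delta": {"content": "Hello"}}]}' followed by a blank line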
litellm/proxy_server/proxy_cli.py (Normal file, 20 lines)
@@ -0,0 +1,20 @@
import click
from dotenv import load_dotenv
load_dotenv()

@click.command()
@click.option('--port', default=8000, help='Port to bind the server to.')
@click.option('--api_base', default=None, help='API base URL.')
@click.option('--model', required=True, help='The model name to pass to litellm expects')
def run_server(port, api_base, model):
    from proxy_server import app, initialize
    initialize(model, api_base)
    try:
        import uvicorn
    except:
        raise ImportError("Uvicorn needs to be imported. Run - `pip install uvicorn`")
    uvicorn.run(app, host='0.0.0.0', port=port)


if __name__ == "__main__":
    run_server()
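The new CLI only wires things together: it imports the FastAPI app lazily, seeds the module-level model and api_base through initialize, and hands the app to uvicorn. A sketch of driving it programmatically; the model name is a placeholder, and treating litellm.proxy_server as an importable package is an assumption rather than something this commit guarantees:

# roughly equivalent to: python -m litellm.proxy_server.proxy_cli --model gpt-3.5-turbo --port 8000
from litellm.proxy_server.proxy_cli import run_server

# Click commands accept an explicit argv; this call blocks while uvicorn serves requests
run_server(["--model", "gpt-3.5-turbo", "--port", "8000"], standalone_mode=False)

One caveat: the line from proxy_server import app, initialize is an absolute import, so it resolves only when litellm/proxy_server is on sys.path (for example when proxy_cli.py is executed from its own directory). Loaded as litellm.proxy_server.proxy_cli, the usual form would be a relative from .proxy_server import ..., so the sketch above may fail as the code stands in this commit.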
litellm/proxy_server/proxy_server.py (Normal file, 32 lines)
@@ -0,0 +1,32 @@
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import json

app = FastAPI()
user_api_base = None
user_model = None

def initialize(model, api_base):
    global user_model, user_api_base
    user_model = model
    user_api_base = api_base

@app.get("/models") # if project requires model list
def model_list():
    return dict(
        data=[{"id": user_model, "object": "model", "created": 1677610602, "owned_by": "openai"}],
        object="list",
    )

@app.post("/chat/completions")
async def completion(request: Request):
    data = await request.json()
    if (user_model is None):
        raise ValueError("Proxy model needs to be set")
    data["model"] = user_model
    if user_api_base:
        data["api_base"] = user_api_base
    response = litellm.completion(**data)
    if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
        return StreamingResponse(data_generator(response), media_type='text/event-stream')
    return response
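Note that, as shown, the handler calls litellm.completion and data_generator without importing or defining either in this new module, so it presumably relies on pieces equivalent to those in the deleted proxy.py. Independent of that, a minimal client sketch against a running proxy; the localhost URL and the requests dependency are assumptions, not part of this commit:

import requests

# the proxy injects the --model chosen at startup, so the client only supplies messages
resp = requests.post(
    "http://localhost:8000/chat/completions",
    json={"messages": [{"role": "user", "content": "Hello, proxy"}]},
)
print(resp.json())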
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.769"
+version = "0.1.771"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"