Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
adding support for completions endpoint in proxy
Commit c737462beb (parent b08aea851f)
5 changed files with 42 additions and 4 deletions
Binary file not shown.
@@ -1431,7 +1431,25 @@ def text_completion(*args, **kwargs):
         messages = [{"role": "system", "content": kwargs["prompt"]}]
         kwargs["messages"] = messages
         kwargs.pop("prompt")
-        return completion(*args, **kwargs)
+        response = completion(*args, **kwargs)  # assume the response is the openai response object
+        response_2 = {
+            "id": response["id"],
+            "object": "text_completion",
+            "created": response["created"],
+            "model": response["model"],
+            "choices": [
+                {
+                    "text": response["choices"][0]["message"]["content"],
+                    "index": response["choices"][0]["index"],
+                    "logprobs": None,
+                    "finish_reason": response["choices"][0]["finish_reason"]
+                }
+            ],
+            "usage": response["usage"]
+        }
+        return response_2
+    else:
+        raise ValueError("please pass prompt into the `text_completion` endpoint - `text_completion(model, prompt='hello world')`")
 
 ##### Moderation #######################
 def moderation(input: str, api_key: Optional[str]=None):

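For reference, the hunk above wraps the chat-style completion() call and repackages its result in the legacy text-completion shape. A minimal sketch of how a caller might use the patched helper (the model name and prompt are illustrative, and the field access assumes the OpenAI-style response dict built above):

```python
import litellm

# Hypothetical call against the patched text_completion shim; the prompt is
# converted into a single system message and forwarded to completion().
response = litellm.text_completion(model="gpt-3.5-turbo", prompt="hello world")

print(response["object"])              # "text_completion"
print(response["choices"][0]["text"])  # the generated text
print(response["usage"])               # token usage passed through unchanged
```
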
@@ -7,7 +7,8 @@ load_dotenv()
 @click.option('--api_base', default=None, help='API base URL.')
 @click.option('--model', required=True, help='The model name to pass to litellm expects')
 def run_server(port, api_base, model):
-    from .proxy_server import app, initialize
+    # from .proxy_server import app, initialize
+    from proxy_server import app, initialize
     initialize(model, api_base)
     try:
         import uvicorn

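The import swap above (the relative package import commented out in favor of a plain local import) suggests the proxy is being run straight from its source directory during development. A hedged sketch of what run_server effectively does after this change (the uvicorn call and the argument values are assumptions, not shown in the hunk):

```python
# Assumed development-time flow: run from the proxy directory so that
# proxy_server is importable as a top-level module.
from proxy_server import app, initialize
import uvicorn

initialize("gpt-3.5-turbo", None)            # (model, api_base) as in run_server; values illustrative
uvicorn.run(app, host="0.0.0.0", port=8000)  # host and port are assumptions
```
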
@@ -1,4 +1,10 @@
+import sys, os
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+
 import litellm
+print(litellm.__file__)
 from fastapi import FastAPI, Request
 from fastapi.responses import StreamingResponse
 import json

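The sys.path insertion above appears intended to make `import litellm` resolve to the local source checkout when proxy_server.py is run directly, with print(litellm.__file__) confirming which copy was loaded. A short sketch of the assumed layout (inferred from the "../.." in the diff, not stated in the commit):

```python
# Assumed layout, relative to the directory proxy_server.py is run from:
#   ../../litellm/__init__.py   <- local package two levels up
# Prepending that root to sys.path shadows any pip-installed litellm.
import os, sys
sys.path.insert(0, os.path.abspath("../.."))

import litellm
print(litellm.__file__)  # shows whether the local checkout or an installed copy was imported
```
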
@@ -25,8 +31,21 @@ def model_list():
         object="list",
     )
 
-@app.post("/chat/completions")
+@app.post("/completions")
 async def completion(request: Request):
+    data = await request.json()
+    if (user_model is None):
+        raise ValueError("Proxy model needs to be set")
+    data["model"] = user_model
+    if user_api_base:
+        data["api_base"] = user_api_base
+    response = litellm.text_completion(**data)
+    if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
+        return StreamingResponse(data_generator(response), media_type='text/event-stream')
+    return response
+
+@app.post("/chat/completions")
+async def chat_completion(request: Request):
     data = await request.json()
     if (user_model is None):
         raise ValueError("Proxy model needs to be set")

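To show how the new /completions route might be exercised once the proxy is running, here is a minimal client sketch (the host, port, and payload fields are assumptions; the proxy injects the model and api_base configured at startup before calling litellm.text_completion):

```python
import requests

# Hypothetical request against a locally running proxy instance.
resp = requests.post(
    "http://localhost:8000/completions",
    json={"prompt": "hello world"},  # no model needed; the handler sets data["model"]
)
print(resp.json()["choices"][0]["text"])
```

Passing "stream": true in the payload would instead return a server-sent-event stream via StreamingResponse, per the branch in the handler above.
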
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.789"
+version = "0.1.790"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"