# import sys, os
# sys.path.insert(
#     0, os.path.abspath("../")
# )  # Adds the parent directory to the system path
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from openai import AsyncOpenAI

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# OpenAI SDK client pointed at a fixed example endpoint; "sk-1234" is a dummy key
litellm_client = AsyncOpenAI(
    base_url="https://exampleopenaiendpoint-production.up.railway.app/",
    api_key="sk-1234",
)


# for completion
@app.post("/chat/completions")
@app.post("/v1/chat/completions")
async def completion(request: Request):
    # this proxy uses the OpenAI SDK to call a fixed endpoint

    response = await litellm_client.chat.completions.create(
        model="anything",
        messages=[
            {
                "role": "user",
                "content": "hello who are you",
            }
        ],
    )

    return response


if __name__ == "__main__":
    import uvicorn

    # run this on 8090, 8091, 8092 and 8093
    uvicorn.run(app, host="0.0.0.0", port=8090)
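

# --- Illustrative usage sketch (an addition, not part of the original request flow) ---
# A minimal, hedged example of how a caller might exercise this mock proxy once it is
# running locally on port 8090. The localhost URL, the _example_client_call name, and
# the dummy key are assumptions for illustration only; nothing in the app calls this.
async def _example_client_call():
    example_client = AsyncOpenAI(
        base_url="http://localhost:8090",  # assumed address of a locally running instance
        api_key="sk-1234",  # dummy key, mirroring the value used above
    )
    # The OpenAI SDK posts to {base_url}/chat/completions, which this app serves.
    return await example_client.chat.completions.create(
        model="anything",
        messages=[{"role": "user", "content": "hello who are you"}],
    )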