litellm-mirror/litellm/proxy/proxy_load_test/simple_litellm_proxy.py

# import sys, os
# sys.path.insert(
#     0, os.path.abspath("../")
# )  # Adds the parent directory to the system path
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from openai import AsyncOpenAI

import litellm

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# AsyncOpenAI client pointed at a mock OpenAI-compatible endpoint used for load testing
litellm_client = AsyncOpenAI(
    base_url="https://exampleopenaiendpoint-production.up.railway.app/",
    api_key="sk-1234",
)


# for completion
@app.post("/chat/completions")
@app.post("/v1/chat/completions")
async def completion(request: Request):
    # This proxy ignores the incoming request body and always sends a fixed
    # prompt through litellm.acompletion, routed via the OpenAI SDK client above.
    response = await litellm.acompletion(
        model="openai/anything",
        messages=[
            {
                "role": "user",
                "content": "hello who are you",
            }
        ],
        client=litellm_client,
    )
    return response
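
# Example smoke test once an instance is up (assumes port 8090; since the
# handler above ignores the request body, any JSON payload works):
#
#   curl -X POST http://localhost:8090/v1/chat/completions \
#        -H "Content-Type: application/json" \
#        -d '{}'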

if __name__ == "__main__":
    import uvicorn

    # run this on 8090, 8091, 8092 and 8093
    uvicorn.run(app, host="0.0.0.0", port=8090)
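
# To launch the four load-test instances (ports 8090-8093) without editing the
# hardcoded port above, one option is uvicorn's CLI, one instance per shell
# (module name assumed from this file's path):
#
#   uvicorn simple_litellm_proxy:app --host 0.0.0.0 --port 8090
#   uvicorn simple_litellm_proxy:app --host 0.0.0.0 --port 8091
#   uvicorn simple_litellm_proxy:app --host 0.0.0.0 --port 8092
#   uvicorn simple_litellm_proxy:app --host 0.0.0.0 --port 8093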