diff --git a/openai-proxy/main.py b/openai-proxy/main.py
index a74a2b60d..9bba0d96b 100644
--- a/openai-proxy/main.py
+++ b/openai-proxy/main.py
@@ -5,6 +5,9 @@ from fastapi.responses import StreamingResponse, FileResponse
 from fastapi.middleware.cors import CORSMiddleware
 import json
 import os
+from utils import set_callbacks
+import dotenv
+dotenv.load_dotenv() # load env variables
 
 app = FastAPI(docs_url="/", title="LiteLLM API")
 router = APIRouter()
@@ -17,9 +20,7 @@ app.add_middleware(
     allow_methods=["*"],
     allow_headers=["*"],
 )
-
-if ("LANGUFSE_PUBLIC_KEY" in os.environ and "LANGUFSE_SECRET_KEY" in os.environ) or "LANGFUSE_HOST" in os.environ:
-    litellm.success_callback = ["langfuse"]
+set_callbacks() # sets litellm callbacks for logging if they exist in the environment
 
 #### API ENDPOINTS ####
 @router.post("/v1/models")
diff --git a/openai-proxy/requirements.txt b/openai-proxy/requirements.txt
index b7d6e99bf..8dda011eb 100644
--- a/openai-proxy/requirements.txt
+++ b/openai-proxy/requirements.txt
@@ -2,4 +2,5 @@ openai
 fastapi
 uvicorn
 boto3
-litellm
\ No newline at end of file
+litellm
+python-dotenv
\ No newline at end of file
diff --git a/openai-proxy/utils.py b/openai-proxy/utils.py
new file mode 100644
index 000000000..3a469a416
--- /dev/null
+++ b/openai-proxy/utils.py
@@ -0,0 +1,8 @@
+import os, litellm
+import dotenv
+dotenv.load_dotenv() # load env variables
+
+def set_callbacks():
+    """Enable litellm logging callbacks when Langfuse credentials exist in the env."""
+    if ("LANGFUSE_PUBLIC_KEY" in os.environ and "LANGFUSE_SECRET_KEY" in os.environ) or "LANGFUSE_HOST" in os.environ:
+        litellm.success_callback = ["langfuse"]