forked from phoenix/litellm-mirror
refactor(proxy_server.py): move admin ui to be locally hosted within proxy
parent 2f012a5088
commit e5a287379a
4 changed files with 205 additions and 2 deletions
litellm/proxy/admin_ui.py (new file, 143 lines)
@@ -0,0 +1,143 @@
def add_new_model():
    import streamlit as st
    import json, requests, uuid

    model_name = st.text_input(
        "Model Name - user-facing model name", placeholder="gpt-3.5-turbo"
    )
    st.subheader("LiteLLM Params")
    litellm_model_name = st.text_input(
        "Model", placeholder="azure/gpt-35-turbo-us-east"
    )
    litellm_api_key = st.text_input("API Key")
    litellm_api_base = st.text_input(
        "API Base",
        placeholder="https://my-endpoint.openai.azure.com",
    )
    litellm_api_version = st.text_input("API Version", placeholder="2023-07-01-preview")
    litellm_params = json.loads(
        st.text_area(
            "Additional Litellm Params (JSON dictionary). [See all possible inputs](https://github.com/BerriAI/litellm/blob/3f15d7230fe8e7492c95a752963e7fbdcaf7bf98/litellm/main.py#L293)",
            value="{}",  # default to an empty JSON object so json.loads() succeeds
        )
    )
    st.subheader("Model Info")
    mode_options = ("completion", "embedding", "image generation")
    mode_selected = st.selectbox("Mode", mode_options)
    model_info = json.loads(
        st.text_area(
            "Additional Model Info (JSON dictionary)",
            value="{}",
        )
    )

    if st.button("Submit"):
        try:
            payload = {
                "model_name": model_name,
                "litellm_params": {
                    "model": litellm_model_name,
                    "api_key": litellm_api_key,
                    "api_base": litellm_api_base,
                    "api_version": litellm_api_version,
                    **litellm_params,  # merge in the additional params from the text area
                },
                "model_info": {
                    "id": str(uuid.uuid4()),
                    "mode": mode_selected,
                    **model_info,  # merge in the additional model info from the text area
                },
            }
            # Make the POST request to the specified URL
            complete_url = ""
            if st.session_state["api_url"].endswith("/"):
                complete_url = f"{st.session_state['api_url']}model/new"
            else:
                complete_url = f"{st.session_state['api_url']}/model/new"

            headers = {"Authorization": f"Bearer {st.session_state['proxy_key']}"}
            response = requests.post(complete_url, json=payload, headers=headers)

            if response.status_code == 200:
                st.success("Model added successfully!")
            else:
                st.error(f"Failed to add model. Status code: {response.status_code}")
        except Exception as e:
            st.error(f"An error occurred while adding the model: {e}")

def streamlit_ui():
    import streamlit as st
    import requests

    st.header("Admin Configuration")

    # Add a navigation sidebar
    st.sidebar.title("Navigation")
    page = st.sidebar.radio("Go to", ("Proxy Setup", "Add Models", "List Models"))

    # Initialize session state variables if not already present
    if "api_url" not in st.session_state:
        st.session_state["api_url"] = None
    if "proxy_key" not in st.session_state:
        st.session_state["proxy_key"] = None

    # Display different pages based on navigation selection
    if page == "Proxy Setup":
        # Use text inputs with intermediary variables
        input_api_url = st.text_input(
            "Proxy Endpoint",
            value=st.session_state.get("api_url") or "",  # avoid rendering a literal "None"
            placeholder="http://0.0.0.0:8000",
        )
        input_proxy_key = st.text_input(
            "Proxy Key",
            value=st.session_state.get("proxy_key") or "",
            placeholder="sk-...",
        )
        # When the "Save" button is clicked, update the session state
        if st.button("Save"):
            st.session_state["api_url"] = input_api_url
            st.session_state["proxy_key"] = input_proxy_key
            st.success("Configuration saved!")
    elif page == "Add Models":
        add_new_model()
    elif page == "List Models":
        # Check if the necessary configuration is available
        if (
            st.session_state.get("api_url", None) is not None
            and st.session_state.get("proxy_key", None) is not None
        ):
            # Make the GET request
            try:
                complete_url = ""
                if isinstance(st.session_state["api_url"], str) and st.session_state[
                    "api_url"
                ].endswith("/"):
                    complete_url = f"{st.session_state['api_url']}models"
                else:
                    complete_url = f"{st.session_state['api_url']}/models"
                response = requests.get(
                    complete_url,
                    headers={
                        "Authorization": f"Bearer {st.session_state['proxy_key']}"
                    },
                )
                # Check if the request was successful
                if response.status_code == 200:
                    models = response.json()
                    st.write(models)  # or st.json(models) to pretty-print the JSON
                else:
                    st.error(
                        f"Failed to get models. Status code: {response.status_code}"
                    )
            except Exception as e:
                st.error(f"An error occurred while requesting models: {e}")
        else:
            st.warning(
                "Please configure the Proxy Endpoint and Proxy Key on the Proxy Setup page."
            )


if __name__ == "__main__":
    streamlit_ui()
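
For reference, the request the "Add Models" page sends can be reproduced without the UI; a minimal sketch, assuming a proxy reachable at http://0.0.0.0:8000 with a placeholder key sk-1234 (both hypothetical values):

import requests, uuid

# Hypothetical endpoint and key; substitute your own proxy values.
api_url, proxy_key = "http://0.0.0.0:8000", "sk-1234"

payload = {
    "model_name": "gpt-3.5-turbo",  # user-facing alias
    "litellm_params": {"model": "azure/gpt-35-turbo-us-east"},
    "model_info": {"id": str(uuid.uuid4()), "mode": "completion"},
}
response = requests.post(
    f"{api_url}/model/new",
    json=payload,
    headers={"Authorization": f"Bearer {proxy_key}"},
)
print(response.status_code, response.text)

This mirrors the payload the form builds above, so it doubles as a smoke test for the /model/new endpoint.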

litellm/proxy/proxy_server.py

@@ -100,7 +100,7 @@ from typing import Union
app = FastAPI(
    docs_url="/",
    title="LiteLLM API",
-   description="Proxy Server to call 100+ LLMs in the OpenAI format",
+   description="Proxy Server to call 100+ LLMs in the OpenAI format\n\nAdmin Panel on `/admin` endpoint",
)
router = APIRouter()
origins = ["*"]

@@ -346,6 +346,29 @@ def load_from_azure_key_vault(use_azure_key_vault: bool = False):
    )


async def run_streamlit_ui():
    try:
        # Start Streamlit without opening the browser automatically
        process = subprocess.Popen(
            [
                "streamlit",
                "run",
                "admin_ui.py",
                "--server.headless=true",
                "--browser.serverAddress=0.0.0.0",
                "--server.enableCORS=false",
            ]
        )
        # Give the Streamlit server a moment to start up
        await asyncio.sleep(1)
        print("Streamlit UI server has started successfully.")
        # Keep the background task alive so the subprocess keeps running
        while True:
            await asyncio.sleep(3600)
    except Exception as e:
        print(f"An error occurred: {e}")


def cost_tracking():
    global prisma_client
    if prisma_client is not None:
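
One caveat worth noting: subprocess.Popen(["streamlit", "run", "admin_ui.py", ...]) resolves the script path against the process's current working directory, so the UI is only found when the proxy is started from litellm/proxy/. A minimal hardening sketch (a hypothetical variant, not part of this commit) resolves the path relative to proxy_server.py itself:

import os
import subprocess

# Hypothetical variant: locate admin_ui.py next to this file rather than in the CWD.
ui_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "admin_ui.py")
process = subprocess.Popen(["streamlit", "run", ui_path, "--server.headless=true"])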

@@ -982,6 +1005,9 @@ async def startup_event():
        duration=None, models=[], aliases={}, config={}, spend=0, token=master_key
    )

    # Run streamlit_ui as a background task
    asyncio.create_task(run_streamlit_ui())


#### API ENDPOINTS ####
@router.get(

@@ -1728,6 +1754,8 @@ async def add_new_model(model_params: ModelParams):
            }
        )

        print(f"updated model list: {config['model_list']}")

        # Save the updated config
        with open(f"{user_config_file_path}", "w") as config_file:
            yaml.dump(config, config_file, default_flow_style=False)

@@ -1737,6 +1765,7 @@ async def add_new_model(model_params: ModelParams):
            router=llm_router, config_file_path=user_config_file_path
        )

        print(f"llm_model_list: {llm_model_list}")
        return {"message": "Model added successfully"}

    except Exception as e:

@@ -2036,6 +2065,36 @@ async def retrieve_server_log(request: Request):
    return FileResponse(filepath)


#### ADMIN UI ENDPOINTS ####


@router.get("/admin")
async def admin_page(request: Request):
    from fastapi.responses import HTMLResponse

    # Assuming the Streamlit app is running on localhost port 8501
    html_content = """
    <html>
    <head>
        <title>Admin Page</title>
        <style>
            html, body, iframe {
                margin: 0;
                padding: 0;
                width: 100%;
                height: 100%;
                border: none;
            }
        </style>
    </head>
    <body>
        <iframe src="http://localhost:8501"></iframe>
    </body>
    </html>
    """
    return HTMLResponse(content=html_content)


#### BASIC ENDPOINTS ####
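
With both processes up, the wiring can be sanity-checked end to end; a minimal sketch, assuming the proxy's default address of http://0.0.0.0:8000 (substitute your own deployment address):

import requests

# The /admin endpoint should return the iframe wrapper page,
# which in turn points the browser at the Streamlit server on port 8501.
resp = requests.get("http://0.0.0.0:8000/admin")
assert resp.status_code == 200
assert 'iframe src="http://localhost:8501"' in resp.text

Note that the iframe targets localhost:8501, so the embedded UI only renders when the browser can reach the Streamlit port directly.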

pyproject.toml

@@ -33,6 +33,7 @@ proxy = [
    "pyyaml",
    "rq",
    "orjson",
+   "streamlit"
]

extra_proxy = [
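
Since streamlit now ships with the proxy extra, installing via pip install "litellm[proxy]" should make it importable; a quick dependency check (package names taken from the extras list above, noting that pyyaml imports as yaml):

import importlib.util

# Verify the proxy extra's new dependency (plus neighbors) is installed.
for pkg in ("streamlit", "orjson", "yaml"):
    found = importlib.util.find_spec(pkg) is not None
    print(f"{pkg}: {'ok' if found else 'missing'}")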

@@ -5,7 +5,7 @@ from dotenv import load_dotenv

load_dotenv()
import streamlit as st
-import base64, binascii, os
+import base64, binascii, os, json
from admin import admin_page
from auth import auth_page, verify_with_otp
import urllib.parse