diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 93bd8713c..5410b8ef6 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -374,7 +374,7 @@ async def run_streamlit_ui():
         while True:
             await asyncio.sleep(3600)
     except Exception as e:
-        print(f"An error occurred: {e}")
+        print_verbose(f"Admin UI - Streamlit. An error occurred: {e}")
 
 
 def cost_tracking():
@@ -1013,9 +1013,6 @@ async def startup_event():
             duration=None, models=[], aliases={}, config={}, spend=0, token=master_key
         )
 
-    # Run streamlit_ui as a background task
-    asyncio.create_task(run_streamlit_ui())
-
 
 #### API ENDPOINTS ####
 @router.get(
diff --git a/requirements.txt b/requirements.txt
index f2e0c7933..8dbf49ef9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,7 +15,6 @@ async_generator==1.10.0 # for async ollama calls
 traceloop-sdk==0.5.3 # for open telemetry logging
 langfuse>=2.0.0 # for langfuse self-hosted logging
 orjson==3.9.7 # fast /embedding responses
-streamlit==1.27.2 # for admin ui
 ### LITELLM PACKAGE DEPENDENCIES
 python-dotenv>=0.2.0 # for env
 tiktoken>=0.4.0 # for calculating usage