fix(proxy_server.py): allow /model/new feature flag to work via env

Krrish Dholakia 2024-04-08 14:57:19 -07:00
parent f2e34f456d
commit 009f548079
4 changed files with 17 additions and 2 deletions

@@ -0,0 +1,9 @@
# 🎉 Demo App
Here is a demo of the proxy. To log in, use:
- Username: admin
- Password: sk-1234
<iframe src="https://litellm-production-12bb.up.railway.app/ui" width="100%" height="800px"></iframe>
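Beyond the embedded UI, the demo can also be exercised programmatically. The sketch below is a minimal, non-authoritative example: it assumes the demo proxy exposes the standard OpenAI-compatible `/chat/completions` route and that a model alias such as `gpt-3.5-turbo` is configured; the bearer token is simply the demo password shown above.

```python
# Minimal sketch: call the demo proxy's OpenAI-compatible endpoint.
# Assumptions: /chat/completions is exposed and a "gpt-3.5-turbo" alias exists.
import requests

resp = requests.post(
    "https://litellm-production-12bb.up.railway.app/chat/completions",
    headers={"Authorization": "Bearer sk-1234"},  # demo password from above
    json={
        "model": "gpt-3.5-turbo",  # hypothetical model alias
        "messages": [{"role": "user", "content": "Hello from the demo!"}],
    },
    timeout=30,
)
print(resp.status_code, resp.json())
```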

@@ -36,6 +36,7 @@ const sidebars = {
label: "📖 All Endpoints (Swagger)",
href: "https://litellm-api.up.railway.app/",
},
"proxy/demo",
"proxy/configs",
"proxy/reliability",
"proxy/users",

@@ -36,7 +36,7 @@ general_settings:
master_key: sk-1234
allow_user_auth: true
alerting: ["slack"]
store_model_in_db: True
# store_model_in_db: True // set via environment variable - os.environ["STORE_MODEL_IN_DB"] = "True"
# proxy_batch_write_at: 60 # 👈 Frequency of batch writing logs to server (in seconds)
enable_jwt_auth: True
alerting: ["slack"]
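With `store_model_in_db` commented out of the config, the flag now has to arrive through the environment before the proxy starts. A minimal sketch of doing that from Python follows; the `litellm --config config.yaml` invocation and the local `config.yaml` path are assumptions about the deployment, not part of this commit.

```python
# Minimal sketch: enable the /model/new feature flag via the environment
# before launching the proxy. Assumes the `litellm --config` CLI and a local
# config.yaml; adjust both to the actual deployment.
import os
import subprocess

env = os.environ.copy()
env["STORE_MODEL_IN_DB"] = "True"  # read by ProxyConfig at startup

subprocess.run(["litellm", "--config", "config.yaml"], env=env, check=True)
```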

@@ -2333,7 +2333,12 @@ class ProxyConfig:
"background_health_checks", False
)
health_check_interval = general_settings.get("health_check_interval", 300)
## check env ##
_store_model_in_db = litellm.get_secret(
"STORE_MODEL_IN_DB", None
) # feature flag for `/model/new`
if _store_model_in_db is not None and _store_model_in_db == True:
general_settings["store_model_in_db"] = True
router_params: dict = {
"cache_responses": litellm.cache
!= None, # cache if user passed in cache values
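The new block reads `STORE_MODEL_IN_DB` through `litellm.get_secret` and, when it is truthy, forces `general_settings["store_model_in_db"] = True`, which is what gates `/model/new`. The standalone sketch below mirrors that env-driven check; reading `os.environ` directly and the string-to-bool coercion are stand-ins for whatever `get_secret` actually returns, not a claim about its implementation.

```python
# Standalone sketch of the env-driven feature-flag check added above.
# Assumption: reading os.environ and coercing "True"/"true" to a bool
# approximates litellm.get_secret("STORE_MODEL_IN_DB", None).
import os

def apply_store_model_in_db_flag(general_settings: dict) -> bool:
    raw = os.environ.get("STORE_MODEL_IN_DB")
    if raw is not None and str(raw).strip().lower() == "true":
        general_settings["store_model_in_db"] = True
    return general_settings.get("store_model_in_db", False)

# Example: setting the variable before startup enables /model/new.
os.environ["STORE_MODEL_IN_DB"] = "True"
print(apply_store_model_in_db_flag({}))  # True
```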