fix(proxy_server.py): add new flag to disable sharing master key on UI

This commit is contained in:
Krrish Dholakia 2024-04-24 10:06:01 -07:00
parent b918f58262
commit 26e9ae38ce
3 changed files with 10 additions and 1 deletion

View file

@ -600,6 +600,7 @@ general_settings:
"general_settings": {
"completion_model": "string",
"disable_spend_logs": "boolean", # turn off writing each transaction to the db
"disable_master_key_return": "boolean", # turn off returning master key on UI (checked on '/user/info' endpoint)
"disable_reset_budget": "boolean", # turn off reset budget scheduled task
"enable_jwt_auth": "boolean", # allow proxy admin to auth in via jwt tokens with 'litellm_proxy_admin' in claims
"enforce_user_param": "boolean", # requires all openai endpoint requests to have a 'user' param

View file

@ -52,5 +52,6 @@ litellm_settings:
general_settings:
alerting: ["slack"]
disable_master_key_return: true
alerting_threshold: 300 # sends alerts if requests hang for 5min+ and responses take 5min+
proxy_batch_write_at: 60 # Frequency of batch writing logs to server (in seconds)

View file

@ -2312,7 +2312,7 @@ class ProxyConfig:
master_key = litellm.get_secret(master_key)
if master_key is not None and isinstance(master_key, str):
litellm_master_key_hash = master_key
litellm_master_key_hash = hash_token(master_key)
### STORE MODEL IN DB ### feature flag for `/model/new`
store_model_in_db = general_settings.get("store_model_in_db", False)
if store_model_in_db is None:
@ -5977,6 +5977,13 @@ async def user_info(
## REMOVE HASHED TOKEN INFO before returning ##
returned_keys = []
for key in keys:
if (
key.token == litellm_master_key_hash
and general_settings.get("disable_master_key_return", False)
== True ## [IMPORTANT] used by hosted proxy-ui to prevent sharing master key on ui
):
continue
try:
key = key.model_dump() # noqa
except: