feat(proxy_server.py): support disabling storing master key hash in db, for spend tracking

Krrish Dholakia 2024-08-21 12:35:37 -07:00
parent 8812da04e3
commit 89014dfc07
4 changed files with 53 additions and 20 deletions


@@ -728,6 +728,7 @@ general_settings:
   "disable_spend_logs": "boolean", # turn off writing each transaction to the db
   "disable_master_key_return": "boolean", # turn off returning master key on UI (checked on '/user/info' endpoint)
   "disable_reset_budget": "boolean", # turn off reset budget scheduled task
+  "disable_adding_master_key_hash_to_db": "boolean", # turn off storing master key hash in db, for spend tracking
   "enable_jwt_auth": "boolean", # allow proxy admin to auth in via jwt tokens with 'litellm_proxy_admin' in claims
   "enforce_user_param": "boolean", # requires all openai endpoint requests to have a 'user' param
   "allowed_routes": "list", # list of allowed proxy API routes - a user can access. (currently JWT-Auth only)


@@ -1,4 +1,7 @@
 model_list:
-  - model_name: ollama/mistral
+  - model_name: "*"
     litellm_params:
-      model: ollama/mistral
+      model: "*"
+
+general_settings:
+  disable_adding_master_key_hash_to_db: True
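
A quick way to sanity-check that the flag in this config parses as a real boolean before starting the proxy (illustrative only; assumes PyYAML is installed and the file is saved as proxy_config.yaml, since the filename is not shown in this diff):

import yaml  # PyYAML

with open("proxy_config.yaml") as f:
    config = yaml.safe_load(f)

# YAML parses both `True` and `true` to a Python bool, which is what the
# `... is True` checks added in this commit expect.
assert config["general_settings"]["disable_adding_master_key_hash_to_db"] is True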


@@ -2784,26 +2784,29 @@ async def startup_event():
         await custom_db_client.connect()
 
     if prisma_client is not None and master_key is not None:
-        # add master key to db
         if os.getenv("PROXY_ADMIN_ID", None) is not None:
             litellm_proxy_admin_name = os.getenv(
                 "PROXY_ADMIN_ID", litellm_proxy_admin_name
             )
-        asyncio.create_task(
-            generate_key_helper_fn(
-                request_type="user",
-                duration=None,
-                models=[],
-                aliases={},
-                config={},
-                spend=0,
-                token=master_key,
-                user_id=litellm_proxy_admin_name,
-                user_role=LitellmUserRoles.PROXY_ADMIN,
-                query_type="update_data",
-                update_key_values={"user_role": LitellmUserRoles.PROXY_ADMIN},
+        if general_settings.get("disable_adding_master_key_hash_to_db") is True:
+            verbose_proxy_logger.info("Skipping writing master key hash to db")
+        else:
+            # add master key to db
+            asyncio.create_task(
+                generate_key_helper_fn(
+                    request_type="user",
+                    duration=None,
+                    models=[],
+                    aliases={},
+                    config={},
+                    spend=0,
+                    token=master_key,
+                    user_id=litellm_proxy_admin_name,
+                    user_role=LitellmUserRoles.PROXY_ADMIN,
+                    query_type="update_data",
+                    update_key_values={"user_role": LitellmUserRoles.PROXY_ADMIN},
+                )
             )
-        )
 
     if prisma_client is not None and litellm.max_budget > 0:
         if litellm.budget_duration is None:
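
The guard above in condensed form (a toy sketch, not the proxy's real startup wiring; every name other than the settings key is invented for illustration):

import asyncio
from typing import Any, Dict

async def write_master_key_row() -> None:
    # placeholder for the generate_key_helper_fn(...) call in the real code
    await asyncio.sleep(0)

async def startup(general_settings: Dict[str, Any]) -> None:
    if general_settings.get("disable_adding_master_key_hash_to_db") is True:
        print("Skipping writing master key hash to db")
    else:
        # fire-and-forget in the real code; awaited here only so the toy
        # example finishes cleanly
        await asyncio.create_task(write_master_key_row())

asyncio.run(startup({"disable_adding_master_key_hash_to_db": True}))

Note the check is `is True` rather than plain truthiness, so only an explicit boolean True in the config skips the write.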


@@ -1,4 +1,6 @@
 import json
+import os
+import secrets
 import traceback
 from typing import Optional
 
@@ -8,12 +10,30 @@ from litellm.proxy._types import SpendLogsMetadata, SpendLogsPayload
 from litellm.proxy.utils import hash_token
 
 
+def _is_master_key(api_key: str, _master_key: Optional[str]) -> bool:
+    if _master_key is None:
+        return False
+
+    ## string comparison
+    is_master_key = secrets.compare_digest(api_key, _master_key)
+    if is_master_key:
+        return True
+
+    ## hash comparison
+    is_master_key = secrets.compare_digest(api_key, hash_token(_master_key))
+    if is_master_key:
+        return True
+
+    return False
+
+
 def get_logging_payload(
     kwargs, response_obj, start_time, end_time, end_user_id: Optional[str]
 ) -> SpendLogsPayload:
     from pydantic import Json
 
     from litellm.proxy._types import LiteLLM_SpendLogs
+    from litellm.proxy.proxy_server import general_settings, master_key
 
     verbose_proxy_logger.debug(
         f"SpendTable: get_logging_payload - kwargs: {kwargs}\n\n"
@@ -36,9 +56,15 @@ def get_logging_payload(
     usage = dict(usage)
     id = response_obj.get("id", kwargs.get("litellm_call_id"))
     api_key = metadata.get("user_api_key", "")
-    if api_key is not None and isinstance(api_key, str) and api_key.startswith("sk-"):
-        # hash the api_key
-        api_key = hash_token(api_key)
+    if api_key is not None and isinstance(api_key, str):
+        if api_key.startswith("sk-"):
+            # hash the api_key
+            api_key = hash_token(api_key)
+        if (
+            _is_master_key(api_key=api_key, _master_key=master_key)
+            and general_settings.get("disable_adding_master_key_hash_to_db") is True
+        ):
+            api_key = "litellm_proxy_master_key"  # use a known alias, if the user disabled storing master key in db
 
     _model_id = metadata.get("model_info", {}).get("id", "")
     _model_group = metadata.get("model_group", "")
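
Net effect on the spend-log payload, in miniature (illustrative stand-ins only: spend_log_api_key and the local hash_token below are invented for this sketch, and hash_token is assumed to be a SHA-256 hex digest):

import hashlib
import secrets

def hash_token(token: str) -> str:
    # stand-in for the proxy's hash_token helper
    return hashlib.sha256(token.encode()).hexdigest()

def spend_log_api_key(raw_key: str, master_key: str, disable_master_hash: bool) -> str:
    # mirrors the hunk above: hash sk- keys, then alias the master key when the
    # flag is set so its hash never reaches the spend table
    api_key = hash_token(raw_key) if raw_key.startswith("sk-") else raw_key
    is_master = secrets.compare_digest(raw_key, master_key) or secrets.compare_digest(
        api_key, hash_token(master_key)
    )
    if is_master and disable_master_hash:
        return "litellm_proxy_master_key"
    return api_key

print(spend_log_api_key("sk-1234", master_key="sk-1234", disable_master_hash=True))
# -> litellm_proxy_master_key
print(spend_log_api_key("sk-someuserkey", master_key="sk-1234", disable_master_hash=True))
# -> the user key's hash; behaviour for non-master keys is unchanged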