fix for key management

Ishaan Jaff 2025-04-16 17:30:58 -07:00
parent d16f9238ff
commit 4fe81bc9f7
2 changed files with 23 additions and 20 deletions

@@ -648,6 +648,7 @@ class GenerateRequestBase(LiteLLMPydanticObjectBase):
    rpm_limit: Optional[int] = None
    budget_duration: Optional[str] = None
    allowed_cache_controls: Optional[list] = []
+   allowed_routes: Optional[list] = []
    config: Optional[dict] = {}
    permissions: Optional[dict] = {}
    model_max_budget: Optional[dict] = (
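
Side note on the new field's default (not part of the commit): Pydantic copies mutable defaults per model instance, so allowed_routes: Optional[list] = [] does not share one list across requests. A standalone sketch of that behavior, where DemoRequest is a made-up stand-in for the real request model:

```python
# Illustration only; DemoRequest is a hypothetical stand-in for the proxy's
# request model. It shows that Pydantic copies mutable defaults per instance.
from typing import Optional
from pydantic import BaseModel


class DemoRequest(BaseModel):
    allowed_routes: Optional[list] = []  # mutable default is safe in Pydantic


a = DemoRequest()
b = DemoRequest()
a.allowed_routes.append("/chat/completions")
assert b.allowed_routes == []  # each instance gets its own copy of the default
```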

@@ -372,7 +372,7 @@ async def generate_key_fn(  # noqa: PLR0915
    - soft_budget: Optional[float] - Specify soft budget for a given key. Will trigger a slack alert when this soft budget is reached.
    - tags: Optional[List[str]] - Tags for [tracking spend](https://litellm.vercel.app/docs/proxy/enterprise#tracking-spend-for-custom-tags) and/or doing [tag-based routing](https://litellm.vercel.app/docs/proxy/tag_routing).
    - enforced_params: Optional[List[str]] - List of enforced params for the key (Enterprise only). [Docs](https://docs.litellm.ai/docs/proxy/enterprise#enforce-required-params-for-llm-requests)
+   - allowed_routes: Optional[list] - List of allowed routes for the key. Store the actual route or store a wildcard pattern for a set of routes. Example - ["/chat/completions", "/embeddings", "/keys/*"]

    Examples:

    1. Allow users to turn on/off pii masking
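
For reference, a minimal usage sketch of the new allowed_routes parameter (not part of the commit), assuming a proxy running at http://localhost:4000 and a placeholder master key:

```python
# Usage sketch: the proxy URL and master key below are placeholder assumptions.
import requests

PROXY_BASE_URL = "http://localhost:4000"  # assumed local proxy address
MASTER_KEY = "sk-1234"                    # placeholder; use your own master key

# Request a key restricted to the routes from the docstring example above.
resp = requests.post(
    f"{PROXY_BASE_URL}/key/generate",
    headers={"Authorization": f"Bearer {MASTER_KEY}"},
    json={"allowed_routes": ["/chat/completions", "/embeddings", "/keys/*"]},
)
resp.raise_for_status()
print(resp.json()["key"])  # the newly generated virtual key
```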
@@ -577,9 +577,9 @@ async def generate_key_fn(  # noqa: PLR0915
            request_type="key", **data_json, table_name="key"
        )
-       response[
-           "soft_budget"
-       ] = data.soft_budget  # include the user-input soft budget in the response
+       response["soft_budget"] = (
+           data.soft_budget
+       )  # include the user-input soft budget in the response
        response = GenerateKeyResponse(**response)
@@ -1167,6 +1167,7 @@ async def generate_key_helper_fn(  # noqa: PLR0915
    send_invite_email: Optional[bool] = None,
    created_by: Optional[str] = None,
    updated_by: Optional[str] = None,
+   allowed_routes: Optional[list] = None,
):
    from litellm.proxy.proxy_server import (
        litellm_proxy_budget_name,
@@ -1272,6 +1273,7 @@ async def generate_key_helper_fn(  # noqa: PLR0915
        "blocked": blocked,
        "created_by": created_by,
        "updated_by": updated_by,
+       "allowed_routes": allowed_routes,
    }

    if (
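
The hunk above persists allowed_routes on the generated key. As a purely illustrative sketch of the wildcard semantics described in the docstring (e.g. "/keys/*"), and not the proxy's actual enforcement code, a route check could look like this:

```python
# Illustrative only: route_allowed and its "empty list means unrestricted"
# semantics are assumptions, not code from this commit. It shows one way exact
# routes and wildcard patterns (e.g. "/keys/*") could be matched.
from fnmatch import fnmatch
from typing import List, Optional


def route_allowed(route: str, allowed_routes: Optional[List[str]]) -> bool:
    """Return True if `route` matches any entry in `allowed_routes`."""
    if not allowed_routes:
        return True  # assumed: no restriction configured
    return any(fnmatch(route, pattern) for pattern in allowed_routes)


assert route_allowed("/chat/completions", ["/chat/completions", "/keys/*"])
assert route_allowed("/keys/delete", ["/keys/*"])
assert not route_allowed("/models", ["/chat/completions"])
```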
@@ -1467,10 +1469,10 @@ async def delete_verification_tokens(
    try:
        if prisma_client:
            tokens = [_hash_token_if_needed(token=key) for key in tokens]
-           _keys_being_deleted: List[
-               LiteLLM_VerificationToken
-           ] = await prisma_client.db.litellm_verificationtoken.find_many(
-               where={"token": {"in": tokens}}
-           )
+           _keys_being_deleted: List[LiteLLM_VerificationToken] = (
+               await prisma_client.db.litellm_verificationtoken.find_many(
+                   where={"token": {"in": tokens}}
+               )
+           )

            # Assuming 'db' is your Prisma Client instance
@@ -1572,9 +1574,9 @@ async def _rotate_master_key(
    from litellm.proxy.proxy_server import proxy_config

    try:
-       models: Optional[
-           List
-       ] = await prisma_client.db.litellm_proxymodeltable.find_many()
+       models: Optional[List] = (
+           await prisma_client.db.litellm_proxymodeltable.find_many()
+       )
    except Exception:
        models = None
    # 2. process model table
@@ -1861,11 +1863,11 @@ async def validate_key_list_check(
                param="user_id",
                code=status.HTTP_403_FORBIDDEN,
            )
-       complete_user_info_db_obj: Optional[
-           BaseModel
-       ] = await prisma_client.db.litellm_usertable.find_unique(
-           where={"user_id": user_api_key_dict.user_id},
-           include={"organization_memberships": True},
-       )
+       complete_user_info_db_obj: Optional[BaseModel] = (
+           await prisma_client.db.litellm_usertable.find_unique(
+               where={"user_id": user_api_key_dict.user_id},
+               include={"organization_memberships": True},
+           )
+       )

        if complete_user_info_db_obj is None:
@@ -1926,10 +1928,10 @@ async def get_admin_team_ids(
    if complete_user_info is None:
        return []
    # Get all teams that user is an admin of
-   teams: Optional[
-       List[BaseModel]
-   ] = await prisma_client.db.litellm_teamtable.find_many(
-       where={"team_id": {"in": complete_user_info.teams}}
-   )
+   teams: Optional[List[BaseModel]] = (
+       await prisma_client.db.litellm_teamtable.find_many(
+           where={"team_id": {"in": complete_user_info.teams}}
+       )
+   )
    if teams is None:
        return []