feat(model_management_endpoints.py): support audit logs on /model/add and /model/update endpoints

complete CUD endpoint audit logging on models + users
This commit is contained in:
Krrish Dholakia 2025-03-13 19:17:40 -07:00
parent 9145e8db77
commit dc3b02920f
2 changed files with 298 additions and 230 deletions

View file

@ -5438,235 +5438,6 @@ async def transform_request(request: TransformRequestBody):
return return_raw_request(endpoint=request.call_type, kwargs=request.request_body)
#### [BETA] - This is a beta endpoint, format might change based on user feedback. - https://github.com/BerriAI/litellm/issues/964
@router.post(
    "/model/new",
    description="Allows adding new models to the model list in the config.yaml",
    tags=["model management"],
    dependencies=[Depends(user_api_key_auth)],
)
async def add_new_model(
    model_params: Deployment,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Add a new model deployment to the proxy's model list.

    Requires a connected DB (`prisma_client`) and `store_model_in_db` to be
    enabled. Team-scoped models (`model_info.team_id`) are premium-only and
    must match the caller's team. On success the model is written to the DB
    (via `_add_model_to_db` / `_add_team_model_to_db`), the router config is
    reloaded, and a Slack alert is sent when alerting is configured.

    Args:
        model_params: deployment definition to store (model_name, litellm_params, model_info).
        user_api_key_dict: resolved auth info for the calling API key.

    Raises:
        ProxyException: wraps any HTTPException / unexpected error for the client.
    """
    global llm_router, llm_model_list, general_settings, user_config_file_path, proxy_config, prisma_client, master_key, store_model_in_db, proxy_logging_obj, premium_user

    try:
        if prisma_client is None:
            raise HTTPException(
                status_code=500,
                detail={
                    "error": "No DB Connected. Here's how to do it - https://docs.litellm.ai/docs/proxy/virtual_keys"
                },
            )

        # Team-scoped models are a premium-only feature.
        if model_params.model_info.team_id is not None and premium_user is not True:
            raise HTTPException(
                status_code=403,
                detail={"error": CommonProxyErrors.not_premium_user.value},
            )

        if not check_if_team_id_matches_key(
            team_id=model_params.model_info.team_id, user_api_key_dict=user_api_key_dict
        ):
            raise HTTPException(
                status_code=403,
                detail={"error": "Team ID does not match the API key's team ID"},
            )

        model_response = None
        # update DB
        if store_model_in_db is True:
            """
            - store model_list in db
            - store keys separately
            """
            try:
                # model_name may be rewritten by the DB helpers; keep the
                # original for the alert below.
                _original_litellm_model_name = model_params.model_name
                if model_params.model_info.team_id is None:
                    model_response = await _add_model_to_db(
                        model_params=model_params,
                        user_api_key_dict=user_api_key_dict,
                        prisma_client=prisma_client,
                    )
                else:
                    model_response = await _add_team_model_to_db(
                        model_params=model_params,
                        user_api_key_dict=user_api_key_dict,
                        prisma_client=prisma_client,
                    )
                # reload the router so the new deployment is live immediately
                await proxy_config.add_deployment(
                    prisma_client=prisma_client, proxy_logging_obj=proxy_logging_obj
                )
                # don't let failed slack alert block the /model/new response
                _alerting = general_settings.get("alerting", []) or []
                if "slack" in _alerting:
                    # send notification - new model added
                    await proxy_logging_obj.slack_alerting_instance.model_added_alert(
                        model_name=model_params.model_name,
                        litellm_model_name=_original_litellm_model_name,
                        passed_model_info=model_params.model_info,
                    )
            except Exception as e:
                # NOTE(review): failures here (DB write / router reload / alert)
                # are logged and swallowed, so the endpoint can return HTTP 200
                # with a null body — confirm this best-effort behavior is intended.
                verbose_proxy_logger.exception(f"Exception in add_new_model: {e}")
        else:
            raise HTTPException(
                status_code=500,
                detail={
                    "error": "Set `'STORE_MODEL_IN_DB='True'` in your env to enable this feature."
                },
            )

        return model_response

    except Exception as e:
        verbose_proxy_logger.error(
            "litellm.proxy.proxy_server.add_new_model(): Exception occured - {}".format(
                str(e)
            )
        )
        verbose_proxy_logger.debug(traceback.format_exc())
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "detail", f"Authentication Error({str(e)})"),
                type=ProxyErrorTypes.auth_error,
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
            )
        elif isinstance(e, ProxyException):
            raise e
        raise ProxyException(
            message="Authentication Error, " + str(e),
            type=ProxyErrorTypes.auth_error,
            param=getattr(e, "param", "None"),
            code=status.HTTP_400_BAD_REQUEST,
        )
#### MODEL MANAGEMENT ####
@router.post(
    "/model/update",
    description="Edit existing model params",
    tags=["model management"],
    dependencies=[Depends(user_api_key_auth)],
)
async def update_model(
    model_params: updateDeployment,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Old endpoint for model update. Makes a PUT request.
    Use `/model/{model_id}/update` to PATCH the stored model in db.

    Looks up the stored model by `model_params.model_info.id`, encrypts the
    newly supplied `litellm_params`, merges them over the existing stored
    params (new non-None values win), and writes the merged record back to
    the `litellm_proxymodeltable`.

    Args:
        model_params: partial deployment; `model_info.id` and `litellm_params` are required.
        user_api_key_dict: resolved auth info for the calling API key.

    Raises:
        ProxyException: wraps any HTTPException / unexpected error for the client.
    """
    global llm_router, llm_model_list, general_settings, user_config_file_path, proxy_config, prisma_client, master_key, store_model_in_db, proxy_logging_obj

    try:
        if prisma_client is None:
            raise HTTPException(
                status_code=500,
                detail={
                    "error": "No DB Connected. Here's how to do it - https://docs.litellm.ai/docs/proxy/virtual_keys"
                },
            )

        # update DB
        if store_model_in_db is True:
            _model_id = None
            _model_info = getattr(model_params, "model_info", None)
            if _model_info is None:
                raise Exception("model_info not provided")

            _model_id = _model_info.id
            if _model_id is None:
                raise Exception("model_info.id not provided")

            _existing_litellm_params = (
                await prisma_client.db.litellm_proxymodeltable.find_unique(
                    where={"model_id": _model_id}
                )
            )
            if _existing_litellm_params is None:
                # config-file models live outside the DB and can't be edited here
                if (
                    llm_router is not None
                    and llm_router.get_deployment(model_id=_model_id) is not None
                ):
                    raise HTTPException(
                        status_code=400,
                        detail={
                            "error": "Can't edit model. Model in config. Store model in db via `/model/new`. to edit."
                        },
                    )
                raise Exception("model not found")

            _existing_litellm_params_dict = dict(
                _existing_litellm_params.litellm_params
            )

            if model_params.litellm_params is None:
                raise Exception("litellm_params not provided")

            _new_litellm_params_dict = model_params.litellm_params.dict(
                exclude_none=True
            )

            ### ENCRYPT PARAMS ###
            # secrets (api keys etc.) are stored encrypted at rest
            for k, v in _new_litellm_params_dict.items():
                encrypted_value = encrypt_value_helper(value=v)
                model_params.litellm_params[k] = encrypted_value

            ### MERGE WITH EXISTING DATA ###
            # new non-None values win; otherwise keep the stored value
            merged_dictionary = {}
            _mp = model_params.litellm_params.dict()
            for key, value in _mp.items():
                if value is not None:
                    merged_dictionary[key] = value
                elif (
                    key in _existing_litellm_params_dict
                    and _existing_litellm_params_dict[key] is not None
                ):
                    merged_dictionary[key] = _existing_litellm_params_dict[key]
                else:
                    pass

            _data: dict = {
                "litellm_params": json.dumps(merged_dictionary),  # type: ignore
                "updated_by": user_api_key_dict.user_id or litellm_proxy_admin_name,
            }
            model_response = await prisma_client.db.litellm_proxymodeltable.update(
                where={"model_id": _model_id},
                data=_data,  # type: ignore
            )

            return model_response

    except Exception as e:
        verbose_proxy_logger.error(
            "litellm.proxy.proxy_server.update_model(): Exception occured - {}".format(
                str(e)
            )
        )
        verbose_proxy_logger.debug(traceback.format_exc())
        if isinstance(e, HTTPException):
            raise ProxyException(
                message=getattr(e, "detail", f"Authentication Error({str(e)})"),
                type=ProxyErrorTypes.auth_error,
                param=getattr(e, "param", "None"),
                code=getattr(e, "status_code", status.HTTP_400_BAD_REQUEST),
            )
        elif isinstance(e, ProxyException):
            raise e
        raise ProxyException(
            message="Authentication Error, " + str(e),
            type=ProxyErrorTypes.auth_error,
            param=getattr(e, "param", "None"),
            code=status.HTTP_400_BAD_REQUEST,
        )
@router.get(
"/v2/model/info",
description="v2 - returns all the models set on the config.yaml, shows 'user_access' = True if the user has access to the model. Provides more info about each model in /models, including config.yaml descriptions (except api key and api base)",