fix(router.py): mask the api key in debug statements on router

Krrish Dholakia 2024-02-21 18:13:03 -08:00
parent 0733bf1e7a
commit 2a0d2dbdf9
2 changed files with 24 additions and 5 deletions

model_prices_and_context_window.json

@@ -936,7 +936,14 @@
         "mode": "chat"
     },
     "openrouter/mistralai/mistral-7b-instruct": {
-        "max_tokens": 4096,
+        "max_tokens": 8192,
+        "input_cost_per_token": 0.00000013,
+        "output_cost_per_token": 0.00000013,
+        "litellm_provider": "openrouter",
+        "mode": "chat"
+    },
+    "openrouter/mistralai/mistral-7b-instruct:free": {
+        "max_tokens": 8192,
         "input_cost_per_token": 0.0,
         "output_cost_per_token": 0.0,
         "litellm_provider": "openrouter",

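For context, a minimal sketch of how a per-token price entry like the ones added above turns into a request cost. The prices dict mirrors the JSON entries; request_cost and the token counts are illustrative only, not litellm's API:

# Hypothetical cost helper built on the pricing entries added above.
prices = {
    "openrouter/mistralai/mistral-7b-instruct": {
        "input_cost_per_token": 0.00000013,
        "output_cost_per_token": 0.00000013,
    },
    "openrouter/mistralai/mistral-7b-instruct:free": {
        "input_cost_per_token": 0.0,
        "output_cost_per_token": 0.0,
    },
}

def request_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    entry = prices[model]
    return (
        prompt_tokens * entry["input_cost_per_token"]
        + completion_tokens * entry["output_cost_per_token"]
    )

print(request_cost("openrouter/mistralai/mistral-7b-instruct", 1000, 200))       # ~0.000156
print(request_cost("openrouter/mistralai/mistral-7b-instruct:free", 1000, 200))  # 0.0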
router.py

@@ -142,11 +142,13 @@ class Router:
             Router: An instance of the litellm.Router class.
         """
         self.set_verbose = set_verbose
-        if self.set_verbose:
+        self.debug_level = debug_level
+        if self.set_verbose == True:
             if debug_level == "INFO":
                 verbose_router_logger.setLevel(logging.INFO)
             elif debug_level == "DEBUG":
                 verbose_router_logger.setLevel(logging.DEBUG)
+
         self.deployment_names: List = (
             []
         )  # names of models under litellm_params. ex. azure/chatgpt-v-2
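The verbosity wiring in the hunk above reads cleanly in isolation: the logger level is raised only when set_verbose is on, and debug_level picks between INFO and DEBUG. A self-contained sketch, with the logger name assumed for illustration:

import logging

# Assumed logger name for this sketch; litellm defines its own
# verbose_router_logger elsewhere.
verbose_router_logger = logging.getLogger("router")

def configure_router_logging(set_verbose: bool, debug_level: str = "INFO") -> None:
    # Mirrors the diff: no level change unless verbose mode is enabled.
    if set_verbose:
        if debug_level == "INFO":
            verbose_router_logger.setLevel(logging.INFO)
        elif debug_level == "DEBUG":
            verbose_router_logger.setLevel(logging.DEBUG)

configure_router_logging(set_verbose=True, debug_level="DEBUG")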
@@ -273,6 +275,16 @@ class Router:
             f"Intialized router with Routing strategy: {self.routing_strategy}\n"
         )

+    def print_deployment(self, deployment: dict):
+        """
+        returns a copy of the deployment with the api key masked
+        """
+        _deployment_copy = copy.deepcopy(deployment)
+        litellm_params: dict = _deployment_copy["litellm_params"]
+        if "api_key" in litellm_params:
+            litellm_params["api_key"] = litellm_params["api_key"][:2] + "*" * 10
+        return _deployment_copy
+
     ### COMPLETION, EMBEDDING, IMG GENERATION FUNCTIONS
     def completion(
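The new print_deployment helper is easy to exercise on its own. A self-contained sketch of the masking behavior, using a made-up deployment dict:

import copy

def print_deployment(deployment: dict) -> dict:
    # Same logic as the hunk above: mask the api key, leave everything else.
    _deployment_copy = copy.deepcopy(deployment)
    litellm_params: dict = _deployment_copy["litellm_params"]
    if "api_key" in litellm_params:
        litellm_params["api_key"] = litellm_params["api_key"][:2] + "*" * 10
    return _deployment_copy

deployment = {
    "model_name": "gpt-3.5-turbo",
    "litellm_params": {"model": "azure/chatgpt-v-2", "api_key": "sk-1234-secret"},
}
masked = print_deployment(deployment)
print(masked["litellm_params"]["api_key"])      # sk**********
print(deployment["litellm_params"]["api_key"])  # sk-1234-secret (unchanged)

The deepcopy matters here: mutating a shallow copy would also overwrite the api_key on the live deployment dict used for the actual request.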
@@ -2060,7 +2072,7 @@ class Router:
             verbose_router_logger.debug(f"\n selected index, {selected_index}")
             deployment = healthy_deployments[selected_index]
             verbose_router_logger.info(
-                f"get_available_deployment for model: {model}, Selected deployment: {deployment or deployment[0]} for model: {model}"
+                f"get_available_deployment for model: {model}, Selected deployment: {self.print_deployment(deployment) or deployment[0]} for model: {model}"
             )
             return deployment or deployment[0]
         ############## Check if we can do a RPM/TPM based weighted pick #################
@@ -2077,7 +2089,7 @@
             verbose_router_logger.debug(f"\n selected index, {selected_index}")
             deployment = healthy_deployments[selected_index]
             verbose_router_logger.info(
-                f"get_available_deployment for model: {model}, Selected deployment: {deployment or deployment[0]} for model: {model}"
+                f"get_available_deployment for model: {model}, Selected deployment: {self.print_deployment(deployment) or deployment[0]} for model: {model}"
             )
             return deployment or deployment[0]
@@ -2108,7 +2120,7 @@
             )
             raise ValueError("No models available.")
         verbose_router_logger.info(
-            f"get_available_deployment for model: {model}, Selected deployment: {deployment} for model: {model}"
+            f"get_available_deployment for model: {model}, Selected deployment: {self.print_deployment(deployment)} for model: {model}"
         )
         return deployment