diff --git a/litellm/router.py b/litellm/router.py index e9bbd1ffc..e4b14dd09 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -294,11 +294,17 @@ class Router: """ returns a copy of the deployment with the api key masked """ - _deployment_copy = copy.deepcopy(deployment) - litellm_params: dict = _deployment_copy["litellm_params"] - if "api_key" in litellm_params: - litellm_params["api_key"] = litellm_params["api_key"][:2] + "*" * 10 - return _deployment_copy + try: + _deployment_copy = copy.deepcopy(deployment) + litellm_params: dict = _deployment_copy["litellm_params"] + if "api_key" in litellm_params: + litellm_params["api_key"] = litellm_params["api_key"][:2] + "*" * 10 + return _deployment_copy + except Exception as e: + verbose_router_logger.debug( + f"Error occurred while printing deployment - {str(e)}" + ) + raise e ### COMPLETION, EMBEDDING, IMG GENERATION FUNCTIONS @@ -310,6 +316,7 @@ class Router: response = router.completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hey, how's it going?"}] """ try: + verbose_router_logger.debug(f"router.completion(model={model},..)") kwargs["model"] = model kwargs["messages"] = messages kwargs["original_function"] = self._completion diff --git a/litellm/router_strategy/lowest_tpm_rpm.py b/litellm/router_strategy/lowest_tpm_rpm.py index b2f9d6e4e..3f1c67b61 100644 --- a/litellm/router_strategy/lowest_tpm_rpm.py +++ b/litellm/router_strategy/lowest_tpm_rpm.py @@ -148,6 +148,7 @@ class LowestTPMLoggingHandler(CustomLogger): input_tokens = token_counter(messages=messages, text=input) except: input_tokens = 0 + verbose_router_logger.debug(f"input_tokens={input_tokens}") # ----------------------- # Find lowest used model # ---------------------- @@ -209,4 +210,5 @@ class LowestTPMLoggingHandler(CustomLogger): elif item_tpm < lowest_tpm: lowest_tpm = item_tpm deployment = _deployment + verbose_router_logger.info("returning picked lowest tpm/rpm deployment.") return deployment