diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index c70d67bf12..4cdb4d082e 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -609,6 +609,8 @@ def initialize(
     generate_feedback_box()
     user_model = model
     user_debug = debug
+    if debug==True: # this needs to be first, so users can see Router init debug
+        litellm.set_verbose = True
     dynamic_config = {"general": {}, user_model: {}}
     if config:
         llm_router, llm_model_list, general_settings = load_router_config(router=llm_router, config_file_path=config)
@@ -646,8 +648,6 @@ def initialize(
     if max_budget: # litellm-specific param
         litellm.max_budget = max_budget
         dynamic_config["general"]["max_budget"] = max_budget
-    if debug==True: # litellm-specific param
-        litellm.set_verbose = True
     if use_queue:
         celery_setup(use_queue=use_queue)
     if experimental:
diff --git a/litellm/router.py b/litellm/router.py
index be6cbd917b..4f344e18f1 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -959,7 +959,7 @@ class Router:
                 )
             else:
-                self.print_verbose(f"Initializing OpenAI Client for {model_name}, {str(api_base)}")
+                self.print_verbose(f"Initializing OpenAI Client for {model_name}, {str(api_base)}, {api_key}")
                 model["async_client"] = openai.AsyncOpenAI(
                     api_key=api_key,
                     base_url=api_base,
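
The reordering matters because the Router's `print_verbose` output is gated on `litellm.set_verbose`: if the flag is only enabled after `load_router_config()` has already constructed the Router, the "Initializing OpenAI Client" messages added in the second hunk would never appear. Below is a minimal sketch of the intended ordering, assuming the usual `litellm.Router(model_list=...)` entry point; the model list here is a hypothetical placeholder, not the proxy's real config.

```python
import litellm
from litellm import Router

# Turn on verbose logging BEFORE constructing the Router so that its
# initialization-time print_verbose() calls (client setup, etc.) are visible.
litellm.set_verbose = True

# Hypothetical model list for illustration only; the proxy normally builds
# this from the config file via load_router_config().
router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": "sk-...",  # placeholder key
            },
        }
    ]
)
```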