From d2dab362dfe357f1c2e84a3c5ebd97f3d5c18c0c Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 5 Dec 2023 16:08:12 -0800
Subject: [PATCH] (fix) proxy debugging display Init API key

---
 litellm/proxy/proxy_server.py | 4 ++--
 litellm/router.py             | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index c70d67bf1..4cdb4d082 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -609,6 +609,8 @@ def initialize(
     generate_feedback_box()
     user_model = model
     user_debug = debug
+    if debug==True: # this needs to be first, so users can see Router init debug logs
+        litellm.set_verbose = True
     dynamic_config = {"general": {}, user_model: {}}
     if config:
         llm_router, llm_model_list, general_settings = load_router_config(router=llm_router, config_file_path=config)
@@ -646,8 +648,6 @@
     if max_budget: # litellm-specific param
         litellm.max_budget = max_budget
         dynamic_config["general"]["max_budget"] = max_budget
-    if debug==True: # litellm-specific param
-        litellm.set_verbose = True
     if use_queue:
         celery_setup(use_queue=use_queue)
     if experimental:
diff --git a/litellm/router.py b/litellm/router.py
index be6cbd917..4f344e18f 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -959,7 +959,7 @@ class Router:
                 )

             else:
-                self.print_verbose(f"Initializing OpenAI Client for {model_name}, {str(api_base)}")
+                self.print_verbose(f"Initializing OpenAI Client for {model_name}, {str(api_base)}, {api_key}")
                 model["async_client"] = openai.AsyncOpenAI(
                     api_key=api_key,
                     base_url=api_base,
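
Why the first hunk moves the flag: load_router_config() constructs the Router, and the Router logs its per-model client setup through print_verbose, which silently drops messages while litellm.set_verbose is still False. Setting the flag after the config load (the old position) therefore hid all Router init output. Below is a minimal runnable sketch of that gating pattern; set_verbose, print_verbose, Router, and initialize here are simplified stand-ins for the litellm internals, not the real implementations.

# Sketch of the ordering bug fixed above. All names are simplified
# stand-ins for litellm's globals, not the real implementations.

set_verbose = False  # plays the role of litellm.set_verbose

def print_verbose(msg: str) -> None:
    # litellm-style gate: messages emitted while the flag is False
    # are dropped outright, with no buffering or replay.
    if set_verbose:
        print(msg)

class Router:
    # Stand-in for litellm.Router: it logs during construction,
    # like the "Initializing OpenAI Client ..." line in the patch.
    def __init__(self, model_list: list) -> None:
        for model in model_list:
            print_verbose(f"Initializing OpenAI Client for {model['model_name']}")

def initialize(debug: bool) -> None:
    global set_verbose
    if debug:
        set_verbose = True  # new position: before the Router is built
    Router(model_list=[{"model_name": "gpt-3.5-turbo"}])
    # Old position: flipping the flag here would have silenced every
    # print_verbose call made inside Router.__init__ above.

initialize(debug=True)  # prints the init line

The same flag gates the router.py hunk as well: the api_key appended to that print_verbose call only appears in output once set_verbose has been flipped on.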