diff --git a/litellm/proxy/proxy_cli.py b/litellm/proxy/proxy_cli.py index 35c57b51b..b154b21e1 100644 --- a/litellm/proxy/proxy_cli.py +++ b/litellm/proxy/proxy_cli.py @@ -88,6 +88,13 @@ def is_port_in_use(port): @click.option( "--debug", default=False, is_flag=True, type=bool, help="To debug the input" ) +@click.option( + "--detailed_debug", + default=False, + is_flag=True, + type=bool, + help="To view detailed debug logs", +) @click.option( "--use_queue", default=False, @@ -139,12 +146,6 @@ def is_port_in_use(port): type=bool, help="Print LiteLLM version", ) -@click.option( - "--logs", - flag_value=False, - type=int, - help='Gets the "n" most recent logs. By default gets most recent log.', -) @click.option( "--health", flag_value=True, @@ -179,6 +180,7 @@ def run_server( headers, save, debug, + detailed_debug, temperature, max_tokens, request_timeout, @@ -187,7 +189,6 @@ def run_server( config, max_budget, telemetry, - logs, test, local, num_workers, @@ -212,32 +213,6 @@ def run_server( # this is just a local/relative import error, user git cloned litellm from proxy_server import app, save_worker_config, usage_telemetry feature_telemetry = usage_telemetry - if logs is not None: - if logs == 0: # default to 1 - logs = 1 - try: - with open("api_log.json") as f: - data = json.load(f) - - # convert keys to datetime objects - log_times = { - datetime.strptime(k, "%Y%m%d%H%M%S%f"): v for k, v in data.items() - } - - # sort by timestamp - sorted_times = sorted( - log_times.items(), key=operator.itemgetter(0), reverse=True - ) - - # get n recent logs - recent_logs = { - k.strftime("%Y%m%d%H%M%S%f"): v for k, v in sorted_times[:logs] - } - - print(json.dumps(recent_logs, indent=4)) # noqa - except: - raise Exception("LiteLLM: No logs saved!") - return if version == True: pkg_version = importlib.metadata.version("litellm") click.echo(f"\nLiteLLM: Current Version = {pkg_version}\n") @@ -377,6 +352,7 @@ def run_server( api_base=api_base, api_version=api_version, 
debug=debug, + detailed_debug=detailed_debug, temperature=temperature, max_tokens=max_tokens, request_timeout=request_timeout, diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index d8e9efafd..4392ec533 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -944,6 +944,7 @@ async def initialize( api_base=None, api_version=None, debug=False, + detailed_debug=False, temperature=None, max_tokens=None, request_timeout=600, @@ -956,7 +957,7 @@ async def initialize( use_queue=False, config=None, ): - global user_model, user_api_base, user_debug, user_max_tokens, user_request_timeout, user_temperature, user_telemetry, user_headers, experimental, llm_model_list, llm_router, general_settings, master_key, user_custom_auth, prisma_client + global user_model, user_api_base, user_debug, user_detailed_debug, user_max_tokens, user_request_timeout, user_temperature, user_telemetry, user_headers, experimental, llm_model_list, llm_router, general_settings, master_key, user_custom_auth, prisma_client generate_feedback_box() user_model = model user_debug = debug @@ -964,8 +965,14 @@ async def initialize( from litellm._logging import verbose_router_logger, verbose_proxy_logger import logging - verbose_router_logger.setLevel(level=logging.INFO) - verbose_router_logger.debug("initilized verbose router logger") + verbose_router_logger.setLevel(level=logging.INFO) # set router logs to info + if detailed_debug == True: + from litellm._logging import verbose_router_logger, verbose_proxy_logger + import logging + + verbose_router_logger.setLevel(level=logging.DEBUG) # set router logs to debug + litellm.set_verbose = True + dynamic_config = {"general": {}, user_model: {}} if config: (