diff --git a/docs/my-website/docs/debugging/hosted_debugging.md b/docs/my-website/docs/debugging/hosted_debugging.md
index 5464a201c..709918751 100644
--- a/docs/my-website/docs/debugging/hosted_debugging.md
+++ b/docs/my-website/docs/debugging/hosted_debugging.md
@@ -16,14 +16,13 @@ See our live dashboard 👉 [admin.litellm.ai](https://admin.litellm.ai/)
 By default, your dashboard is viewable at `admin.litellm.ai/`.
 
 ```
+import litellm, os
+
 ## Set your email
 os.environ["LITELLM_EMAIL"] = "your_user_email"
 
-## LOG ON ALL 3 EVENTS
-litellm.input_callback = ["lite_debugger"]
-litellm.success_callback = ["lite_debugger"]
-litellm.failure_callback = ["lite_debugger"]
-
+## Set debugger to true
+litellm.debugger = True
 ```
 
 ## Example Usage
@@ -36,12 +35,8 @@ By default, your dashboard is viewable at `admin.litellm.ai/`.
     ## Set ENV variable
     os.environ["LITELLM_EMAIL"] = "your_email"
 
-    ## LOG ON ALL 3 EVENTS
-    litellm.input_callback = ["lite_debugger"]
-    litellm.success_callback = ["lite_debugger"]
-    litellm.failure_callback = ["lite_debugger"]
-
-    litellm.set_verbose = True
+    ## Set debugger to true
+    litellm.debugger = True
 
     user_message = "Hello, how are you?"
     messages = [{ "content": user_message,"role": "user"}]
@@ -52,6 +47,5 @@ By default, your dashboard is viewable at `admin.litellm.ai/`.
 
     # bad request call
     response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
-
 ```
 
diff --git a/litellm/__init__.py b/litellm/__init__.py
index c1e914936..498fb0eae 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -21,6 +21,7 @@ hugging_api_token: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
+debugger = False
 model_cost = {
     "gpt-3.5-turbo": {
         "max_tokens": 4000,
diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index c998bff4a..d18976588 100644
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index 846016121..22622cde7 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 05731d21d..8af1d3c14 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/test_litedebugger_integration.py b/litellm/tests/test_litedebugger_integration.py
index 7fc9e3069..fe58bc730 100644
--- a/litellm/tests/test_litedebugger_integration.py
+++ b/litellm/tests/test_litedebugger_integration.py
@@ -9,11 +9,7 @@
 # import litellm
 # from litellm import embedding, completion
 
-# litellm.input_callback = ["lite_debugger"]
-# litellm.success_callback = ["lite_debugger"]
-# litellm.failure_callback = ["lite_debugger"]
-
-# litellm.set_verbose = True
+# litellm.debugger = True
 
 # user_message = "Hello, how are you?"
 # messages = [{ "content": user_message,"role": "user"}]
diff --git a/litellm/utils.py b/litellm/utils.py
index 93a637270..57ef91f26 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -286,6 +286,10 @@ def client(original_function):
     ):  # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
         try:
             global callback_list, add_breadcrumb, user_logger_fn
+            if litellm.debugger: # add to input, success and failure callbacks if user sets debugging to true
+                litellm.input_callback.append("lite_debugger")
+                litellm.success_callback.append("lite_debugger")
+                litellm.failure_callback.append("lite_debugger")
             if (
                 len(litellm.input_callback) > 0 or len(litellm.success_callback) > 0 or len(litellm.failure_callback) > 0
             ) and len(callback_list) == 0:
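For context, here is a minimal usage sketch of the new `litellm.debugger` flag introduced by this patch, assembled from the docs changes above. It assumes a valid `OPENAI_API_KEY` is set; the email value is a placeholder.

```python
# Minimal sketch of the new litellm.debugger flag (per the docs changes above).
# Assumes OPENAI_API_KEY is set in the environment; the email is a placeholder.
import os
import litellm
from litellm import completion

os.environ["LITELLM_EMAIL"] = "your_user_email"  # ties logs to your admin.litellm.ai dashboard
litellm.debugger = True  # client() wrapper appends "lite_debugger" to input/success/failure callbacks

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
print(response)
```

The design point of the utils.py change: instead of users registering `"lite_debugger"` on all three callback lists by hand, the single `litellm.debugger` flag triggers that registration inside `client()`'s one-time setup.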