diff --git a/litellm/__init__.py b/litellm/__init__.py
index 052e542d6..6365b88a5 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -24,7 +24,7 @@ togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
 model_alias_map: Dict[str, str] = {}
-debugger = False
+client = True
 model_cost = {
     "babbage-002": {
         "max_tokens": 16384,
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 6a58088e3..1bd36790b 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -12,8 +12,6 @@ import pytest
 import litellm
 from litellm import embedding, completion
 
-litellm.debugger = True
-
 # from infisical import InfisicalClient
 
 # litellm.set_verbose = True
diff --git a/litellm/utils.py b/litellm/utils.py
index 8819abd31..e0b16885e 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -304,19 +304,19 @@ def client(original_function):
     ):  # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
         try:
             global callback_list, add_breadcrumb, user_logger_fn
-            if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product
-                get_all_keys()
-                if "lite_debugger" not in callback_list:
+            if litellm.client: # enable users to opt-out of the debugging dashboard by setting `litellm.client = False`
+                if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product
+                    get_all_keys()
+                    if "lite_debugger" not in callback_list:
+                        litellm.input_callback.append("lite_debugger")
+                        litellm.success_callback.append("lite_debugger")
+                        litellm.failure_callback.append("lite_debugger")
+                else:
+                    # create a litellm token for users
+                    litellm.token = get_or_generate_uuid()
                     litellm.input_callback.append("lite_debugger")
                     litellm.success_callback.append("lite_debugger")
                     litellm.failure_callback.append("lite_debugger")
-            else:
-                # create a litellm token for users
-                litellm.token = get_or_generate_uuid()
-                litellm.input_callback.append("lite_debugger")
-                litellm.success_callback.append("lite_debugger")
-                litellm.failure_callback.append("lite_debugger")
-
         if (
             len(litellm.input_callback) > 0
             or len(litellm.success_callback) > 0
diff --git a/pyproject.toml b/pyproject.toml
index a9b04b146..5e1ebb664 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.477"
+version = "0.1.478"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
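
With this patch the lite_debugger callbacks are registered by default (`client = True`), and opting out becomes an explicit flag rather than an opt-in `debugger` setting. A minimal usage sketch of the new opt-out path, assuming a valid provider key (e.g. `OPENAI_API_KEY`) is set in the environment; the model name is illustrative:

    import litellm
    from litellm import completion

    # opt out of the debugging dashboard: with client = False, the
    # decorator skips registering any lite_debugger callbacks
    litellm.client = False

    # completion() otherwise behaves as before
    response = completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
    )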