diff --git a/litellm/__init__.py b/litellm/__init__.py
index 6365b88a51..b53d49e26e 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -24,7 +24,7 @@ togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
 model_alias_map: Dict[str, str] = {}
-client = True
+use_client = True
 model_cost = {
     "babbage-002": {
         "max_tokens": 16384,
diff --git a/litellm/utils.py b/litellm/utils.py
index e0b16885ec..b06da44e1e 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -304,7 +304,7 @@ def client(original_function):
     ): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
         try:
             global callback_list, add_breadcrumb, user_logger_fn
-            if litellm.client: # enable users to opt-out of the debugging dashboard by setting `litellm.client = False`
+            if litellm.use_client: # enable users to opt-out of the debugging dashboard by setting `litellm.use_client = False`
                 if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product
                     get_all_keys()
                     if "lite_debugger" not in callback_list:
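
For context, a minimal sketch of how callers are affected by the rename, assuming the rest of the module API is unchanged. The `completion()` call is litellm's standard entry point and is shown only for illustration; it needs a valid OPENAI_API_KEY to actually run.

    import litellm

    # Before this change, opting out of the hosted debugging dashboard was:
    #     litellm.client = False
    # After this change, the flag is renamed (it still defaults to True):
    litellm.use_client = False

    # Ordinary calls are unaffected by the rename; only the opt-out flag moved.
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
    )

The rename also resolves a name collision: `litellm.utils` defines a `client` decorator, so the old module-level `litellm.client` flag shadowed it in readers' minds even though the two lived in different namespaces.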