enable litedebugger opt-out

This commit is contained in:
Krrish Dholakia 2023-08-24 10:19:07 -07:00
parent 254dcce39f
commit efbb476020
4 changed files with 12 additions and 14 deletions

View file

@@ -24,7 +24,7 @@ togetherai_api_key: Optional[str] = None
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
 model_alias_map: Dict[str, str] = {}
-debugger = False
+client = True
 model_cost = {
     "babbage-002": {
         "max_tokens": 16384,

View file

@@ -12,8 +12,6 @@ import pytest
 import litellm
 from litellm import embedding, completion
-litellm.debugger = True
 # from infisical import InfisicalClient
 # litellm.set_verbose = True

View file

@@ -304,19 +304,19 @@ def client(original_function):
 ): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
     try:
         global callback_list, add_breadcrumb, user_logger_fn
-        if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product
-            get_all_keys()
-            if "lite_debugger" not in callback_list:
-                litellm.input_callback.append("lite_debugger")
-                litellm.success_callback.append("lite_debugger")
-                litellm.failure_callback.append("lite_debugger")
-        else:
-            # create a litellm token for users
-            litellm.token = get_or_generate_uuid()
-            litellm.input_callback.append("lite_debugger")
-            litellm.success_callback.append("lite_debugger")
-            litellm.failure_callback.append("lite_debugger")
+        if litellm.client: # enable users to opt-out of the debugging dashboard by setting `litellm.client = False`
+            if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product
+                get_all_keys()
+                if "lite_debugger" not in callback_list:
+                    litellm.input_callback.append("lite_debugger")
+                    litellm.success_callback.append("lite_debugger")
+                    litellm.failure_callback.append("lite_debugger")
+            else:
+                # create a litellm token for users
+                litellm.token = get_or_generate_uuid()
+                litellm.input_callback.append("lite_debugger")
+                litellm.success_callback.append("lite_debugger")
+                litellm.failure_callback.append("lite_debugger")
 if (
     len(litellm.input_callback) > 0
     or len(litellm.success_callback) > 0

View file

@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "litellm" name = "litellm"
version = "0.1.477" version = "0.1.478"
description = "Library to easily interface with LLM API providers" description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"] authors = ["BerriAI"]
license = "MIT License" license = "MIT License"