allowing opt-in for litedebugger

This commit is contained in:
Krrish Dholakia 2023-08-25 17:18:29 -07:00
parent 198b95d7a0
commit fa53f7033b
3 changed files with 10 additions and 12 deletions

View file

@ -25,7 +25,6 @@ baseten_key: Optional[str] = None
caching = False
caching_with_models = False # if you want the caching key to be model + prompt
model_alias_map: Dict[str, str] = {}
use_client = True
model_cost = {
    "babbage-002": {
        "max_tokens": 16384,

View file

@ -332,19 +332,18 @@ def client(original_function):
): # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
    try:
        global callback_list, add_breadcrumb, user_logger_fn
if litellm.use_client: # enable users to opt-out of the debugging dashboard by setting `litellm.client = False` if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product
if litellm.email is not None or os.getenv("LITELLM_EMAIL", None) is not None or litellm.token is not None or os.getenv("LITELLM_TOKEN", None): # add to input, success and failure callbacks if user is using hosted product get_all_keys()
get_all_keys() if "lite_debugger" not in callback_list:
if "lite_debugger" not in callback_list:
litellm.input_callback.append("lite_debugger")
litellm.success_callback.append("lite_debugger")
litellm.failure_callback.append("lite_debugger")
else:
# create a litellm token for users
litellm.token = get_or_generate_uuid()
litellm.input_callback.append("lite_debugger") litellm.input_callback.append("lite_debugger")
litellm.success_callback.append("lite_debugger") litellm.success_callback.append("lite_debugger")
litellm.failure_callback.append("lite_debugger") litellm.failure_callback.append("lite_debugger")
# else:
# # create a litellm token for users
# litellm.token = get_or_generate_uuid()
# litellm.input_callback.append("lite_debugger")
# litellm.success_callback.append("lite_debugger")
# litellm.failure_callback.append("lite_debugger")
        if (
            len(litellm.input_callback) > 0
            or len(litellm.success_callback) > 0

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
-version = "0.1.483"
+version = "0.1.484"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"