From ff7b4ffcf1df641df23b775a1ca0b2e22b46dd06 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Sat, 26 Aug 2023 10:38:39 -0700
Subject: [PATCH] use client opt-in

---
 litellm/__init__.py |  1 +
 litellm/utils.py    | 12 ++++++------
 pyproject.toml      |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/litellm/__init__.py b/litellm/__init__.py
index 8db635cde..c1d19d0c6 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -22,6 +22,7 @@ vertex_project: Optional[str] = None
 vertex_location: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 baseten_key: Optional[str] = None
+use_client = False
 caching = False
 caching_with_models = False # if you want the caching key to be model + prompt
 model_alias_map: Dict[str, str] = {}
diff --git a/litellm/utils.py b/litellm/utils.py
index 3a47b53a9..7474ac03e 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -338,12 +338,12 @@ def client(original_function):
                 litellm.input_callback.append("lite_debugger")
                 litellm.success_callback.append("lite_debugger")
                 litellm.failure_callback.append("lite_debugger")
-            # else:
-            #     # create a litellm token for users
-            #     litellm.token = get_or_generate_uuid()
-            #     litellm.input_callback.append("lite_debugger")
-            #     litellm.success_callback.append("lite_debugger")
-            #     litellm.failure_callback.append("lite_debugger")
+            elif litellm.use_client:
+                # create a litellm token for users
+                litellm.token = get_or_generate_uuid()
+                litellm.input_callback.append("lite_debugger")
+                litellm.success_callback.append("lite_debugger")
+                litellm.failure_callback.append("lite_debugger")
             if (
                 len(litellm.input_callback) > 0
                 or len(litellm.success_callback) > 0
diff --git a/pyproject.toml b/pyproject.toml
index c5dbdfe34..839349a27 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.485"
+version = "0.1.486"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
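
A minimal sketch of how a caller would opt in after this change: the patch
makes the lite_debugger callback registration conditional on the new
`litellm.use_client` flag (default False) instead of leaving it commented
out. The completion call below assumes litellm's standard entry point; the
model name and message are illustrative placeholders, not part of the patch.

    import litellm

    # Opt in explicitly; `use_client` defaults to False, so the
    # lite_debugger callbacks are not registered unless this is set.
    litellm.use_client = True

    # On the first decorated call, utils.client() now generates a token via
    # get_or_generate_uuid() and appends "lite_debugger" to the input,
    # success, and failure callback lists.
    response = litellm.completion(
        model="gpt-3.5-turbo",  # placeholder model
        messages=[{"role": "user", "content": "Hello!"}],
    )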