diff --git a/litellm/__init__.py b/litellm/__init__.py
index 8db635cde4..c1d19d0c6d 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -22,6 +22,7 @@ vertex_project: Optional[str] = None
 vertex_location: Optional[str] = None
 togetherai_api_key: Optional[str] = None
 baseten_key: Optional[str] = None
+use_client = False
 caching = False
 caching_with_models = False  # if you want the caching key to be model + prompt
 model_alias_map: Dict[str, str] = {}
diff --git a/litellm/utils.py b/litellm/utils.py
index 3a47b53a95..7474ac03e0 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -338,12 +338,12 @@ def client(original_function):
             litellm.input_callback.append("lite_debugger")
             litellm.success_callback.append("lite_debugger")
             litellm.failure_callback.append("lite_debugger")
-        # else:
-        #     # create a litellm token for users
-        #     litellm.token = get_or_generate_uuid()
-        #     litellm.input_callback.append("lite_debugger")
-        #     litellm.success_callback.append("lite_debugger")
-        #     litellm.failure_callback.append("lite_debugger")
+        elif litellm.use_client:
+            # create a litellm token for users
+            litellm.token = get_or_generate_uuid()
+            litellm.input_callback.append("lite_debugger")
+            litellm.success_callback.append("lite_debugger")
+            litellm.failure_callback.append("lite_debugger")
         if (
             len(litellm.input_callback) > 0
             or len(litellm.success_callback) > 0
diff --git a/pyproject.toml b/pyproject.toml
index c5dbdfe34e..839349a273 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.485"
+version = "0.1.486"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"