diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index c82c00eec..0adac09a8 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 46a47c0c6..b7522252e 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/test_litedebugger_integration.py b/litellm/tests/test_litedebugger_integration.py
index e6235aeb4..018a65916 100644
--- a/litellm/tests/test_litedebugger_integration.py
+++ b/litellm/tests/test_litedebugger_integration.py
@@ -44,13 +44,13 @@ split_per_model = {
 user_message = "Hello, how are you?"
 messages = [{"content": user_message, "role": "user"}]
 
-# #Test 1: On completion call - without setting client to true -> ensure litedebugger is not initialized
+# # #Test 1: On completion call - without setting client to true -> ensure litedebugger is not initialized
 # try:
 #     # Redirect stdout
 #     old_stdout = sys.stdout
 #     sys.stdout = new_stdout = io.StringIO()
 
-#     response = completion_with_split_tests(models=split_per_model, messages=messages)
+#     response = completion(model="gpt-3.5-turbo", messages=messages)
 
 #     # Restore stdout
 #     sys.stdout = old_stdout
@@ -64,13 +64,14 @@ messages = [{"content": user_message, "role": "user"}]
 
 
 # # Test 2: On normal completion call - setting client to true
+# litellm.use_client=True
 # def test_completion_with_client():
 #     try:
 #         # Redirect stdout
 #         old_stdout = sys.stdout
 #         sys.stdout = new_stdout = io.StringIO()
 #         litellm.token = "a67abbaf-35b8-4649-8647-68c5fe8d37fb" # generate one here - https://www.uuidgenerator.net/version4
-#         response = completion(model="gpt-3.5-turbo", messages=messages, use_client=True)
+#         response = completion(model="gpt-3.5-turbo", messages=messages)
 
 #         # Restore stdout
 #         sys.stdout = old_stdout
@@ -83,9 +84,8 @@ messages = [{"content": user_message, "role": "user"}]
 #         if "LiteDebugger: Success/Failure Call Logging" not in output:
 #             raise Exception("LiteLLMDebugger: success/failure call not logged!")
 #     except Exception as e:
-#         print(output)
 #         pytest.fail(f"Error occurred: {e}")
-
+# test_completion_with_client()
 # # Test 3: On streaming completion call - setting client to true
 # try:
 #     # Redirect stdout
diff --git a/litellm/utils.py b/litellm/utils.py
index 22101f5b5..818f46c9d 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -439,7 +439,7 @@ def client(original_function):
         try:
             global callback_list, add_breadcrumb, user_logger_fn, Logging
             function_id = kwargs["id"] if "id" in kwargs else None
-            if "use_client" in kwargs and kwargs["use_client"] == True:
+            if litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True):
                 print_verbose(f"litedebugger initialized")
                 litellm.input_callback.append("lite_debugger")
                 litellm.success_callback.append("lite_debugger")
diff --git a/pyproject.toml b/pyproject.toml
index 00dabe2ab..139a19441 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.559"
+version = "0.1.560"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
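
The substantive change in this diff is in `litellm/utils.py`: the `client` decorator now enables the LiteDebugger callbacks when either the new module-level flag `litellm.use_client` is set or the existing per-call kwarg `use_client=True` is passed, which is why Test 2 can set `litellm.use_client=True` once instead of threading `use_client=True` through every `completion()` call. Below is a minimal, self-contained sketch of that gating pattern, not litellm's actual implementation; the module-level variables `use_client`, `input_callback`, and `success_callback` stand in for the corresponding `litellm` attributes, and `_register_lite_debugger` is a hypothetical helper, not a litellm API.

```python
# Sketch of the global-or-per-call gating introduced in utils.py.
# All names here are simplified stand-ins for litellm's internals.

use_client = False      # module-level switch, analogous to litellm.use_client
input_callback = []     # analogous to litellm.input_callback
success_callback = []   # analogous to litellm.success_callback

def _register_lite_debugger():
    # Hypothetical helper: append the debugger callback once per list.
    for callbacks in (input_callback, success_callback):
        if "lite_debugger" not in callbacks:
            callbacks.append("lite_debugger")

def client(original_function):
    def wrapper(*args, **kwargs):
        # Mirrors the diff's condition:
        # litellm.use_client or ("use_client" in kwargs and kwargs["use_client"] == True)
        if use_client or kwargs.get("use_client") is True:
            _register_lite_debugger()
        return original_function(*args, **kwargs)
    return wrapper

@client
def completion(model, messages, **kwargs):
    # Stand-in for litellm.completion; a real call would hit an LLM API.
    return {"model": model, "messages": messages}

use_client = True  # enable the debugger globally, as Test 2 now does
completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "hi"}])
assert "lite_debugger" in success_callback
```

The change is backward-compatible: the per-call `use_client=True` kwarg keeps working, while the module-level flag lets callers opt in once for all subsequent calls.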