diff --git a/litellm/tests/test_logging.py b/litellm/tests/test_logging.py
index 37caeffa97..567103c85b 100644
--- a/litellm/tests/test_logging.py
+++ b/litellm/tests/test_logging.py
@@ -1,66 +1,287 @@
 #### What this tests ####
 # This tests error logging (with custom user functions) for the raw `completion` + `embedding` endpoints
-import sys, os
-import traceback
+# Test Scenarios (test across completion, streaming, embedding)
+## 1: Pre-API-Call
+## 2: Post-API-Call
+## 3: On LiteLLM Call success
+## 4: On LiteLLM Call failure
+
+import sys, os, io
+import traceback, logging
+import pytest
+import dotenv
+dotenv.load_dotenv()
+
+# Create logger
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+# Create a stream handler
+stream_handler = logging.StreamHandler(sys.stdout)
+logger.addHandler(stream_handler)
+
+# Create a function to log information
+def logger_fn(message):
+    logger.info(message)
 
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
 import litellm
 from litellm import embedding, completion
-
-litellm.set_verbose = False
+from openai.error import AuthenticationError
+litellm.set_verbose = True
 
 score = 0
-
-def logger_fn(model_call_object: dict):
-    print(f"model call details: {model_call_object}")
-
-
 user_message = "Hello, how are you?"
 messages = [{"content": user_message, "role": "user"}]
 
-# test on openai completion call
+## 1. On LiteLLM Call Success
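+# Each block below captures stdout while the call runs, then checks the
+# captured text for the log lines that litellm's Logging class is expected
+# to emit (litellm.set_verbose = True routes them to stdout).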
+# normal completion
+## test on openai completion call
 try:
-    response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn)
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages)
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
     score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
 
-# test on non-openai completion call
+## test on non-openai completion call
 try:
-    response = completion(
-        model="claude-instant-1", messages=messages, logger_fn=logger_fn
-    )
-    print(f"claude response: {response}")
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages)
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
     score += 1
-except:
-    print(f"error occurred: {traceback.format_exc()}")
-    pass
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
 
-# # test on openai embedding call
-# try:
-#     response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn)
-#     score +=1
-# except:
-#     traceback.print_exc()
+# streaming completion
+## test on openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="gpt-3.5-turbo", messages=messages, stream=True)
+    for chunk in response:  # consume the stream so the success handler fires
+        pass
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Success Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+except Exception as e:
+    pytest.fail(f"Error occurred: {e}")
 
-# # test on bad azure openai embedding call -> missing azure flag and this isn't an embedding model
-# try:
-#     response = embedding(model='chatgpt-test', input=[user_message], logger_fn=logger_fn)
-# except:
-#     score +=1 # expect this to fail
-#     traceback.print_exc()
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages, stream=True)
+    for chunk in response:  # consume the stream so the success handler fires
+        pass
+
+    # Restore stdout
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
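+    # Streaming calls should emit the same three log markers as regular calls.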
Details Pre-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details Post-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details LiteLLM-Success Call" not in output: + raise Exception("Required log message not found!") + score += 1 +except Exception as e: + pytest.fail(f"Error occurred: {e}") + pass + +# embedding + +try: + # Redirect stdout + old_stdout = sys.stdout + sys.stdout = new_stdout = io.StringIO() + + response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"]) + + # Restore stdout + sys.stdout = old_stdout + output = new_stdout.getvalue().strip() + + if "Logging Details Pre-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details Post-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details LiteLLM-Success Call" not in output: + raise Exception("Required log message not found!") +except Exception as e: + pytest.fail(f"Error occurred: {e}") + +## 2. On LiteLLM Call failure +## TEST BAD KEY + +temporary_oai_key = os.environ["OPENAI_API_KEY"] +os.environ["OPENAI_API_KEY"] = "bad-key" + +temporary_anthropic_key = os.environ["ANTHROPIC_API_KEY"] +os.environ["ANTHROPIC_API_KEY"] = "bad-key" + +# normal completion +## test on openai completion call +try: + # Redirect stdout + old_stdout = sys.stdout + sys.stdout = new_stdout = io.StringIO() + + response = completion(model="gpt-3.5-turbo", messages=messages) + + # Restore stdout + sys.stdout = old_stdout + output = new_stdout.getvalue().strip() + + if "Logging Details Pre-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details Post-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details LiteLLM-Failure Call" not in output: + raise Exception("Required log message not found!") + score += 1 +except Exception as e: + print(f"exception type: {type(e).__name__}") + if not isinstance(e, AuthenticationError): + pytest.fail(f"Error occurred: {e}") + +## test on non-openai completion call +try: + # Redirect stdout + old_stdout = sys.stdout + sys.stdout = new_stdout = io.StringIO() + + response = completion(model="claude-instant-1", messages=messages) + + # Restore stdout + sys.stdout = old_stdout + output = new_stdout.getvalue().strip() + + if "Logging Details Pre-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details Post-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details LiteLLM-Failure Call" not in output: + raise Exception("Required log message not found!") + score += 1 +except Exception as e: + if not isinstance(e, AuthenticationError): + pytest.fail(f"Error occurred: {e}") + +# streaming completion +## test on openai completion call +try: + # Redirect stdout + old_stdout = sys.stdout + sys.stdout = new_stdout = io.StringIO() + + response = completion(model="gpt-3.5-turbo", messages=messages) + + # Restore stdout + sys.stdout = old_stdout + output = new_stdout.getvalue().strip() + + if "Logging Details Pre-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details Post-API Call" not in output: + raise Exception("Required log message not found!") + elif "Logging Details LiteLLM-Failure Call" not in output: + raise Exception("Required log message not found!") + score += 1 +except Exception as e: + if not 
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+
+## test on non-openai completion call
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = completion(model="claude-instant-1", messages=messages, stream=True)
+    for chunk in response:  # the bad key should raise here or on the call above
+        pass
+except Exception as e:
+    # Restore stdout, then inspect the captured logs
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+    score += 1
+
+# embedding
+
+try:
+    # Redirect stdout
+    old_stdout = sys.stdout
+    sys.stdout = new_stdout = io.StringIO()
+
+    response = embedding(model="text-embedding-ada-002", input=["good morning from litellm"])
+except Exception as e:
+    # Restore stdout, then inspect the captured logs
+    sys.stdout = old_stdout
+    output = new_stdout.getvalue().strip()
+
+    if not isinstance(e, AuthenticationError):
+        pytest.fail(f"Error occurred: {e}")
+    if "Logging Details Pre-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details Post-API Call" not in output:
+        raise Exception("Required log message not found!")
+    elif "Logging Details LiteLLM-Failure Call" not in output:
+        raise Exception("Required log message not found!")
+
+os.environ["OPENAI_API_KEY"] = temporary_oai_key
+os.environ["ANTHROPIC_API_KEY"] = temporary_anthropic_key
diff --git a/litellm/utils.py b/litellm/utils.py
index 3aaf00b9ec..053aa19d17 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -180,8 +180,10 @@ class Logging:
         }
 
     def pre_call(self, input, api_key, model=None, additional_args={}):
+        # Log the exact input to the LLM API
+        print_verbose("Logging Details Pre-API Call")
         try:
-            print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
+            # print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
             self.model_call_details["additional_args"] = additional_args
@@ -193,9 +195,6 @@ class Logging:
 
             # User Logging -> if you pass in a custom logging function
             print_verbose(f"model call details: {self.model_call_details}")
-            print_verbose(
-                f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
-            )
             if self.logger_fn and callable(self.logger_fn):
                 try:
                     self.logger_fn(
@@ -257,7 +256,7 @@ class Logging:
             capture_exception(e)
 
     def post_call(self, original_response, input=None, api_key=None, additional_args={}):
-        # Do something here
+        # Log the exact result from the LLM API (for streaming, log the type of response received)
         try:
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
@@ -266,7 +265,7 @@ class Logging:
 
             # User Logging -> if you pass in a custom logging function
             print_verbose(
-                f"Logging Details: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
+                f"Logging Details Post-API Call: logger_fn - {self.logger_fn} | callable(logger_fn) - {callable(self.logger_fn)}"
             )
             if self.logger_fn and callable(self.logger_fn):
                 try:
@@ -331,6 +330,10 @@ class Logging:
 
 
     def success_handler(self, result, start_time, end_time):
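+        # Emitted on every successful call so callers and tests can assert on it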
Details LiteLLM-Success Call" + ) try: for callback in litellm.success_callback: try: @@ -364,6 +366,9 @@ class Logging: pass def failure_handler(self, exception, traceback_exception, start_time, end_time): + print_verbose( + f"Logging Details LiteLLM-Failure Call" + ) try: for callback in litellm.failure_callback: if callback == "lite_debugger": @@ -1699,6 +1704,9 @@ class CustomStreamWrapper: self.model = model self.custom_llm_provider = custom_llm_provider self.logging_obj = logging_obj + if self.logging_obj: + # Log the type of the received item + self.logging_obj.post_call(str(type(completion_stream))) if model in litellm.cohere_models: # cohere does not return an iterator, so we need to wrap it in one self.completion_stream = iter(completion_stream) @@ -1825,7 +1833,7 @@ class CustomStreamWrapper: completion_obj["content"] = self.handle_openai_chat_completion_chunk(chunk) # LOGGING - self.logging_obj.post_call(completion_obj["content"]) + # self.logging_obj.post_call(completion_obj["content"]) # return this for all models return {"choices": [{"delta": completion_obj}]} except: diff --git a/pyproject.toml b/pyproject.toml index d226ddfeba..1eefe547fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "0.1.512" +version = "0.1.513" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT License"