diff --git a/docs/my-website/docs/observability/raw_request_response.md b/docs/my-website/docs/observability/raw_request_response.md
new file mode 100644
index 000000000..7b978a643
--- /dev/null
+++ b/docs/my-website/docs/observability/raw_request_response.md
@@ -0,0 +1,38 @@
+import Image from '@theme/IdealImage';
+
+# Raw Request/Response Logging
+
+See the raw request/response sent by LiteLLM in your logging provider (OTEL/Langfuse/etc.).
+
+```python
+# pip install langfuse
+import litellm
+import os
+
+# log raw request/response
+litellm.log_raw_request_response = True
+
+# from https://cloud.langfuse.com/
+os.environ["LANGFUSE_PUBLIC_KEY"] = ""
+os.environ["LANGFUSE_SECRET_KEY"] = ""
+# Optional, defaults to https://cloud.langfuse.com
+os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"  # optional
+
+# LLM API Keys
+os.environ['OPENAI_API_KEY'] = ""
+
+# set langfuse as a callback, litellm will send the data to langfuse
+litellm.success_callback = ["langfuse"]
+
+# openai call
+response = litellm.completion(
+  model="gpt-3.5-turbo",
+  messages=[
+    {"role": "user", "content": "Hi 👋 - i'm openai"}
+  ]
+)
+```
+
+**Expected Log**
+
+<Image img={require('../../img/raw_request_log.png')}/>
\ No newline at end of file
diff --git a/docs/my-website/img/raw_request_log.png b/docs/my-website/img/raw_request_log.png
new file mode 100644
index 000000000..f07e5fd18
Binary files /dev/null and b/docs/my-website/img/raw_request_log.png differ
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index ff110bb62..5618eb41e 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -182,6 +182,7 @@ const sidebars = {
       label: "Logging & Observability",
       items: [
         "debugging/local_debugging",
+        "observability/raw_request_response",
         "observability/callbacks",
         "observability/custom_callback",
         "observability/langfuse_integration",
diff --git a/litellm/__init__.py b/litellm/__init__.py
index e92ae355e..4ddc4552c 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -60,6 +60,7 @@ _async_failure_callback: List[Callable] = (
 pre_call_rules: List[Callable] = []
 post_call_rules: List[Callable] = []
 turn_off_message_logging: Optional[bool] = False
+log_raw_request_response: bool = False
 redact_messages_in_exceptions: Optional[bool] = False
 store_audit_logs = False  # Enterprise feature, allow users to see audit logs
 ## end of callbacks #############
diff --git a/litellm/utils.py b/litellm/utils.py
index 5e85419dc..98461d58b 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1347,28 +1347,29 @@ class Logging:
                 )
             else:
                 verbose_logger.debug(f"\033[92m{curl_command}\033[0m\n")
-            # log raw request to provider (like LangFuse)
-            try:
-                # [Non-blocking Extra Debug Information in metadata]
-                _litellm_params = self.model_call_details.get("litellm_params", {})
-                _metadata = _litellm_params.get("metadata", {}) or {}
-                if (
-                    litellm.turn_off_message_logging is not None
-                    and litellm.turn_off_message_logging is True
-                ):
+            # log raw request to provider (like LangFuse) -- if opted in.
+            if litellm.log_raw_request_response is True:
+                try:
+                    # [Non-blocking Extra Debug Information in metadata]
+                    _litellm_params = self.model_call_details.get("litellm_params", {})
+                    _metadata = _litellm_params.get("metadata", {}) or {}
+                    if (
+                        litellm.turn_off_message_logging is not None
+                        and litellm.turn_off_message_logging is True
+                    ):
+                        _metadata["raw_request"] = (
+                            "redacted by litellm. \
+                            'litellm.turn_off_message_logging=True'"
+                        )
+                    else:
+                        _metadata["raw_request"] = str(curl_command)
+                except Exception as e:
                     _metadata["raw_request"] = (
-                        "redacted by litellm. \
-                        'litellm.turn_off_message_logging=True'"
+                        "Unable to Log \
+                        raw request: {}".format(
+                            str(e)
+                        )
                     )
-                else:
-                    _metadata["raw_request"] = str(curl_command)
-            except Exception as e:
-                _metadata["raw_request"] = (
-                    "Unable to Log \
-                    raw request: {}".format(
-                        str(e)
-                    )
-                )
             if self.logger_fn and callable(self.logger_fn):
                 try:
                     self.logger_fn(
@@ -2735,7 +2736,7 @@ class Logging:
         only redacts when litellm.turn_off_message_logging == True
         """
         # check if user opted out of logging message/response to callbacks
-        if litellm.turn_off_message_logging == True:
+        if litellm.turn_off_message_logging is True:
            # remove messages, prompts, input, response from logging
            self.model_call_details["messages"] = [
                {"role": "user", "content": "redacted-by-litellm"}
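For anyone reviewing the behavior added here without wiring up Langfuse, below is a minimal sketch using a plain Python success callback instead of the `"langfuse"` integration. It assumes the metadata layout introduced in this PR (the curl command stored under `litellm_params["metadata"]["raw_request"]`); the callback name and print format are illustrative, not part of the patch.

```python
# Sketch (not part of the patch): inspect the raw request captured when
# litellm.log_raw_request_response is enabled, via a custom success callback.
import os
import litellm

os.environ["OPENAI_API_KEY"] = ""  # set your key here

litellm.log_raw_request_response = True  # flag added in this PR


def print_raw_request(kwargs, completion_response, start_time, end_time):
    # The patch stores the curl command (or a redaction/error notice) under
    # litellm_params["metadata"]["raw_request"] in the call details.
    metadata = (kwargs.get("litellm_params") or {}).get("metadata") or {}
    print("raw_request:", metadata.get("raw_request", "<not captured>"))


# custom python callbacks can be used alongside named integrations like "langfuse"
litellm.success_callback = [print_raw_request]

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
)
```

If `litellm.turn_off_message_logging` is also enabled, the same key holds the redaction notice instead of the curl command, matching the branch added in `litellm/utils.py`.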