diff --git a/litellm/integrations/datadog/datadog.py b/litellm/integrations/datadog/datadog.py
index 2217ded37d..e8a74baa78 100644
--- a/litellm/integrations/datadog/datadog.py
+++ b/litellm/integrations/datadog/datadog.py
@@ -256,6 +256,10 @@ class DataDogLogger(CustomBatchLogger):
         """
         import json
 
+        from litellm.litellm_core_utils.litellm_logging import (
+            truncate_standard_logging_payload_content,
+        )
+
         standard_logging_object: Optional[StandardLoggingPayload] = kwargs.get(
             "standard_logging_object", None
         )
@@ -267,6 +271,7 @@ class DataDogLogger(CustomBatchLogger):
             status = DataDogStatus.ERROR
 
         # Build the initial payload
+        truncate_standard_logging_payload_content(standard_logging_object)
         json_payload = json.dumps(standard_logging_object, default=str)
 
         verbose_logger.debug("Datadog: Logger - Logging payload = %s", json_payload)
diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py
index d4b0b42d1f..0c59d0c93c 100644
--- a/litellm/integrations/gcs_bucket/gcs_bucket.py
+++ b/litellm/integrations/gcs_bucket/gcs_bucket.py
@@ -138,12 +138,12 @@ class GCSBucketLogger(GCSBucketBase):
                     logging_payload=logging_payload,
                 )
 
-        except Exception as e:
-            verbose_logger.exception(f"GCS Bucket batch logging error: {str(e)}")
-        finally:
             # Clear the queue after processing
             self.log_queue.clear()
+        except Exception as e:
+            verbose_logger.exception(f"GCS Bucket batch logging error: {str(e)}")
+
     def _get_object_name(
         self, kwargs: Dict, logging_payload: StandardLoggingPayload, response_obj: Any
     ) -> str:
diff --git a/litellm/litellm_core_utils/core_helpers.py b/litellm/litellm_core_utils/core_helpers.py
index ceb150946c..9c31159517 100644
--- a/litellm/litellm_core_utils/core_helpers.py
+++ b/litellm/litellm_core_utils/core_helpers.py
@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, Union
 import httpx
 
 from litellm._logging import verbose_logger
-from litellm.types.llms.openai import AllMessageValues
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolParam
 
 if TYPE_CHECKING:
     from opentelemetry.trace import Span as _Span
diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index 851fc3f55c..d783279444 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -3026,8 +3026,6 @@ def get_standard_logging_object_payload(
             ),
         )
 
-        truncate_standard_logging_payload_content(payload)
-
         return payload
     except Exception as e:
         verbose_logger.exception(
@@ -3042,26 +3040,20 @@ def truncate_standard_logging_payload_content(
     """
     Truncate error strings and message content in logging payload
 
-    Most logging integrations - DataDog / GCS Bucket / have a limit on the size of the payload. ~around(1MB)
+    Some loggers like DataDog have a limit on the size of the payload. (1MB)
 
     This function truncates the error string and the message content if they exceed a certain length.
     """
-    try:
-        MAX_STR_LENGTH = 10_000
+    MAX_STR_LENGTH = 10_000
 
-        # Truncate fields that might exceed max length
-        fields_to_truncate = ["error_str", "messages", "response"]
-        for field in fields_to_truncate:
-            _truncate_field(
-                standard_logging_object=standard_logging_object,
-                field_name=field,
-                max_length=MAX_STR_LENGTH,
-            )
-    except Exception as e:
-        verbose_logger.exception(
-            "Error truncating standard logging payload - {}".format(str(e))
+    # Truncate fields that might exceed max length
+    fields_to_truncate = ["error_str", "messages", "response"]
+    for field in fields_to_truncate:
+        _truncate_field(
+            standard_logging_object=standard_logging_object,
+            field_name=field,
+            max_length=MAX_STR_LENGTH,
         )
-    return
 
 
 def _truncate_text(text: str, max_length: int) -> str:
diff --git a/litellm/llms/azure_ai/chat/transformation.py b/litellm/llms/azure_ai/chat/transformation.py
index afedc95001..5c6f004e0e 100644
--- a/litellm/llms/azure_ai/chat/transformation.py
+++ b/litellm/llms/azure_ai/chat/transformation.py
@@ -13,7 +13,7 @@
 from litellm.llms.base_llm.chat.transformation import LiteLLMLoggingObj
 from litellm.llms.openai.common_utils import drop_params_from_unprocessable_entity_error
 from litellm.llms.openai.openai import OpenAIConfig
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues
+from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolParam
 from litellm.types.utils import ModelResponse, ProviderField
 from litellm.utils import _add_path_to_api_base
diff --git a/tests/logging_callback_tests/test_datadog.py b/tests/logging_callback_tests/test_datadog.py
index e81416efb5..2d3cb36046 100644
--- a/tests/logging_callback_tests/test_datadog.py
+++ b/tests/logging_callback_tests/test_datadog.py
@@ -1,10 +1,7 @@
 import io
 import os
-from re import M
 import sys
 
-from litellm.integrations.custom_logger import CustomLogger
-
 
 sys.path.insert(0, os.path.abspath("../.."))
 
@@ -395,14 +392,6 @@ async def test_datadog_payload_environment_variables():
         pytest.fail(f"Test failed with exception: {str(e)}")
 
 
-class TestDDLogger(CustomLogger):
-    def __init__(self):
-        self.standard_logging_object: Optional[StandardLoggingPayload] = None
-
-    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
-        self.standard_logging_object = kwargs["standard_logging_object"]
-
-
 @pytest.mark.asyncio
 async def test_datadog_payload_content_truncation():
     """
@@ -410,13 +399,15 @@ async def test_datadog_payload_content_truncation():
 
     DataDog has a limit of 1MB for the logged payload size.
     """
-    dd_logger = TestDDLogger()
-    litellm.callbacks = [dd_logger]
+    dd_logger = DataDogLogger()
 
+    # Create a standard payload with very long content
+    standard_payload = create_standard_logging_payload()
     long_content = "x" * 80_000  # Create string longer than MAX_STR_LENGTH (10_000)
 
-    # messages with long content
-    messages = [
+    # Modify payload with long content
+    standard_payload["error_str"] = long_content
+    standard_payload["messages"] = [
         {
             "role": "user",
             "content": [
@@ -430,26 +421,28 @@ async def test_datadog_payload_content_truncation():
             ],
         }
     ]
 
-    await litellm.acompletion(
-        model="gpt-3.5-turbo",
-        messages=messages,
-        temperature=0.2,
-        mock_response=long_content,
-    )
-
-    await asyncio.sleep(2)
+    standard_payload["response"] = {"choices": [{"message": {"content": long_content}}]}
 
     # Create the payload
-    standard_logging_payload = dd_logger.standard_logging_object
+    dd_payload = dd_logger.create_datadog_logging_payload(
+        kwargs={"standard_logging_object": standard_payload},
+        response_obj=None,
+        start_time=datetime.now(),
+        end_time=datetime.now(),
+    )
 
-    print("standard_logging_payload", json.dumps(standard_logging_payload, indent=2))
+    print("dd_payload", json.dumps(dd_payload, indent=2))
+
+    # Parse the message back to dict to verify truncation
+    message_dict = json.loads(dd_payload["message"])
 
     # Verify truncation of fields
+    assert len(message_dict["error_str"]) < 10_100, "error_str not truncated correctly"
     assert (
-        len(str(standard_logging_payload["messages"])) < 10_100
+        len(str(message_dict["messages"])) < 10_100
     ), "messages not truncated correctly"
     assert (
-        len(str(standard_logging_payload["response"])) < 10_100
+        len(str(message_dict["response"])) < 10_100
     ), "response not truncated correctly"