diff --git a/litellm/integrations/datadog/datadog.py b/litellm/integrations/datadog/datadog.py
index e8a74baa78..2217ded37d 100644
--- a/litellm/integrations/datadog/datadog.py
+++ b/litellm/integrations/datadog/datadog.py
@@ -256,10 +256,6 @@
         """
         import json
 
-        from litellm.litellm_core_utils.litellm_logging import (
-            truncate_standard_logging_payload_content,
-        )
-
         standard_logging_object: Optional[StandardLoggingPayload] = kwargs.get(
             "standard_logging_object", None
         )
@@ -271,7 +267,6 @@ class DataDogLogger(CustomBatchLogger):
             status = DataDogStatus.ERROR
 
         # Build the initial payload
-        truncate_standard_logging_payload_content(standard_logging_object)
         json_payload = json.dumps(standard_logging_object, default=str)
 
         verbose_logger.debug("Datadog: Logger - Logging payload = %s", json_payload)
diff --git a/litellm/integrations/gcs_bucket/gcs_bucket.py b/litellm/integrations/gcs_bucket/gcs_bucket.py
index 0c59d0c93c..d4b0b42d1f 100644
--- a/litellm/integrations/gcs_bucket/gcs_bucket.py
+++ b/litellm/integrations/gcs_bucket/gcs_bucket.py
@@ -138,11 +138,11 @@
                     logging_payload=logging_payload,
                 )
 
-            # Clear the queue after processing
-            self.log_queue.clear()
-
         except Exception as e:
             verbose_logger.exception(f"GCS Bucket batch logging error: {str(e)}")
+        finally:
+            # Clear the queue after processing
+            self.log_queue.clear()
 
     def _get_object_name(
         self, kwargs: Dict, logging_payload: StandardLoggingPayload, response_obj: Any
diff --git a/litellm/litellm_core_utils/core_helpers.py b/litellm/litellm_core_utils/core_helpers.py
index 9c31159517..ceb150946c 100644
--- a/litellm/litellm_core_utils/core_helpers.py
+++ b/litellm/litellm_core_utils/core_helpers.py
@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any, List, Optional, Union
 import httpx
 
 from litellm._logging import verbose_logger
-from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolParam
+from litellm.types.llms.openai import AllMessageValues
 
 if TYPE_CHECKING:
     from opentelemetry.trace import Span as _Span
diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index 97b8799284..1be9bbf2e1 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -3026,6 +3026,8 @@
             ),
         )
 
+        truncate_standard_logging_payload_content(payload)
+
         return payload
     except Exception as e:
         verbose_logger.exception(
@@ -3040,20 +3042,26 @@
     """
     Truncate error strings and message content in logging payload
 
-    Some loggers like DataDog have a limit on the size of the payload. (1MB)
+    Most logging integrations - DataDog / GCS Bucket / have a limit on the size of the payload. ~around(1MB)
 
     This function truncates the error string and the message content if they exceed a certain length.
     """
-    MAX_STR_LENGTH = 10_000
+    try:
+        MAX_STR_LENGTH = 10_000
 
-    # Truncate fields that might exceed max length
-    fields_to_truncate = ["error_str", "messages", "response"]
-    for field in fields_to_truncate:
-        _truncate_field(
-            standard_logging_object=standard_logging_object,
-            field_name=field,
-            max_length=MAX_STR_LENGTH,
+        # Truncate fields that might exceed max length
+        fields_to_truncate = ["error_str", "messages", "response"]
+        for field in fields_to_truncate:
+            _truncate_field(
+                standard_logging_object=standard_logging_object,
+                field_name=field,
+                max_length=MAX_STR_LENGTH,
+            )
+    except Exception as e:
+        verbose_logger.exception(
+            "Error truncating standard logging payload - {}".format(str(e))
         )
+        return
 
 
 def _truncate_text(text: str, max_length: int) -> str:
diff --git a/litellm/llms/azure_ai/chat/transformation.py b/litellm/llms/azure_ai/chat/transformation.py
index 5c6f004e0e..afedc95001 100644
--- a/litellm/llms/azure_ai/chat/transformation.py
+++ b/litellm/llms/azure_ai/chat/transformation.py
@@ -13,7 +13,7 @@
 from litellm.llms.base_llm.chat.transformation import LiteLLMLoggingObj
 from litellm.llms.openai.common_utils import drop_params_from_unprocessable_entity_error
 from litellm.llms.openai.openai import OpenAIConfig
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import AllMessageValues, ChatCompletionToolParam
+from litellm.types.llms.openai import AllMessageValues
 from litellm.types.utils import ModelResponse, ProviderField
 from litellm.utils import _add_path_to_api_base
diff --git a/tests/logging_callback_tests/test_datadog.py b/tests/logging_callback_tests/test_datadog.py
index 2d3cb36046..e81416efb5 100644
--- a/tests/logging_callback_tests/test_datadog.py
+++ b/tests/logging_callback_tests/test_datadog.py
@@ -1,7 +1,9 @@
 import io
 import os
 import sys
 
+from litellm.integrations.custom_logger import CustomLogger
+
 sys.path.insert(0, os.path.abspath("../.."))
 
 
@@ -392,6 +394,14 @@ async def test_datadog_payload_environment_variables():
         pytest.fail(f"Test failed with exception: {str(e)}")
 
 
+class TestDDLogger(CustomLogger):
+    def __init__(self):
+        self.standard_logging_object: Optional[StandardLoggingPayload] = None
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        self.standard_logging_object = kwargs["standard_logging_object"]
+
+
 @pytest.mark.asyncio
 async def test_datadog_payload_content_truncation():
     """
@@ -399,15 +409,13 @@
     This test verifies that the content length of the logged payload is truncated
     DataDog has a limit of 1MB for the logged payload size.
     """
-    dd_logger = DataDogLogger()
+    dd_logger = TestDDLogger()
+    litellm.callbacks = [dd_logger]
 
-    # Create a standard payload with very long content
-    standard_payload = create_standard_logging_payload()
     long_content = "x" * 80_000  # Create string longer than MAX_STR_LENGTH (10_000)
 
-    # Modify payload with long content
-    standard_payload["error_str"] = long_content
-    standard_payload["messages"] = [
+    # messages with long content
+    messages = [
         {
             "role": "user",
             "content": [
@@ -421,28 +428,26 @@
             },
         ],
     }
 ]
-    standard_payload["response"] = {"choices": [{"message": {"content": long_content}}]}
-
-    # Create the payload
-    dd_payload = dd_logger.create_datadog_logging_payload(
-        kwargs={"standard_logging_object": standard_payload},
-        response_obj=None,
-        start_time=datetime.now(),
-        end_time=datetime.now(),
+    await litellm.acompletion(
+        model="gpt-3.5-turbo",
+        messages=messages,
+        temperature=0.2,
+        mock_response=long_content,
     )
 
-    print("dd_payload", json.dumps(dd_payload, indent=2))
+    await asyncio.sleep(2)
 
-    # Parse the message back to dict to verify truncation
-    message_dict = json.loads(dd_payload["message"])
+    # Create the payload
+    standard_logging_payload = dd_logger.standard_logging_object
+
+    print("standard_logging_payload", json.dumps(standard_logging_payload, indent=2))
 
     # Verify truncation of fields
-    assert len(message_dict["error_str"]) < 10_100, "error_str not truncated correctly"
     assert (
-        len(str(message_dict["messages"])) < 10_100
+        len(str(standard_logging_payload["messages"])) < 10_100
     ), "messages not truncated correctly"
     assert (
-        len(str(message_dict["response"])) < 10_100
+        len(str(standard_logging_payload["response"])) < 10_100
     ), "response not truncated correctly"
 