From bd5d32a9952ae595029fa90721ca893745337fc6 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 16 Apr 2025 16:13:48 -0700 Subject: [PATCH 1/6] fix(triton/completion/transformation.py): remove bad_words / stop words from triton call parameter 'bad_words' has invalid type. It should be either 'int', 'bool', or 'string'. --- .../llms/triton/completion/transformation.py | 2 -- tests/llm_translation/test_triton.py | 21 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/litellm/llms/triton/completion/transformation.py b/litellm/llms/triton/completion/transformation.py index 21fcf2eefb..0db83b2d3d 100644 --- a/litellm/llms/triton/completion/transformation.py +++ b/litellm/llms/triton/completion/transformation.py @@ -201,8 +201,6 @@ class TritonGenerateConfig(TritonConfig): "max_tokens": int( optional_params.get("max_tokens", DEFAULT_MAX_TOKENS_FOR_TRITON) ), - "bad_words": [""], - "stop_words": [""], }, "stream": bool(stream), } diff --git a/tests/llm_translation/test_triton.py b/tests/llm_translation/test_triton.py index 7e4ba92f23..8a3bbb4661 100644 --- a/tests/llm_translation/test_triton.py +++ b/tests/llm_translation/test_triton.py @@ -20,6 +20,7 @@ from litellm.llms.triton.embedding.transformation import TritonEmbeddingConfig import litellm + def test_split_embedding_by_shape_passes(): try: data = [ @@ -230,3 +231,23 @@ async def test_triton_embeddings(): assert response.data[0]["embedding"] == [0.1, 0.2] except Exception as e: pytest.fail(f"Error occurred: {e}") + + + +def test_triton_generate_raw_request(): + from litellm.utils import return_raw_request + from litellm.types.utils import CallTypes + try: + kwargs = { + "model": "triton/llama-3-8b-instruct", + "messages": [{"role": "user", "content": "who are u?"}], + "api_base": "http://localhost:8000/generate", + } + raw_request = return_raw_request(endpoint=CallTypes.completion, kwargs=kwargs) + print("raw_request", raw_request) + assert raw_request is not None + assert "bad_words" not in json.dumps(raw_request["raw_request_body"]) + assert "stop_words" not in json.dumps(raw_request["raw_request_body"]) + except Exception as e: + pytest.fail(f"Error occurred: {e}") + From f661dd7776b96fd284e3354eb468e4f91b04f0e1 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 16 Apr 2025 16:58:02 -0700 Subject: [PATCH 2/6] fix(proxy_track_cost_callback.py): add debug logging for track cost callback error --- .../proxy/hooks/proxy_track_cost_callback.py | 8 +++++-- .../litellm_core_utils/test_core_helpers.py | 22 +++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 tests/litellm/litellm_core_utils/test_core_helpers.py diff --git a/litellm/proxy/hooks/proxy_track_cost_callback.py b/litellm/proxy/hooks/proxy_track_cost_callback.py index cf0e0a07ed..4b8447fb03 100644 --- a/litellm/proxy/hooks/proxy_track_cost_callback.py +++ b/litellm/proxy/hooks/proxy_track_cost_callback.py @@ -199,9 +199,13 @@ class _ProxyDBLogger(CustomLogger): except Exception as e: error_msg = f"Error in tracking cost callback - {str(e)}\n Traceback:{traceback.format_exc()}" model = kwargs.get("model", "") - metadata = kwargs.get("litellm_params", {}).get("metadata", {}) + metadata = get_litellm_metadata_from_kwargs(kwargs=kwargs) + litellm_metadata = kwargs.get("litellm_params", {}).get( + "litellm_metadata", {} + ) + old_metadata = kwargs.get("litellm_params", {}).get("metadata", {}) call_type = kwargs.get("call_type", "") - error_msg += f"\n Args to _PROXY_track_cost_callback\n model: {model}\n 
metadata: {metadata}\n call_type: {call_type}\n" + error_msg += f"\n Args to _PROXY_track_cost_callback\n model: {model}\n chosen_metadata: {metadata}\n litellm_metadata: {litellm_metadata}\n old_metadata: {old_metadata}\n call_type: {call_type}\n" asyncio.create_task( proxy_logging_obj.failed_tracking_alert( error_message=error_msg, diff --git a/tests/litellm/litellm_core_utils/test_core_helpers.py b/tests/litellm/litellm_core_utils/test_core_helpers.py new file mode 100644 index 0000000000..3b7acd0d50 --- /dev/null +++ b/tests/litellm/litellm_core_utils/test_core_helpers.py @@ -0,0 +1,22 @@ +import json +import os +import sys +from unittest.mock import MagicMock, patch + +import pytest + +sys.path.insert( + 0, os.path.abspath("../../..") +) # Adds the parent directory to the system path + +from litellm.litellm_core_utils.core_helpers import get_litellm_metadata_from_kwargs + + +def test_get_litellm_metadata_from_kwargs(): + kwargs = { + "litellm_params": { + "litellm_metadata": {}, + "metadata": {"user_api_key": "1234567890"}, + }, + } + assert get_litellm_metadata_from_kwargs(kwargs) == {"user_api_key": "1234567890"} From d2ad3bbfc0894e50f416bd55815b657f0fc3d56d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 16 Apr 2025 17:27:35 -0700 Subject: [PATCH 3/6] fix(_logging.py): add sensitive data filter to logging Fixes https://github.com/BerriAI/litellm/issues/7603 Also addresses https://github.com/BerriAI/litellm/issues/9815#issuecomment-2806844725 --- litellm/_logging.py | 55 +++++++++++++++ litellm/proxy/_new_secret_config.yaml | 9 ++- tests/litellm/test_logging.py | 99 +++++++++++++++++++++++++++ 3 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 tests/litellm/test_logging.py diff --git a/litellm/_logging.py b/litellm/_logging.py index d7e2c9e778..9af4a53ad8 100644 --- a/litellm/_logging.py +++ b/litellm/_logging.py @@ -1,6 +1,7 @@ import json import logging import os +import re import sys from datetime import datetime from logging import Formatter @@ -41,6 +42,54 @@ class JsonFormatter(Formatter): return json.dumps(json_record) +class SensitiveDataFilter(logging.Filter): + """Filter to redact sensitive information from logs""" + + SENSITIVE_KEYS = [ + "credentials", + "api_key", + "key", + "api_base", + "password", + "secret", + "token", + ] + + def filter(self, record): + if not hasattr(record, "msg") or not record.msg: + return True + + # Convert message to string if it's not already + msg = str(record.msg) + + key_pattern = r'["\']?([^"\':\s]+)["\']?\s*[:=]' + keys = re.findall(key_pattern, msg) + + # Redact sensitive information + for key in keys: + # Check if any sensitive key is a substring of the current key + if any( + sensitive_key in key.lower() for sensitive_key in self.SENSITIVE_KEYS + ): + # Handle JSON-like strings + pattern = f'"{key}":\\s*"[^"]*"' + msg = re.sub(pattern, f'"{key}": "REDACTED"', msg) + + # Handle key-value pairs in plain text + pattern = f"{key}\\s*=\\s*[^\\s,}}]+" + msg = re.sub(pattern, f"{key}=REDACTED", msg) + + # Handle dictionary-like strings + pattern = f"'{key}':\\s*'[^']*'" + msg = re.sub(pattern, f"'{key}': 'REDACTED'", msg) + + pattern = f"\"{key}\":\\s*'[^']*'" + msg = re.sub(pattern, f"\"{key}\": 'REDACTED'", msg) + + record.msg = msg + return True + + # Function to set up exception handlers for JSON logging def _setup_json_exception_handlers(formatter): # Create a handler with JSON formatting for exceptions @@ -103,6 +152,12 @@ verbose_proxy_logger = logging.getLogger("LiteLLM Proxy") verbose_router_logger = 
logging.getLogger("LiteLLM Router") verbose_logger = logging.getLogger("LiteLLM") +# Add the sensitive data filter to all loggers +sensitive_filter = SensitiveDataFilter() +verbose_router_logger.addFilter(sensitive_filter) +verbose_proxy_logger.addFilter(sensitive_filter) +verbose_logger.addFilter(sensitive_filter) + # Add the handler to the logger verbose_router_logger.addHandler(handler) verbose_proxy_logger.addHandler(handler) diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index e166133cec..f1af784e5e 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -28,13 +28,20 @@ model_list: api_base: https://krris-m2f9a9i7-eastus2.openai.azure.com/ model_info: base_model: azure/gpt-4o-realtime-preview-2024-10-01 - + - model_name: "vertex_ai/gemini-1.5-pro-001" + litellm_params: + model: vertex_ai/gemini-1.5-pro-001 + vertex_credentials: {"project_id": "krris-m2f9a9i7", "location": "us-central1"} + api_base: https://us-central1-aiplatform.googleapis.com/v1 litellm_settings: num_retries: 0 callbacks: ["prometheus"] check_provider_endpoint: true +router_settings: + routing_strategy: "usage-based-routing-v2" + files_settings: - custom_llm_provider: gemini api_key: os.environ/GEMINI_API_KEY diff --git a/tests/litellm/test_logging.py b/tests/litellm/test_logging.py new file mode 100644 index 0000000000..5e8eb1d10d --- /dev/null +++ b/tests/litellm/test_logging.py @@ -0,0 +1,99 @@ +import logging + +import pytest + +from litellm._logging import SensitiveDataFilter + + +def test_sensitive_data_filter(): + # Create a test logger + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + + # Create a filter + sensitive_filter = SensitiveDataFilter() + + # Test cases + test_cases = [ + { + "input": '{"vertex_credentials": {"project_id": "test-project", "location": "us-central1", "private_key": "test-private-key"}}', + "expected": '{"vertex_credentials": {"project_id": "test-project", "location": "us-central1", "private_key": "REDACTED"}}', + }, + { + "input": '{"api_key": "sk-1234567890"}', + "expected": '{"api_key": "REDACTED"}', + }, + { + "input": '{"openai_api_key": "sk-1234567890"}', + "expected": '{"openai_api_key": "REDACTED"}', + }, + {"input": '{"password": "secret123"}', "expected": '{"password": "REDACTED"}'}, + {"input": '{"token": "abc123"}', "expected": '{"token": "REDACTED"}'}, + { + "input": '{"api_base": "https://api.example.com"}', + "expected": '{"api_base": "REDACTED"}', + }, + { + "input": '{"non_sensitive": "value", "credentials": "secret"}', + "expected": '{"non_sensitive": "value", "credentials": "REDACTED"}', + }, + ] + + for test_case in test_cases: + # Create a log record + record = logging.LogRecord( + name="test_logger", + level=logging.INFO, + pathname="test.py", + lineno=1, + msg=test_case["input"], + args=(), + exc_info=None, + ) + + # Apply the filter + sensitive_filter.filter(record) + + # Verify the output + assert ( + record.msg == test_case["expected"] + ), f"Failed for input: {test_case['input']}" + + +def test_sensitive_data_filter_with_different_formats(): + # Create a test logger + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + + # Create a filter + sensitive_filter = SensitiveDataFilter() + + # Test different formats + test_cases = [ + {"input": "api_key=sk-1234567890", "expected": "api_key=REDACTED"}, + { + "input": "'credentials': 'secret123'", + "expected": "'credentials': 'REDACTED'", + }, + {"input": "\"token\": 'abc123'", 
"expected": "\"token\": 'REDACTED'"}, + ] + + for test_case in test_cases: + # Create a log record + record = logging.LogRecord( + name="test_logger", + level=logging.INFO, + pathname="test.py", + lineno=1, + msg=test_case["input"], + args=(), + exc_info=None, + ) + + # Apply the filter + sensitive_filter.filter(record) + + # Verify the output + assert ( + record.msg == test_case["expected"] + ), f"Failed for input: {test_case['input']}" From b461905745dea076160913264fec88c2488d0a9f Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 19 Apr 2025 09:34:07 -0700 Subject: [PATCH 4/6] fix(_logging.py): fix logging --- litellm/_logging.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/litellm/_logging.py b/litellm/_logging.py index 9af4a53ad8..41f2bc2bfd 100644 --- a/litellm/_logging.py +++ b/litellm/_logging.py @@ -59,8 +59,11 @@ class SensitiveDataFilter(logging.Filter): if not hasattr(record, "msg") or not record.msg: return True - # Convert message to string if it's not already - msg = str(record.msg) + # If the message is a format string with args, we need to format it first + if record.args: + msg = record.msg % record.args + else: + msg = str(record.msg) key_pattern = r'["\']?([^"\':\s]+)["\']?\s*[:=]' keys = re.findall(key_pattern, msg) @@ -86,7 +89,9 @@ class SensitiveDataFilter(logging.Filter): pattern = f"\"{key}\":\\s*'[^']*'" msg = re.sub(pattern, f"\"{key}\": 'REDACTED'", msg) + # Set the message and clear args since we've already formatted it record.msg = msg + record.args = None return True From aa587d9fc8d8cd58dfa9df0cead72ab7cad86926 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 19 Apr 2025 10:00:37 -0700 Subject: [PATCH 5/6] fix(_logging.py): handle more cases of sensitive keys in logs --- litellm/_logging.py | 49 +++++++++++------- tests/litellm/test_logging.py | 97 +++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 19 deletions(-) diff --git a/litellm/_logging.py b/litellm/_logging.py index 41f2bc2bfd..8572d28f36 100644 --- a/litellm/_logging.py +++ b/litellm/_logging.py @@ -53,6 +53,7 @@ class SensitiveDataFilter(logging.Filter): "password", "secret", "token", + "private_key", # Added for nested JSON case ] def filter(self, record): @@ -65,29 +66,39 @@ class SensitiveDataFilter(logging.Filter): else: msg = str(record.msg) - key_pattern = r'["\']?([^"\':\s]+)["\']?\s*[:=]' - keys = re.findall(key_pattern, msg) - # Redact sensitive information - for key in keys: - # Check if any sensitive key is a substring of the current key - if any( - sensitive_key in key.lower() for sensitive_key in self.SENSITIVE_KEYS - ): - # Handle JSON-like strings - pattern = f'"{key}":\\s*"[^"]*"' - msg = re.sub(pattern, f'"{key}": "REDACTED"', msg) + for key in self.SENSITIVE_KEYS: + # Create patterns for compound keys (e.g., openai_api_key) + key_pattern = f"[a-zA-Z0-9_/\\\\-]*{key}[a-zA-Z0-9_/\\\\-]*" - # Handle key-value pairs in plain text - pattern = f"{key}\\s*=\\s*[^\\s,}}]+" - msg = re.sub(pattern, f"{key}=REDACTED", msg) + # Handle JSON-like strings with double quotes + json_pattern = f'"({key_pattern})":\\s*"[^"]*"' + msg = re.sub(json_pattern, r'"\1": "REDACTED"', msg, flags=re.IGNORECASE) - # Handle dictionary-like strings - pattern = f"'{key}':\\s*'[^']*'" - msg = re.sub(pattern, f"'{key}': 'REDACTED'", msg) + # Handle dictionary-like strings with single quotes + dict_pattern = f"'({key_pattern})':\\s*'[^']*'" + msg = re.sub(dict_pattern, r"'\1': 'REDACTED'", msg, flags=re.IGNORECASE) - pattern = 
f"\"{key}\":\\s*'[^']*'" - msg = re.sub(pattern, f"\"{key}\": 'REDACTED'", msg) + # Handle mixed quote styles + mixed_pattern = f"\"({key_pattern})\":\\s*'[^']*'" + msg = re.sub(mixed_pattern, r'"\1": \'REDACTED\'', msg, flags=re.IGNORECASE) + + # Handle key-value pairs in plain text + # Convert snake_case and special characters to flexible matching + display_key = key.replace("_", "[-_ ]") + # Match both original and display versions of the key, preserving the separator and spacing + plain_pattern = ( + f"\\b({key_pattern}|{display_key})\\s*([:=])\\s*[^,\\s][^,]*" + ) + msg = re.sub( + plain_pattern, + lambda m: f"{m.group(1)}{m.group(2)}{' ' if m.group(2) == ':' else ''}REDACTED", + msg, + flags=re.IGNORECASE, + ) + + # Handle mixed quotes without escaping + msg = msg.replace('\\"', '"').replace("\\'", "'") # Set the message and clear args since we've already formatted it record.msg = msg diff --git a/tests/litellm/test_logging.py b/tests/litellm/test_logging.py index 5e8eb1d10d..d151699d8e 100644 --- a/tests/litellm/test_logging.py +++ b/tests/litellm/test_logging.py @@ -97,3 +97,100 @@ def test_sensitive_data_filter_with_different_formats(): assert ( record.msg == test_case["expected"] ), f"Failed for input: {test_case['input']}" + + +def test_sensitive_data_filter_with_special_characters(): + # Create a test logger + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + + # Create a filter + sensitive_filter = SensitiveDataFilter() + + # Test cases with special characters in keys + test_cases = [ + { + "input": '{"api_key": "sk-1234567890"}', + "expected": '{"api_key": "REDACTED"}', + }, + { + "input": '{"api-key": "sk-1234567890"}', + "expected": '{"api-key": "REDACTED"}', + }, + { + "input": '{"api/key": "sk-1234567890"}', + "expected": '{"api/key": "REDACTED"}', + }, + { + "input": '{"api\\key": "sk-1234567890"}', + "expected": '{"api\\key": "REDACTED"}', + }, + ] + + for test_case in test_cases: + # Create a log record + record = logging.LogRecord( + name="test_logger", + level=logging.INFO, + pathname="test.py", + lineno=1, + msg=test_case["input"], + args=(), + exc_info=None, + ) + + # Apply the filter + sensitive_filter.filter(record) + + # Verify the output + assert ( + record.msg == test_case["expected"] + ), f"Failed for input: {test_case['input']}" + + +def test_sensitive_data_filter_with_format_strings(): + # Create a test logger + logger = logging.getLogger("test_logger") + logger.setLevel(logging.INFO) + + # Create a filter + sensitive_filter = SensitiveDataFilter() + + # Test cases with format strings + test_cases = [ + { + "input": "API key: %s", + "args": ("sk-1234567890",), + "expected": "API key: REDACTED", + }, + { + "input": "Credentials: %s, Token: %s", + "args": ("secret123", "abc123"), + "expected": "Credentials: REDACTED, Token: REDACTED", + }, + { + "input": "API base: %s, Key: %s", + "args": ("https://api.example.com", "sk-1234567890"), + "expected": "API base: REDACTED, Key: REDACTED", + }, + ] + + for test_case in test_cases: + # Create a log record + record = logging.LogRecord( + name="test_logger", + level=logging.INFO, + pathname="test.py", + lineno=1, + msg=test_case["input"], + args=test_case["args"], + exc_info=None, + ) + + # Apply the filter + sensitive_filter.filter(record) + + # Verify the output + assert ( + record.msg == test_case["expected"] + ), f"Failed for input: {test_case['input']} with args: {test_case['args']}" From b85fd5e4b0b73920a704df66fff1d52fd09f7a1a Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Sat, 
19 Apr 2025 10:06:51 -0700 Subject: [PATCH 6/6] fix(_logging.py): add reliability handling for sensitive filter --- litellm/_logging.py | 136 +++++++++++++++++++++++----------- tests/litellm/test_logging.py | 24 ++++++ 2 files changed, 115 insertions(+), 45 deletions(-) diff --git a/litellm/_logging.py b/litellm/_logging.py index 8572d28f36..5aa9aeff27 100644 --- a/litellm/_logging.py +++ b/litellm/_logging.py @@ -57,53 +57,66 @@ class SensitiveDataFilter(logging.Filter): ] def filter(self, record): - if not hasattr(record, "msg") or not record.msg: + try: + if not hasattr(record, "msg") or not record.msg: + return True + + # If the message is a format string with args, we need to format it first + if record.args: + msg = record.msg % record.args + else: + msg = str(record.msg) + + # Redact sensitive information + for key in self.SENSITIVE_KEYS: + # Create patterns for compound keys (e.g., openai_api_key) + key_pattern = f"[a-zA-Z0-9_/\\\\-]*{key}[a-zA-Z0-9_/\\\\-]*" + + # Handle JSON-like strings with double quotes + json_pattern = f'"({key_pattern})":\\s*"[^"]*"' + msg = re.sub( + json_pattern, r'"\1": "REDACTED"', msg, flags=re.IGNORECASE + ) + + # Handle dictionary-like strings with single quotes + dict_pattern = f"'({key_pattern})':\\s*'[^']*'" + msg = re.sub( + dict_pattern, r"'\1': 'REDACTED'", msg, flags=re.IGNORECASE + ) + + # Handle mixed quote styles + mixed_pattern = f"\"({key_pattern})\":\\s*'[^']*'" + msg = re.sub( + mixed_pattern, r'"\1": \'REDACTED\'', msg, flags=re.IGNORECASE + ) + + # Handle key-value pairs in plain text + # Convert snake_case and special characters to flexible matching + display_key = key.replace("_", "[-_ ]") + # Match both original and display versions of the key, preserving the separator and spacing + plain_pattern = ( + f"\\b({key_pattern}|{display_key})\\s*([:=])\\s*[^,\\s][^,]*" + ) + msg = re.sub( + plain_pattern, + lambda m: f"{m.group(1)}{m.group(2)}{' ' if m.group(2) == ':' else ''}REDACTED", + msg, + flags=re.IGNORECASE, + ) + + # Handle mixed quotes without escaping + msg = msg.replace('\\"', '"').replace("\\'", "'") + + # Set the message and clear args since we've already formatted it + record.msg = msg + record.args = None return True - - # If the message is a format string with args, we need to format it first - if record.args: - msg = record.msg % record.args - else: - msg = str(record.msg) - - # Redact sensitive information - for key in self.SENSITIVE_KEYS: - # Create patterns for compound keys (e.g., openai_api_key) - key_pattern = f"[a-zA-Z0-9_/\\\\-]*{key}[a-zA-Z0-9_/\\\\-]*" - - # Handle JSON-like strings with double quotes - json_pattern = f'"({key_pattern})":\\s*"[^"]*"' - msg = re.sub(json_pattern, r'"\1": "REDACTED"', msg, flags=re.IGNORECASE) - - # Handle dictionary-like strings with single quotes - dict_pattern = f"'({key_pattern})':\\s*'[^']*'" - msg = re.sub(dict_pattern, r"'\1': 'REDACTED'", msg, flags=re.IGNORECASE) - - # Handle mixed quote styles - mixed_pattern = f"\"({key_pattern})\":\\s*'[^']*'" - msg = re.sub(mixed_pattern, r'"\1": \'REDACTED\'', msg, flags=re.IGNORECASE) - - # Handle key-value pairs in plain text - # Convert snake_case and special characters to flexible matching - display_key = key.replace("_", "[-_ ]") - # Match both original and display versions of the key, preserving the separator and spacing - plain_pattern = ( - f"\\b({key_pattern}|{display_key})\\s*([:=])\\s*[^,\\s][^,]*" + except Exception as e: + # If any error occurs during filtering, log the error and continue + 
logging.getLogger("litellm").error( + f"Error in SensitiveDataFilter: {str(e)}" ) - msg = re.sub( - plain_pattern, - lambda m: f"{m.group(1)}{m.group(2)}{' ' if m.group(2) == ':' else ''}REDACTED", - msg, - flags=re.IGNORECASE, - ) - - # Handle mixed quotes without escaping - msg = msg.replace('\\"', '"').replace("\\'", "'") - - # Set the message and clear args since we've already formatted it - record.msg = msg - record.args = None - return True + return True # Function to set up exception handlers for JSON logging @@ -236,3 +249,36 @@ def _is_debugging_on() -> bool: if verbose_logger.isEnabledFor(logging.DEBUG) or set_verbose is True: return True return False + + +class ResilientLogger(logging.Logger): + """A logger that continues to work even if filters fail""" + + def handle(self, record): + """ + Handle a record by passing it to all handlers. + If a filter fails, log the error and continue. + """ + if self.disabled: + return + + # Try to filter the record + try: + if not self.filter(record): + return + except Exception as e: + # If filter fails, log the error and continue + logging.getLogger("litellm").error(f"Filter failed: {str(e)}") + + # If we get here, either filtering passed or failed gracefully + # Now pass to handlers + for handler in self.handlers: + try: + if handler.filter(record): + handler.handle(record) + except Exception as e: + logging.getLogger("litellm").error(f"Handler failed: {str(e)}") + + +# Replace the default logger class with our resilient one +logging.setLoggerClass(ResilientLogger) diff --git a/tests/litellm/test_logging.py b/tests/litellm/test_logging.py index d151699d8e..6d29941389 100644 --- a/tests/litellm/test_logging.py +++ b/tests/litellm/test_logging.py @@ -194,3 +194,27 @@ def test_sensitive_data_filter_with_format_strings(): assert ( record.msg == test_case["expected"] ), f"Failed for input: {test_case['input']} with args: {test_case['args']}" + + +def test_sensitive_data_filter_reliability(): + # Create a test logger + logger = logging.getLogger("test_logger") + logger.setLevel(logging.DEBUG) + + # Create a SensitiveDataFilter and break its regex pattern to cause failure + sensitive_filter = SensitiveDataFilter() + sensitive_filter.SENSITIVE_KEYS = [ + ")" + ] # Invalid regex pattern that will cause failure + + # Add the filter + logger.addFilter(sensitive_filter) + + # Try to log a message - this should not raise an exception + try: + logger.debug("Test message with sensitive data: api_key=sk-1234567890") + except Exception as e: + pytest.fail(f"Logging failed with exception: {str(e)}") + + # Clean up + logger.removeFilter(sensitive_filter)