From 7c9591881c9981c53a53aa7afeba86a2d18c70ed Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Mon, 9 Sep 2024 16:05:48 -0700
Subject: [PATCH] use callback_settings when initializing otel

---
 litellm/proxy/common_utils/callback_utils.py |  8 +++--
 litellm/proxy/proxy_config.yaml              |  6 +++-
 litellm/proxy/proxy_server.py                |  6 +++-
 litellm/tests/test_async_opentelemetry.py    | 35 ++++++++++++++++++++
 4 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/litellm/proxy/common_utils/callback_utils.py b/litellm/proxy/common_utils/callback_utils.py
index 4ccf61e234..4d0fd23030 100644
--- a/litellm/proxy/common_utils/callback_utils.py
+++ b/litellm/proxy/common_utils/callback_utils.py
@@ -17,7 +17,7 @@ def initialize_callbacks_on_proxy(
     litellm_settings: dict,
     callback_specific_params: dict = {},
 ):
-    from litellm.proxy.proxy_server import prisma_client
+    from litellm.proxy.proxy_server import callback_settings, prisma_client
 
     verbose_proxy_logger.debug(
         f"{blue_color_code}initializing callbacks={value} on proxy{reset_color_code}"
@@ -34,7 +34,11 @@ def initialize_callbacks_on_proxy(
         from litellm.integrations.opentelemetry import OpenTelemetry
         from litellm.proxy import proxy_server
 
-        open_telemetry_logger = OpenTelemetry()
+        _otel_settings = {}
+        if "otel" in callback_settings:
+            _otel_settings = callback_settings["otel"]
+
+        open_telemetry_logger = OpenTelemetry(**_otel_settings)
 
         # Add Otel as a service callback
         if "otel" not in litellm.service_callback:
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index b407b0d7ad..e385a23d7e 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -16,7 +16,11 @@ guardrails:
       output_parse_pii: True
 
 litellm_settings:
-  callbacks: ["prometheus"]
+  callbacks: ["otel"]
+
+callback_settings:
+  otel:
+    message_logging: False
 
 general_settings:
   master_key: sk-1234
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index b6ebbe1df8..341f2b5d62 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -478,6 +478,7 @@ experimental = False
 llm_router: Optional[litellm.Router] = None
 llm_model_list: Optional[list] = None
 general_settings: dict = {}
+callback_settings: dict = {}
 log_file = "api_log.json"
 worker_config = None
 master_key = None
@@ -1491,7 +1492,7 @@
         """
         Load config values into proxy global state
        """
-        global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, user_custom_key_generate, use_background_health_checks, health_check_interval, use_queue, custom_db_client, proxy_budget_rescheduler_max_time, proxy_budget_rescheduler_min_time, ui_access_mode, litellm_master_key_hash, proxy_batch_write_at, disable_spend_logs, prompt_injection_detection_obj, redis_usage_cache, store_model_in_db, premium_user, open_telemetry_logger, health_check_details
+        global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, user_custom_key_generate, use_background_health_checks, health_check_interval, use_queue, custom_db_client, proxy_budget_rescheduler_max_time, proxy_budget_rescheduler_min_time, ui_access_mode, litellm_master_key_hash, proxy_batch_write_at, disable_spend_logs, prompt_injection_detection_obj, redis_usage_cache, store_model_in_db, premium_user, open_telemetry_logger, health_check_details, callback_settings
 
         # Load existing config
         if os.environ.get("LITELLM_CONFIG_BUCKET_NAME") is not None:
@@ -1533,6 +1534,9 @@
             _license_check.license_str = os.getenv("LITELLM_LICENSE", None)
             premium_user = _license_check.is_premium()
 
+            ## Callback settings
+            callback_settings = config.get("callback_settings", None)
+
             ## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..)
             litellm_settings = config.get("litellm_settings", None)
             if litellm_settings is None:
diff --git a/litellm/tests/test_async_opentelemetry.py b/litellm/tests/test_async_opentelemetry.py
index aee434f2a4..1fac0bb67e 100644
--- a/litellm/tests/test_async_opentelemetry.py
+++ b/litellm/tests/test_async_opentelemetry.py
@@ -12,6 +12,41 @@ from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
 
 verbose_logger.setLevel(logging.DEBUG)
 
+
+class TestOpenTelemetry(OpenTelemetry):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.kwargs = None
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print("in async_log_success_event for TestOpenTelemetry kwargs=", self.kwargs)
+        self.kwargs = kwargs
+        await super().async_log_success_event(
+            kwargs, response_obj, start_time, end_time
+        )
+
+
+@pytest.mark.asyncio
+async def test_otel_with_message_logging_off():
+    from litellm.integrations.opentelemetry import OpenTelemetry
+
+    otel_logger = TestOpenTelemetry(
+        message_logging=False, config=OpenTelemetryConfig(exporter="console")
+    )
+
+    litellm.callbacks = [otel_logger]
+
+    response = await litellm.acompletion(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "hi"}],
+        mock_response="hi",
+    )
+    print("response", response)
+
+    assert otel_logger.kwargs["messages"] == [
+        {"role": "user", "content": "redacted-by-litellm"}
+    ]
+
+
 @pytest.mark.asyncio
 @pytest.mark.skip(reason="Local only test. WIP.")
 async def test_async_otel_callback():