use callback_settings when initializing otel

This commit is contained in:
Ishaan Jaff 2024-09-09 16:05:48 -07:00
parent b36f964217
commit 7c9591881c
4 changed files with 51 additions and 4 deletions

View file

@ -17,7 +17,7 @@ def initialize_callbacks_on_proxy(
litellm_settings: dict, litellm_settings: dict,
callback_specific_params: dict = {}, callback_specific_params: dict = {},
): ):
from litellm.proxy.proxy_server import prisma_client from litellm.proxy.proxy_server import callback_settings, prisma_client
verbose_proxy_logger.debug( verbose_proxy_logger.debug(
f"{blue_color_code}initializing callbacks={value} on proxy{reset_color_code}" f"{blue_color_code}initializing callbacks={value} on proxy{reset_color_code}"
@ -34,7 +34,11 @@ def initialize_callbacks_on_proxy(
from litellm.integrations.opentelemetry import OpenTelemetry from litellm.integrations.opentelemetry import OpenTelemetry
from litellm.proxy import proxy_server from litellm.proxy import proxy_server
open_telemetry_logger = OpenTelemetry() _otel_settings = {}
if "otel" in callback_settings:
_otel_settings = callback_settings["otel"]
open_telemetry_logger = OpenTelemetry(**_otel_settings)
# Add Otel as a service callback # Add Otel as a service callback
if "otel" not in litellm.service_callback: if "otel" not in litellm.service_callback:

View file

@ -16,7 +16,11 @@ guardrails:
output_parse_pii: True output_parse_pii: True
litellm_settings: litellm_settings:
callbacks: ["prometheus"] callbacks: ["otel"]
callback_settings:
otel:
message_logging: False
general_settings: general_settings:
master_key: sk-1234 master_key: sk-1234

View file

@ -478,6 +478,7 @@ experimental = False
llm_router: Optional[litellm.Router] = None llm_router: Optional[litellm.Router] = None
llm_model_list: Optional[list] = None llm_model_list: Optional[list] = None
general_settings: dict = {} general_settings: dict = {}
callback_settings: dict = {}
log_file = "api_log.json" log_file = "api_log.json"
worker_config = None worker_config = None
master_key = None master_key = None
@ -1491,7 +1492,7 @@ class ProxyConfig:
""" """
Load config values into proxy global state Load config values into proxy global state
""" """
global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, user_custom_key_generate, use_background_health_checks, health_check_interval, use_queue, custom_db_client, proxy_budget_rescheduler_max_time, proxy_budget_rescheduler_min_time, ui_access_mode, litellm_master_key_hash, proxy_batch_write_at, disable_spend_logs, prompt_injection_detection_obj, redis_usage_cache, store_model_in_db, premium_user, open_telemetry_logger, health_check_details global master_key, user_config_file_path, otel_logging, user_custom_auth, user_custom_auth_path, user_custom_key_generate, use_background_health_checks, health_check_interval, use_queue, custom_db_client, proxy_budget_rescheduler_max_time, proxy_budget_rescheduler_min_time, ui_access_mode, litellm_master_key_hash, proxy_batch_write_at, disable_spend_logs, prompt_injection_detection_obj, redis_usage_cache, store_model_in_db, premium_user, open_telemetry_logger, health_check_details, callback_settings
# Load existing config # Load existing config
if os.environ.get("LITELLM_CONFIG_BUCKET_NAME") is not None: if os.environ.get("LITELLM_CONFIG_BUCKET_NAME") is not None:
@ -1533,6 +1534,9 @@ class ProxyConfig:
_license_check.license_str = os.getenv("LITELLM_LICENSE", None) _license_check.license_str = os.getenv("LITELLM_LICENSE", None)
premium_user = _license_check.is_premium() premium_user = _license_check.is_premium()
## Callback settings
callback_settings = config.get("callback_settings", None)
## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..) ## LITELLM MODULE SETTINGS (e.g. litellm.drop_params=True,..)
litellm_settings = config.get("litellm_settings", None) litellm_settings = config.get("litellm_settings", None)
if litellm_settings is None: if litellm_settings is None:

View file

@ -12,6 +12,41 @@ from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfi
verbose_logger.setLevel(logging.DEBUG) verbose_logger.setLevel(logging.DEBUG)
class TestOpenTelemetry(OpenTelemetry):
    """OpenTelemetry subclass that captures the kwargs of the last success event.

    Used by tests to inspect what payload the logger received (e.g. to check
    message redaction) while still exercising the real OTEL logging path.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # kwargs dict of the most recent success event; None until one arrives.
        self.kwargs = None

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # NOTE(review): this prints the *previous* event's kwargs — self.kwargs
        # is only updated on the next line. Presumably intentional for debugging.
        print("in async_log_success_event for TestOpenTelemetry kwargs=", self.kwargs)
        self.kwargs = kwargs
        await super().async_log_success_event(kwargs, response_obj, start_time, end_time)
@pytest.mark.asyncio
async def test_otel_with_message_logging_off():
    """With message_logging=False, logged message content must be redacted.

    Drives a mocked completion through a TestOpenTelemetry callback and checks
    that the captured kwargs contain the redaction placeholder instead of the
    original user message.
    """
    from litellm.integrations.opentelemetry import OpenTelemetry

    otel_logger = TestOpenTelemetry(
        message_logging=False, config=OpenTelemetryConfig(exporter="console")
    )
    litellm.callbacks = [otel_logger]

    response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        mock_response="hi",
    )
    print("response", response)

    # The original "hi" content must have been replaced by the redaction marker.
    assert otel_logger.kwargs["messages"] == [
        {"role": "user", "content": "redacted-by-litellm"}
    ]
@pytest.mark.asyncio @pytest.mark.asyncio
@pytest.mark.skip(reason="Local only test. WIP.") @pytest.mark.skip(reason="Local only test. WIP.")
async def test_async_otel_callback(): async def test_async_otel_callback():