From 3ccdb42d26713670912d8d1eeca3d56260d648b9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 24 Sep 2024 18:29:52 -0700 Subject: [PATCH] [Fix] OTEL - Don't log messages when callback settings disable message logging (#5875) * fix otel dont log messages * otel fix redis failure hook logging --- litellm/_service_logger.py | 31 +++++++----- litellm/integrations/opentelemetry.py | 72 +++++++++++++++------------ 2 files changed, 60 insertions(+), 43 deletions(-) diff --git a/litellm/_service_logger.py b/litellm/_service_logger.py index 6412ec801..af191eaa0 100644 --- a/litellm/_service_logger.py +++ b/litellm/_service_logger.py @@ -196,20 +196,27 @@ class ServiceLogging(CustomLogger): end_time=end_time, event_metadata=event_metadata, ) + elif callback == "otel": + from litellm.integrations.opentelemetry import OpenTelemetry + from litellm.proxy.proxy_server import open_telemetry_logger - from litellm.proxy.proxy_server import open_telemetry_logger + await self.init_otel_logger_if_none() - if not isinstance(error, str): - error = str(error) - if open_telemetry_logger is not None: - await self.otel_logger.async_service_failure_hook( - payload=payload, - parent_otel_span=parent_otel_span, - start_time=start_time, - end_time=end_time, - event_metadata=event_metadata, - error=error, - ) + if not isinstance(error, str): + error = str(error) + + if ( + parent_otel_span is not None + and open_telemetry_logger is not None + and isinstance(open_telemetry_logger, OpenTelemetry) + ): + await self.otel_logger.async_service_success_hook( + payload=payload, + parent_otel_span=parent_otel_span, + start_time=start_time, + end_time=end_time, + event_metadata=event_metadata, + ) async def async_post_call_failure_hook( self, diff --git a/litellm/integrations/opentelemetry.py b/litellm/integrations/opentelemetry.py index 27bd25d76..877fccd1e 100644 --- a/litellm/integrations/opentelemetry.py +++ b/litellm/integrations/opentelemetry.py @@ -409,13 +409,51 @@ class 
OpenTelemetry(CustomLogger): str(optional_params.get("stream", False)), ) + if optional_params.get("user"): + span.set_attribute(SpanAttributes.LLM_USER, optional_params.get("user")) + + # The unique identifier for the completion. + if response_obj.get("id"): + span.set_attribute("gen_ai.response.id", response_obj.get("id")) + + # The model used to generate the response. + if response_obj.get("model"): + span.set_attribute( + SpanAttributes.LLM_RESPONSE_MODEL, response_obj.get("model") + ) + + usage = response_obj.get("usage") + if usage: + span.set_attribute( + SpanAttributes.LLM_USAGE_TOTAL_TOKENS, + usage.get("total_tokens"), + ) + + # The number of tokens used in the LLM response (completion). + span.set_attribute( + SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, + usage.get("completion_tokens"), + ) + + # The number of tokens used in the LLM prompt. + span.set_attribute( + SpanAttributes.LLM_USAGE_PROMPT_TOKENS, + usage.get("prompt_tokens"), + ) + + ######################################################################## + ########## LLM Request Messages / tools / content Attributes ########### + ######################################################################### + + if litellm.turn_off_message_logging is True: + return + if self.message_logging is not True: + return + if optional_params.get("tools"): tools = optional_params["tools"] self.set_tools_attributes(span, tools) - if optional_params.get("user"): - span.set_attribute(SpanAttributes.LLM_USER, optional_params.get("user")) - if kwargs.get("messages"): for idx, prompt in enumerate(kwargs.get("messages")): if prompt.get("role"): @@ -472,34 +510,6 @@ class OpenTelemetry(CustomLogger): tool_calls[0].get("function").get("arguments"), ) - # The unique identifier for the completion. - if response_obj.get("id"): - span.set_attribute("gen_ai.response.id", response_obj.get("id")) - - # The model used to generate the response.
- if response_obj.get("model"): - span.set_attribute( - SpanAttributes.LLM_RESPONSE_MODEL, response_obj.get("model") - ) - - usage = response_obj.get("usage") - if usage: - span.set_attribute( - SpanAttributes.LLM_USAGE_TOTAL_TOKENS, - usage.get("total_tokens"), - ) - - # The number of tokens used in the LLM response (completion). - span.set_attribute( - SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, - usage.get("completion_tokens"), - ) - - # The number of tokens used in the LLM prompt. - span.set_attribute( - SpanAttributes.LLM_USAGE_PROMPT_TOKENS, - usage.get("prompt_tokens"), - ) except Exception as e: verbose_logger.error( "OpenTelemetry logging error in set_attributes %s", str(e)