diff --git a/litellm/__init__.py b/litellm/__init__.py
index 645a0bccdf..7dcc934a68 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -38,7 +38,7 @@ success_callback: List[Union[str, Callable]] = []
 failure_callback: List[Union[str, Callable]] = []
 service_callback: List[Union[str, Callable]] = []
 _custom_logger_compatible_callbacks_literal = Literal[
-    "lago", "openmeter", "logfire", "dynamic_rate_limiter"
+    "lago", "openmeter", "logfire", "dynamic_rate_limiter", "langsmith", "galileo"
 ]
 callbacks: List[Union[Callable, _custom_logger_compatible_callbacks_literal]] = []
 _langfuse_default_tags: Optional[
diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index 3fde07815e..001c69d35a 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -39,7 +39,6 @@ from litellm.utils import (
     add_breadcrumb,
     capture_exception,
     customLogger,
-    langsmithLogger,
     liteDebuggerClient,
     logfireLogger,
     lunaryLogger,
@@ -89,7 +88,6 @@ alerts_channel = None
 heliconeLogger = None
 athinaLogger = None
 promptLayerLogger = None
-langsmithLogger = None
 logfireLogger = None
 weightsBiasesLogger = None
 customLogger = None
@@ -136,7 +134,7 @@ in_memory_trace_id_cache = ServiceTraceIDCache()
 class Logging:
-    global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, logfireLogger, capture_exception, add_breadcrumb, lunaryLogger, logfireLogger, prometheusLogger, slack_app
+    global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, logfireLogger, capture_exception, add_breadcrumb, lunaryLogger, logfireLogger, prometheusLogger, slack_app
     custom_pricing: bool = False
     stream_options = None
@@ -738,23 +736,6 @@ class Logging:
                         end_time=end_time,
                         print_verbose=print_verbose,
                     )
-                if callback == "langsmith":
-                    print_verbose("reaches langsmith for logging!")
-                    if self.stream:
-                        if "complete_streaming_response" not in kwargs:
-                            continue
-                        else:
-                            print_verbose(
-                                "reaches langsmith for streaming logging!"
-                            )
-                            result = kwargs["complete_streaming_response"]
-                    langsmithLogger.log_event(
-                        kwargs=self.model_call_details,
-                        response_obj=result,
-                        start_time=start_time,
-                        end_time=end_time,
-                        print_verbose=print_verbose,
-                    )
                 if callback == "logfire":
                     global logfireLogger
                     verbose_logger.debug("reaches logfire for success logging!")
@@ -1822,7 +1803,7 @@ def set_callbacks(callback_list, function_id=None):
     """
     Globally sets the callback client
    """
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, athinaLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, lunaryLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger, logfireLogger, dynamoLogger, s3Logger, dataDogLogger, prometheusLogger, greenscaleLogger, openMeterLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, athinaLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, lunaryLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, logfireLogger, dynamoLogger, s3Logger, dataDogLogger, prometheusLogger, greenscaleLogger, openMeterLogger
 
     try:
         for callback in callback_list:
@@ -1903,8 +1884,6 @@ def set_callbacks(callback_list, function_id=None):
                 s3Logger = S3Logger()
             elif callback == "wandb":
                 weightsBiasesLogger = WeightsBiasesLogger()
-            elif callback == "langsmith":
-                langsmithLogger = LangsmithLogger()
             elif callback == "logfire":
                 logfireLogger = LogfireLogger()
             elif callback == "aispend":
@@ -1957,6 +1936,15 @@ def _init_custom_logger_compatible_class(
         _in_memory_loggers.append(_openmeter_logger)
         return _openmeter_logger  # type: ignore
+    elif logging_integration == "langsmith":
+        for callback in _in_memory_loggers:
+            if isinstance(callback, LangsmithLogger):
+                return callback  # type: ignore
+
+        _langsmith_logger = LangsmithLogger()
+        _in_memory_loggers.append(_langsmith_logger)
+        return _langsmith_logger  # type: ignore
+
     elif logging_integration == "galileo":
         for callback in _in_memory_loggers:
             if isinstance(callback, GalileoObserve):
                 return callback
@@ -2025,6 +2013,10 @@ def get_custom_logger_compatible_class(
         for callback in _in_memory_loggers:
             if isinstance(callback, GalileoObserve):
                 return callback
+    elif logging_integration == "langsmith":
+        for callback in _in_memory_loggers:
+            if isinstance(callback, LangsmithLogger):
+                return callback
     elif logging_integration == "logfire":
         if "LOGFIRE_TOKEN" not in os.environ:
             raise ValueError("LOGFIRE_TOKEN not found in environment variables")
diff --git a/litellm/utils.py b/litellm/utils.py
index 48fdf80c59..5ee34197d7 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -417,6 +417,16 @@ def function_setup(
                     # we only support async dynamo db logging for acompletion/aembedding since that's used on proxy
                    litellm._async_success_callback.append(callback)
                    removed_async_items.append(index)
+                elif callback == "langsmith":
+                    callback_class = litellm.litellm_core_utils.litellm_logging._init_custom_logger_compatible_class(  # type: ignore
+                        callback, internal_usage_cache=None, llm_router=None
+                    )
+
+                    # don't double add a callback
+                    if not any(
+                        isinstance(cb, type(callback_class)) for cb in litellm.callbacks
+                    ):
+                        litellm.callbacks.append(callback_class)  # type: ignore
 
            # Pop the async items from success_callback in reverse order to avoid index issues
            for index in reversed(removed_async_items):
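
Usage sketch (not part of the diff): with "langsmith" now routed through the custom-logger-compatible path, function_setup() turns the string into a LangsmithLogger instance via _init_custom_logger_compatible_class() and appends it to litellm.callbacks, skipping the append if one is already registered. The snippet below is a minimal illustration; the LANGSMITH_API_KEY variable name and the model/message values are assumptions for the example and the exact configuration keys live in the LangsmithLogger integration, not in this diff.

    import os
    import litellm

    # Assumed credential for the Langsmith integration; check the
    # LangsmithLogger implementation / docs for the exact variable names.
    os.environ["LANGSMITH_API_KEY"] = "ls-..."

    # With this change, the "langsmith" string is converted into a
    # LangsmithLogger instance and appended to litellm.callbacks during
    # function_setup(), with a guard against double registration.
    litellm.success_callback = ["langsmith"]

    # Example call whose success event should now be logged to Langsmith.
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hello"}],
    )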