diff --git a/litellm/__init__.py b/litellm/__init__.py
index 165113a595..fcef6bc56f 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -50,10 +50,10 @@ if set_verbose == True:
     _turn_on_debug()
 ###############################################
 ### Callbacks /Logging / Success / Failure Handlers #####
-input_callback: List[Union[str, Callable]] = []
-success_callback: List[Union[str, Callable]] = []
-failure_callback: List[Union[str, Callable]] = []
-service_callback: List[Union[str, Callable]] = []
+input_callback: List[Union[str, Callable, CustomLogger]] = []
+success_callback: List[Union[str, Callable, CustomLogger]] = []
+failure_callback: List[Union[str, Callable, CustomLogger]] = []
+service_callback: List[Union[str, Callable, CustomLogger]] = []
 _custom_logger_compatible_callbacks_literal = Literal[
     "lago",
     "openmeter",
@@ -90,13 +90,13 @@ langsmith_batch_size: Optional[int] = None
 argilla_batch_size: Optional[int] = None
 datadog_use_v1: Optional[bool] = False  # if you want to use v1 datadog logged payload
 argilla_transformation_object: Optional[Dict[str, Any]] = None
-_async_input_callback: List[Callable] = (
+_async_input_callback: List[Union[str, Callable, CustomLogger]] = (
     []
 )  # internal variable - async custom callbacks are routed here.
-_async_success_callback: List[Union[str, Callable]] = (
+_async_success_callback: List[Union[str, Callable, CustomLogger]] = (
     []
 )  # internal variable - async custom callbacks are routed here.
-_async_failure_callback: List[Callable] = (
+_async_failure_callback: List[Union[str, Callable, CustomLogger]] = (
     []
 )  # internal variable - async custom callbacks are routed here.
 pre_call_rules: List[Callable] = []
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index 1722a4d796..9933ff64b7 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -10,3 +10,6 @@ model_list:
       api_base: http://0.0.0.0:8090
       timeout: 2
       num_retries: 0
+
+litellm_settings:
+  success_callback: ["langfuse"]
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index 37d721fc29..f3fec6c2a0 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -318,9 +318,61 @@ def custom_llm_setup():
             litellm._custom_providers.append(custom_llm["provider"])
 
 
+def _add_custom_logger_callback_to_specific_event(
+    callback: str, logging_event: Literal["success", "failure"]
+) -> None:
+    """
+    Add a custom logger callback to the specific event
+    """
+    from litellm import _custom_logger_compatible_callbacks_literal
+    from litellm.litellm_core_utils.litellm_logging import (
+        _init_custom_logger_compatible_class,
+    )
+
+    if callback not in litellm._known_custom_logger_compatible_callbacks:
+        verbose_logger.debug(
+            f"Callback {callback} is not a valid custom logger compatible callback. Known list - {litellm._known_custom_logger_compatible_callbacks}"
+        )
+        return
+
+    callback_class = _init_custom_logger_compatible_class(
+        cast(_custom_logger_compatible_callbacks_literal, callback),
+        internal_usage_cache=None,
+        llm_router=None,
+    )
+
+    # don't double add a callback
+    if callback_class is not None and not any(
+        isinstance(cb, type(callback_class)) for cb in litellm.callbacks  # type: ignore
+    ):
+        if logging_event == "success":
+            litellm.success_callback.append(callback_class)
+            litellm._async_success_callback.append(callback_class)
+            if callback in litellm.success_callback:
+                litellm.success_callback.remove(
+                    callback
+                )  # remove the string from the callback list
+            if callback in litellm._async_success_callback:
+                litellm._async_success_callback.remove(
+                    callback
+                )  # remove the string from the callback list
+        elif logging_event == "failure":
+            litellm.failure_callback.append(callback_class)
+            litellm._async_failure_callback.append(callback_class)
+            if callback in litellm.failure_callback:
+                litellm.failure_callback.remove(
+                    callback
+                )  # remove the string from the callback list
+            if callback in litellm._async_failure_callback:
+                litellm._async_failure_callback.remove(
+                    callback
+                )  # remove the string from the callback list
+
+
 def function_setup(  # noqa: PLR0915
     original_function: str, rules_obj, start_time, *args, **kwargs
 ):  # just run once to check if user wants to send their data anywhere - PostHog/Sentry/Slack/etc.
+    ### NOTICES ###
     from litellm import Logging as LiteLLMLogging
     from litellm.litellm_core_utils.litellm_logging import set_callbacks
 
@@ -401,27 +453,11 @@
             # we only support async dynamo db logging for acompletion/aembedding since that's used on proxy
             litellm._async_success_callback.append(callback)
             removed_async_items.append(index)
-        elif callback in litellm._known_custom_logger_compatible_callbacks:
-            from litellm.litellm_core_utils.litellm_logging import (
-                _init_custom_logger_compatible_class,
-            )
-
-            callback_class = _init_custom_logger_compatible_class(
-                callback,  # type: ignore
-                internal_usage_cache=None,
-                llm_router=None,  # type: ignore
-            )
-
-            # don't double add a callback
-            if callback_class is not None and not any(
-                isinstance(cb, type(callback_class)) for cb in litellm.callbacks
-            ):
-                litellm.callbacks.append(callback_class)  # type: ignore
-                litellm.input_callback.append(callback_class)  # type: ignore
-                litellm.success_callback.append(callback_class)  # type: ignore
-                litellm.failure_callback.append(callback_class)  # type: ignore
-                litellm._async_success_callback.append(callback_class)  # type: ignore
-                litellm._async_failure_callback.append(callback_class)  # type: ignore
+        elif (
+            callback in litellm._known_custom_logger_compatible_callbacks
+            and isinstance(callback, str)
+        ):
+            _add_custom_logger_callback_to_specific_event(callback, "success")
 
     # Pop the async items from success_callback in reverse order to avoid index issues
     for index in reversed(removed_async_items):
diff --git a/tests/local_testing/test_utils.py b/tests/local_testing/test_utils.py
index fbc1f9d7d4..ea3649ca83 100644
--- a/tests/local_testing/test_utils.py
+++ b/tests/local_testing/test_utils.py
@@ -1496,24 +1496,34 @@ def test_get_num_retries(num_retries):
     )
 
 
-@pytest.mark.parametrize("filter_invalid_headers", [True, False])
-@pytest.mark.parametrize(
-    "custom_llm_provider, expected_result",
-    [("anthropic", {"anthropic-beta": "123"}), ("bedrock", {}), ("vertex_ai", {})],
-)
-def test_get_clean_extra_headers(
-    filter_invalid_headers, custom_llm_provider, expected_result, monkeypatch
-):
-    from litellm.utils import get_clean_extra_headers
+def test_add_custom_logger_callback_to_specific_event(monkeypatch):
+    from litellm.utils import _add_custom_logger_callback_to_specific_event
 
-    monkeypatch.setattr(litellm, "filter_invalid_headers", filter_invalid_headers)
+    monkeypatch.setattr(litellm, "success_callback", [])
+    monkeypatch.setattr(litellm, "failure_callback", [])
 
-    if filter_invalid_headers:
-        assert (
-            get_clean_extra_headers({"anthropic-beta": "123"}, custom_llm_provider)
-            == expected_result
-        )
-    else:
-        assert get_clean_extra_headers(
-            {"anthropic-beta": "123"}, custom_llm_provider
-        ) == {"anthropic-beta": "123"}
+    _add_custom_logger_callback_to_specific_event("langfuse", "success")
+
+    assert len(litellm.success_callback) == 1
+    assert len(litellm.failure_callback) == 0
+
+
+def test_add_custom_logger_callback_to_specific_event_e2e(monkeypatch):
+
+    monkeypatch.setattr(litellm, "success_callback", [])
+    monkeypatch.setattr(litellm, "failure_callback", [])
+    monkeypatch.setattr(litellm, "callbacks", [])
+
+    litellm.success_callback = ["humanloop"]
+
+    curr_len_success_callback = len(litellm.success_callback)
+    curr_len_failure_callback = len(litellm.failure_callback)
+
+    litellm.completion(
+        model="gpt-4o-mini",
+        messages=[{"role": "user", "content": "Hello, world!"}],
+        mock_response="Testing langfuse",
+    )
+
+    assert len(litellm.success_callback) == curr_len_success_callback
+    assert len(litellm.failure_callback) == curr_len_failure_callback
diff --git a/tests/logging_callback_tests/test_unit_tests_init_callbacks.py b/tests/logging_callback_tests/test_unit_tests_init_callbacks.py
index 69bee7d402..53ad2f7196 100644
--- a/tests/logging_callback_tests/test_unit_tests_init_callbacks.py
+++ b/tests/logging_callback_tests/test_unit_tests_init_callbacks.py
@@ -193,8 +193,8 @@ async def use_callback_in_llm_call(
     elif used_in == "success_callback":
         print(f"litellm.success_callback: {litellm.success_callback}")
         print(f"litellm._async_success_callback: {litellm._async_success_callback}")
-        assert isinstance(litellm.success_callback[1], expected_class)
-        assert len(litellm.success_callback) == 2  # ["lago", LagoLogger]
+        assert isinstance(litellm.success_callback[0], expected_class)
+        assert len(litellm.success_callback) == 1  # [LagoLogger]
         assert isinstance(litellm._async_success_callback[0], expected_class)
         assert len(litellm._async_success_callback) == 1
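Reviewer note (not part of the patch): the sketch below mirrors what the new unit test in tests/local_testing/test_utils.py exercises, namely that a string callback listed in litellm._known_custom_logger_compatible_callbacks is swapped for its CustomLogger instance on the success lists only, while the failure lists are left untouched. It is a minimal sketch assuming an environment where the langfuse integration can be constructed (as in the repo's local_testing CI); every name used here comes from the diff above.

import litellm
from litellm.integrations.custom_logger import CustomLogger
from litellm.utils import _add_custom_logger_callback_to_specific_event

# Start from a clean slate so the routing is easy to observe.
litellm.callbacks = []
litellm.success_callback = []
litellm.failure_callback = []
litellm._async_success_callback = []
litellm._async_failure_callback = []

# Route the string callback onto the success event only.
# Assumes the langfuse custom logger can be initialized in this environment.
_add_custom_logger_callback_to_specific_event("langfuse", "success")

# The string is replaced by a CustomLogger instance on the success lists ...
assert len(litellm.success_callback) == 1
assert isinstance(litellm.success_callback[0], CustomLogger)
assert len(litellm._async_success_callback) == 1
assert isinstance(litellm._async_success_callback[0], CustomLogger)

# ... and the failure lists stay untouched.
assert litellm.failure_callback == []
assert litellm._async_failure_callback == []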