(Refactor / QA) - Use LoggingCallbackManager to append callbacks and ensure no duplicate callbacks are added (#8112)

* LoggingCallbackManager

* add logging_callback_manager

* use logging_callback_manager

* add add_litellm_failure_callback

* use add_litellm_callback

* use add_litellm_async_success_callback

* add_litellm_async_failure_callback

* linting fix

* fix logging callback manager

* test_duplicate_multiple_loggers_test

* use _reset_all_callbacks

* fix testing with dup callbacks

* test_basic_image_generation

* reset callbacks for tests

* fix check for _add_custom_logger_to_list

* fix test_amazing_sync_embedding

* fix _get_custom_logger_key

* fix batches testing

* fix _reset_all_callbacks

* fix _check_callback_list_size

* add callback_manager_test

* fix test gemini-2.0-flash-thinking-exp-01-21
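
Conceptually, this refactor replaces direct `litellm.<list>.append(...)` calls with `litellm.logging_callback_manager.add_litellm_*` methods that skip duplicates before appending. The snippet below is a minimal, self-contained sketch of that idea: the public method names mirror the change list above, but the keying scheme, the size cap, and the class body are illustrative assumptions, not litellm's actual implementation.

```python
from typing import Callable, List, Set, Union

# Assumed guard rail; the real limit lives behind _check_callback_list_size.
_MAX_CALLBACKS = 30


class DedupCallbackManager:
    """Toy manager that appends a callback only if an equivalent one is absent."""

    def __init__(self) -> None:
        # Stand-ins for litellm.callbacks / litellm.success_callback, etc.
        self.callbacks: List[Union[Callable, str]] = []
        self.success_callback: List[Union[Callable, str]] = []

    def _get_key(self, callback: Union[Callable, str]) -> str:
        # Assumed keying scheme (cf. _get_custom_logger_key in the change list):
        # strings compare by value, callables and logger instances by name.
        if isinstance(callback, str):
            return callback
        return getattr(callback, "__name__", type(callback).__name__)

    def _add_to_list(self, callback: Union[Callable, str], target: List) -> None:
        if len(target) >= _MAX_CALLBACKS:
            return  # assumed behaviour of the size check: refuse further appends
        existing: Set[str] = {self._get_key(cb) for cb in target}
        if self._get_key(callback) not in existing:
            target.append(callback)  # append only when no duplicate is registered

    def add_litellm_callback(self, callback: Union[Callable, str]) -> None:
        self._add_to_list(callback, self.callbacks)

    def add_litellm_success_callback(self, callback: Union[Callable, str]) -> None:
        self._add_to_list(callback, self.success_callback)


manager = DedupCallbackManager()
manager.add_litellm_success_callback("langfuse")
manager.add_litellm_success_callback("langfuse")  # duplicate, silently skipped
assert manager.success_callback == ["langfuse"]
```

The diff further below shows `ProxyLogging` switching its registrations over to these manager methods.
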
Ishaan Jaff, 2025-01-30 19:35:50 -08:00, committed by GitHub
parent 11c8d07ed3
commit fa1c42378f
19 changed files with 607 additions and 59 deletions

@@ -323,8 +323,8 @@ class ProxyLogging:
             # NOTE: ENSURE we only add callbacks when alerting is on
             # We should NOT add callbacks when alerting is off
             if "daily_reports" in self.alert_types:
-                litellm.callbacks.append(self.slack_alerting_instance)  # type: ignore
-                litellm.success_callback.append(
+                litellm.logging_callback_manager.add_litellm_callback(self.slack_alerting_instance)  # type: ignore
+                litellm.logging_callback_manager.add_litellm_success_callback(
                     self.slack_alerting_instance.response_taking_too_long_callback
                 )
@@ -332,10 +332,10 @@ class ProxyLogging:
         self.internal_usage_cache.dual_cache.redis_cache = redis_cache
 
     def _init_litellm_callbacks(self, llm_router: Optional[Router] = None):
-        litellm.callbacks.append(self.max_parallel_request_limiter)  # type: ignore
-        litellm.callbacks.append(self.max_budget_limiter)  # type: ignore
-        litellm.callbacks.append(self.cache_control_check)  # type: ignore
-        litellm.callbacks.append(self.service_logging_obj)  # type: ignore
+        litellm.logging_callback_manager.add_litellm_callback(self.max_parallel_request_limiter)  # type: ignore
+        litellm.logging_callback_manager.add_litellm_callback(self.max_budget_limiter)  # type: ignore
+        litellm.logging_callback_manager.add_litellm_callback(self.cache_control_check)  # type: ignore
+        litellm.logging_callback_manager.add_litellm_callback(self.service_logging_obj)  # type: ignore
         for callback in litellm.callbacks:
             if isinstance(callback, str):
                 callback = litellm.litellm_core_utils.litellm_logging._init_custom_logger_compatible_class(  # type: ignore
@@ -348,13 +348,13 @@ class ProxyLogging:
             if callback not in litellm.input_callback:
                 litellm.input_callback.append(callback)  # type: ignore
             if callback not in litellm.success_callback:
-                litellm.success_callback.append(callback)  # type: ignore
+                litellm.logging_callback_manager.add_litellm_success_callback(callback)  # type: ignore
             if callback not in litellm.failure_callback:
-                litellm.failure_callback.append(callback)  # type: ignore
+                litellm.logging_callback_manager.add_litellm_failure_callback(callback)  # type: ignore
             if callback not in litellm._async_success_callback:
-                litellm._async_success_callback.append(callback)  # type: ignore
+                litellm.logging_callback_manager.add_litellm_async_success_callback(callback)  # type: ignore
             if callback not in litellm._async_failure_callback:
-                litellm._async_failure_callback.append(callback)  # type: ignore
+                litellm.logging_callback_manager.add_litellm_async_failure_callback(callback)  # type: ignore
             if callback not in litellm.service_callback:
                 litellm.service_callback.append(callback)  # type: ignore
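
For callers, the practical effect of the hunks above is that re-registering the same callback becomes a no-op inside the manager, which is what tests like test_duplicate_multiple_loggers_test in the change list exercise. A hedged, test-style sketch of that expectation, assuming (as the pre-refactor `litellm.success_callback.append` did) that string callback names such as "langfuse" are accepted:

```python
import litellm

# Register the same string callback twice through the manager.
litellm.logging_callback_manager.add_litellm_success_callback("langfuse")
litellm.logging_callback_manager.add_litellm_success_callback("langfuse")  # expected to be a no-op

# With de-duplication handled inside the manager, only one entry should remain.
assert litellm.success_callback.count("langfuse") == 1
```

If that guarantee holds, the explicit `if callback not in ...` guards retained in `_init_litellm_callbacks` become a belt-and-suspenders check rather than the sole line of defence against duplicate loggers.
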