diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index a6b64c5f3..a6ad8eb65 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -688,6 +688,7 @@ async def _PROXY_track_cost_callback(
     start_time=None,
     end_time=None,  # start/end time for completion
 ):
+    verbose_proxy_logger.debug(f"INSIDE _PROXY_track_cost_callback")
     global prisma_client, custom_db_client
     try:
         # check if it has collected an entire stream response
diff --git a/litellm/utils.py b/litellm/utils.py
index e5bdcacdb..90616ab3c 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1418,9 +1418,6 @@ class Logging:
         """
         Implementing async callbacks, to handle asyncio event loop issues when custom integrations need to use async functions.
         """
-        verbose_logger.debug(
-            f"Async success callbacks: {litellm._async_success_callback}"
-        )
         start_time, end_time, result = self._success_handler_helper_fn(
             start_time=start_time, end_time=end_time, result=result, cache_hit=cache_hit
         )
@@ -1479,6 +1476,7 @@ class Logging:
                 callbacks.append(callback)
         else:
             callbacks = litellm._async_success_callback
+        verbose_logger.debug(f"Async success callbacks: {callbacks}")
         for callback in callbacks:
             try:
                 if callback == "cache" and litellm.cache is not None:
@@ -1525,6 +1523,7 @@ class Logging:
                             end_time=end_time,
                         )
                 if callable(callback):  # custom logger functions
+                    print_verbose(f"Making async function logging call")
                     if self.stream:
                         if "complete_streaming_response" in self.model_call_details:
                             await customLogger.async_log_event(
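
Note for reviewers: the relocated `Async success callbacks` log now reports the callbacks actually selected for the current request (including any dynamic per-request callbacks) rather than always printing the global `litellm._async_success_callback` list. A minimal sketch for surfacing the new debug lines at runtime, assuming `verbose_logger` and `verbose_proxy_logger` are the plain `logging.Logger` instances exposed by `litellm._logging` (the diff uses them but does not define them, so the import path below is an inference):

    import logging

    # Assumed import path; this diff only shows the loggers being used.
    from litellm._logging import verbose_logger, verbose_proxy_logger

    logging.basicConfig(level=logging.DEBUG)  # install a root handler that emits DEBUG records
    verbose_logger.setLevel(logging.DEBUG)        # -> "Async success callbacks: [...]"
    verbose_proxy_logger.setLevel(logging.DEBUG)  # -> "INSIDE _PROXY_track_cost_callback"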