diff --git a/litellm/integrations/braintrust_logging.py b/litellm/integrations/braintrust_logging.py
index 5128fcc49..b08cea54e 100644
--- a/litellm/integrations/braintrust_logging.py
+++ b/litellm/integrations/braintrust_logging.py
@@ -17,10 +17,17 @@ from pydantic import BaseModel
 import litellm
 from litellm import verbose_logger
 from litellm.integrations.custom_logger import CustomLogger
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    HTTPHandler,
+    get_async_httpx_client,
+    httpxSpecialProvider,
+)
 from litellm.utils import get_formatted_prompt
 
-global_braintrust_http_handler = AsyncHTTPHandler()
+global_braintrust_http_handler = get_async_httpx_client(
+    llm_provider=httpxSpecialProvider.LoggingCallback
+)
 global_braintrust_sync_http_handler = HTTPHandler()
 
 API_BASE = "https://api.braintrustdata.com/v1"
diff --git a/litellm/proxy/guardrails/guardrail_hooks/aporia_ai.py b/litellm/proxy/guardrails/guardrail_hooks/aporia_ai.py
index c16c0543d..23ac33927 100644
--- a/litellm/proxy/guardrails/guardrail_hooks/aporia_ai.py
+++ b/litellm/proxy/guardrails/guardrail_hooks/aporia_ai.py
@@ -30,7 +30,11 @@ from litellm.integrations.custom_guardrail import CustomGuardrail
 from litellm.litellm_core_utils.logging_utils import (
     convert_litellm_response_object_to_str,
 )
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+    httpxSpecialProvider,
+)
 from litellm.proxy._types import UserAPIKeyAuth
 from litellm.proxy.guardrails.guardrail_helpers import should_proceed_based_on_metadata
 from litellm.types.guardrails import GuardrailEventHooks
@@ -44,8 +48,8 @@ class AporiaGuardrail(CustomGuardrail):
     def __init__(
         self, api_key: Optional[str] = None, api_base: Optional[str] = None, **kwargs
     ):
-        self.async_handler = AsyncHTTPHandler(
-            timeout=httpx.Timeout(timeout=600.0, connect=5.0)
+        self.async_handler = get_async_httpx_client(
+            llm_provider=httpxSpecialProvider.GuardrailCallback
         )
         self.aporia_api_key = api_key or os.environ["APORIO_API_KEY"]
         self.aporia_api_base = api_base or os.environ["APORIO_API_BASE"]
diff --git a/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py b/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py
index 5aebbcf3e..99b56daa8 100644
--- a/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py
+++ b/litellm/proxy/guardrails/guardrail_hooks/lakera_ai.py
@@ -21,7 +21,11 @@ from fastapi import HTTPException
 import litellm
 from litellm._logging import verbose_proxy_logger
 from litellm.integrations.custom_guardrail import CustomGuardrail
-from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+from litellm.llms.custom_httpx.http_handler import (
+    AsyncHTTPHandler,
+    get_async_httpx_client,
+    httpxSpecialProvider,
+)
 from litellm.proxy._types import UserAPIKeyAuth
 from litellm.proxy.guardrails.guardrail_helpers import should_proceed_based_on_metadata
 from litellm.secret_managers.main import get_secret
@@ -50,8 +54,8 @@ class lakeraAI_Moderation(CustomGuardrail):
         api_key: Optional[str] = None,
         **kwargs,
     ):
-        self.async_handler = AsyncHTTPHandler(
-            timeout=httpx.Timeout(timeout=600.0, connect=5.0)
+        self.async_handler = get_async_httpx_client(
+            llm_provider=httpxSpecialProvider.GuardrailCallback
         )
         self.lakera_api_key = api_key or os.environ["LAKERA_API_KEY"]
         self.moderation_check = moderation_check
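
All three files apply the same pattern: instead of constructing an AsyncHTTPHandler directly, the caller obtains a shared async client via get_async_httpx_client and tags it with an httpxSpecialProvider value (LoggingCallback or GuardrailCallback). A minimal sketch of how another callback could adopt this pattern is below; only get_async_httpx_client and httpxSpecialProvider come from this diff, while the class name, endpoint URL, and request payload are illustrative assumptions.

# Sketch of the handler-acquisition pattern introduced in this diff.
# Only get_async_httpx_client and httpxSpecialProvider come from the change;
# ExampleGuardrailClient, the URL, and the payload are hypothetical.
from litellm.llms.custom_httpx.http_handler import (
    get_async_httpx_client,
    httpxSpecialProvider,
)


class ExampleGuardrailClient:
    def __init__(self, api_base: str) -> None:
        # Obtain the shared async client instead of constructing
        # AsyncHTTPHandler(...) directly, tagging it as a guardrail callback.
        self.async_handler = get_async_httpx_client(
            llm_provider=httpxSpecialProvider.GuardrailCallback
        )
        self.api_base = api_base

    async def validate(self, text: str) -> dict:
        # Assumes the returned client exposes an httpx-style async `post`,
        # as the existing Aporia/Lakera hooks use on self.async_handler.
        response = await self.async_handler.post(
            url=f"{self.api_base}/validate",
            json={"text": text},
        )
        response.raise_for_status()
        return response.json()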