re-use base_llm_http_handler

This commit is contained in:
Ishaan Jaff 2025-03-12 09:31:34 -07:00
parent 047879c004
commit aa250088b2

View file

@ -9,10 +9,7 @@ import litellm
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
-from litellm.responses.utils import (
-    ResponsesAPIRequestParams,
-    get_optional_params_responses_api,
-)
+from litellm.responses.utils import get_optional_params_responses_api
from litellm.types.llms.openai import (
    Reasoning,
    ResponseIncludable,
@ -26,10 +23,7 @@ from litellm.types.llms.openai import (
from litellm.types.router import GenericLiteLLMParams
from litellm.utils import ProviderConfigManager, client
-from .streaming_iterator import (
-    BaseResponsesAPIStreamingIterator,
-    ResponsesAPIStreamingIterator,
-)
+from .streaming_iterator import BaseResponsesAPIStreamingIterator
####### ENVIRONMENT VARIABLES ###################
# Initialize any necessary instances or variables here
@ -218,11 +212,8 @@ def responses(
        custom_llm_provider=custom_llm_provider,
    )
-    # Get an instance of BaseLLMHTTPHandler
-    base_llm_http_handler_instance = BaseLLMHTTPHandler()
     # Call the handler with _is_async flag instead of directly calling the async handler
-    response = base_llm_http_handler_instance.response_api_handler(
+    response = base_llm_http_handler.response_api_handler(
        model=model,
        input=input,
        responses_api_provider_config=responses_api_provider_config,