get_optional_params_responses_api

Ishaan Jaff 2025-03-11 16:00:49 -07:00
parent 4d55212c62
commit 2c6774e3ee
3 changed files with 119 additions and 0 deletions

View file

@@ -11,12 +11,14 @@ import litellm.types.utils
from litellm.llms.base_llm.chat.transformation import BaseConfig
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
from litellm.llms.base_llm.rerank.transformation import BaseRerankConfig
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.llms.custom_httpx.http_handler import (
    AsyncHTTPHandler,
    HTTPHandler,
    _get_httpx_client,
    get_async_httpx_client,
)
from litellm.types.llms.openai import ResponseInputParam, ResponsesAPIRequestParams
from litellm.types.rerank import OptionalRerankParams, RerankResponse
from litellm.types.utils import EmbeddingResponse, FileTypes, TranscriptionResponse
from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager
@@ -952,6 +954,15 @@ class BaseLLMHTTPHandler:
            return returned_response
        return model_response

    async def async_response_api_handler(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        responses_api_provider_config: BaseResponsesAPIConfig,
        responses_api_request_params: ResponsesAPIRequestParams,
    ) -> Any:
        pass

    def _handle_error(
        self, e: Exception, provider_config: Union[BaseConfig, BaseRerankConfig]
    ):
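`async_response_api_handler` is only a stub in this commit. As a rough sketch (not litellm's actual implementation), the method is expected to grow into something like the following: take the provider-specific URL, headers, and request body produced by the `BaseResponsesAPIConfig` transformation layer, POST them with the async httpx client, and return the parsed response. The function and argument names here are illustrative placeholders.

```python
from typing import Any, Dict

import httpx


async def sketch_response_api_handler(
    api_base: str,
    headers: Dict[str, str],
    request_body: Dict[str, Any],
) -> Dict[str, Any]:
    # In the real handler, api_base, headers, and request_body would be
    # produced by the provider's BaseResponsesAPIConfig transformation methods.
    async with httpx.AsyncClient() as client:
        response = await client.post(api_base, headers=headers, json=request_body)
        response.raise_for_status()
        return response.json()
```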

View file

@@ -2,6 +2,13 @@ from typing import Any, Dict, Iterable, List, Literal, Optional, Union
import httpx
import litellm
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
from litellm.responses.utils import (
    ResponsesAPIRequestParams,
    get_optional_params_responses_api,
)
from litellm.types.llms.openai import (
    Reasoning,
    ResponseIncludable,
@@ -10,6 +17,13 @@ from litellm.types.llms.openai import (
    ToolChoice,
    ToolParam,
)
from litellm.types.router import GenericLiteLLMParams
from litellm.utils import ProviderConfigManager

####### ENVIRONMENT VARIABLES ###################
# Initialize any necessary instances or variables here
base_llm_http_handler = BaseLLMHTTPHandler()
#################################################


async def aresponses(
@@ -37,7 +51,55 @@ async def aresponses(
    extra_query: Optional[Dict[str, Any]] = None,
    extra_body: Optional[Dict[str, Any]] = None,
    timeout: Optional[Union[float, httpx.Timeout]] = None,
    **kwargs,
):
    # get llm provider logic
    litellm_params = GenericLiteLLMParams(**kwargs)
    model, custom_llm_provider, dynamic_api_key, dynamic_api_base = (
        litellm.get_llm_provider(
            model=model,
            custom_llm_provider=kwargs.get("custom_llm_provider", None),
            api_base=litellm_params.api_base,
            api_key=litellm_params.api_key,
        )
    )

    # get provider config
    responses_api_provider_config: Optional[BaseResponsesAPIConfig] = (
        ProviderConfigManager.get_provider_responses_api_config(
            model=model,
            provider=litellm.LlmProviders(custom_llm_provider),
        )
    )

    if responses_api_provider_config is None:
        raise litellm.BadRequestError(
            model=model,
            llm_provider=custom_llm_provider,
            message=f"Responses API not available for custom_llm_provider={custom_llm_provider}, model: {model}",
        )

    # Gather all call parameters via locals() and combine with kwargs
    all_params = {**locals(), **kwargs}

    # Get optional parameters for the responses API
    responses_api_request_params: ResponsesAPIRequestParams = (
        get_optional_params_responses_api(
            model=model,
            responses_api_provider_config=responses_api_provider_config,
            optional_params=all_params,
        )
    )
    response = await base_llm_http_handler.async_response_api_handler(
        model=model,
        input=input,
        responses_api_provider_config=responses_api_provider_config,
        responses_api_request_params=responses_api_request_params,
    )
    return response
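A hedged usage sketch of the new coroutine. The import path assumes this file is `litellm/responses/main.py`, and the model/input values are illustrative; whether a real response comes back depends on which provider configs are registered at this commit, since the underlying HTTP handler is still a stub.

```python
import asyncio

# Assumed import path for this file; adjust if the module lives elsewhere.
from litellm.responses.main import aresponses


async def main() -> None:
    # Illustrative values. This exercises the provider lookup and parameter
    # validation added above; the HTTP handler itself is not implemented yet.
    response = await aresponses(
        model="openai/gpt-4o",
        input="Say hello in one word.",
    )
    print(response)


asyncio.run(main())
```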

View file

@@ -0,0 +1,46 @@
from typing import Any, Dict

import litellm
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.types.llms.openai import ResponsesAPIRequestParams


def get_optional_params_responses_api(
    model: str,
    responses_api_provider_config: BaseResponsesAPIConfig,
    optional_params: Dict[str, Any],
) -> ResponsesAPIRequestParams:
    """
    Get optional parameters for the responses API.

    Args:
        model: The model name
        responses_api_provider_config: The provider configuration for the responses API
        optional_params: Dictionary of all candidate parameters

    Returns:
        The provider-mapped request parameters for the responses API
    """
    # Drop parameters that were not set (None values)
    filtered_params = {k: v for k, v in optional_params.items() if v is not None}

    # Get the parameters this provider supports for the model
    supported_params = responses_api_provider_config.get_supported_openai_params(model)

    # Reject any parameters the provider does not support
    unsupported_params = [
        param for param in filtered_params if param not in supported_params
    ]
    if unsupported_params:
        raise litellm.UnsupportedParamsError(
            model=model,
            message=f"The following parameters are not supported for model {model}: {', '.join(unsupported_params)}",
        )

    # Map parameters to the provider-specific format
    mapped_params = responses_api_provider_config.map_openai_params(
        optional_params=filtered_params, model=model, drop_params=litellm.drop_params
    )
    return mapped_params
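To make the filter → validate → map flow concrete, here is a self-contained toy walkthrough that mirrors the helper's three steps without importing litellm. `_ToyResponsesConfig` is a hypothetical stand-in, not a real provider config, and its supported-parameter list is invented for the example.

```python
from typing import Any, Dict, List


class _ToyResponsesConfig:
    """Hypothetical stand-in for a BaseResponsesAPIConfig subclass."""

    def get_supported_openai_params(self, model: str) -> List[str]:
        return ["temperature", "max_output_tokens"]

    def map_openai_params(
        self, optional_params: Dict[str, Any], model: str, drop_params: bool
    ) -> Dict[str, Any]:
        # Identity mapping; a real config may rename or reshape keys.
        return dict(optional_params)


config = _ToyResponsesConfig()
candidate = {"temperature": 0.2, "max_output_tokens": 100, "tools": None}

# Step 1: drop unset (None) values.
filtered = {k: v for k, v in candidate.items() if v is not None}
# Step 2: validate against the provider's supported parameters.
unsupported = [k for k in filtered if k not in config.get_supported_openai_params("toy")]
assert not unsupported
# Step 3: map to the provider-specific format.
print(config.map_openai_params(optional_params=filtered, model="toy", drop_params=False))
# -> {'temperature': 0.2, 'max_output_tokens': 100}
```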