Working basic OpenAI Responses API request

Ishaan Jaff 2025-03-11 17:37:19 -07:00
parent c063c4b090
commit 5dac3a5d3b
5 changed files with 66 additions and 51 deletions

litellm/llms/base_llm/responses/transformation.py

@@ -1,6 +1,6 @@
 import types
 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 
 import httpx
@@ -53,10 +53,10 @@ class BaseResponsesAPIConfig(ABC):
     @abstractmethod
     def map_openai_params(
         self,
-        optional_params: dict,
+        response_api_optional_params: ResponsesAPIOptionalRequestParams,
         model: str,
         drop_params: bool,
-    ) -> ResponsesAPIOptionalRequestParams:
+    ) -> Dict:
         pass
@@ -92,7 +92,7 @@ class BaseResponsesAPIConfig(ABC):
         self,
         model: str,
         input: Union[str, ResponseInputParam],
-        response_api_optional_request_params: ResponsesAPIOptionalRequestParams,
+        response_api_optional_request_params: Dict,
         litellm_params: GenericLiteLLMParams,
         headers: dict,
     ) -> ResponsesAPIRequestParams:
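
Net effect of this interface change: map_openai_params now accepts the already-filtered ResponsesAPIOptionalRequestParams and returns a plain Dict, which transform_responses_api_request then consumes. A minimal sketch of a provider config written against the new signatures; the TypedDict below is a stand-in for litellm's ResponsesAPIOptionalRequestParams, trimmed to three fields for illustration:

from typing import Dict, Optional, TypedDict


class ResponsesAPIOptionalRequestParams(TypedDict, total=False):
    # stand-in: the real litellm TypedDict declares many more optional fields
    temperature: Optional[float]
    top_p: Optional[float]
    stream: Optional[bool]


class MyResponsesAPIConfig:
    def map_openai_params(
        self,
        response_api_optional_params: ResponsesAPIOptionalRequestParams,
        model: str,
        drop_params: bool,
    ) -> Dict:
        # pass-through mapping; a non-OpenAI provider would rename keys here
        return dict(response_api_optional_params)


config = MyResponsesAPIConfig()
print(config.map_openai_params({"temperature": 0.1}, "gpt-4o", drop_params=False))
# {'temperature': 0.1}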

litellm/llms/custom_httpx/llm_http_handler.py

@@ -1,6 +1,6 @@
 import io
 import json
-from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
 
 import httpx  # type: ignore
@@ -966,10 +966,13 @@ class BaseLLMHTTPHandler:
         custom_llm_provider: str,
         input: Union[str, ResponseInputParam],
         responses_api_provider_config: BaseResponsesAPIConfig,
-        response_api_optional_request_params: ResponsesAPIOptionalRequestParams,
+        response_api_optional_request_params: Dict,
         logging_obj: LiteLLMLoggingObj,
         litellm_params: GenericLiteLLMParams,
         client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
+        extra_headers: Optional[Dict[str, Any]] = None,
+        extra_body: Optional[Dict[str, Any]] = None,
+        timeout: Optional[Union[float, httpx.Timeout]] = None,
     ) -> ResponsesAPIResponse:
         if client is None or not isinstance(client, AsyncHTTPHandler):
             async_httpx_client = get_async_httpx_client(
@@ -1020,11 +1023,9 @@ class BaseLLMHTTPHandler:
             provider_config=responses_api_provider_config,
         )
-        base_response_api_response = ResponsesAPIResponse()
         return responses_api_provider_config.transform_response_api_response(
             model=model,
             raw_response=response,
-            model_response=base_response_api_response,
             logging_obj=logging_obj,
         )
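
Two changes in the handler: async_response_api_handler now accepts extra_headers, extra_body, and timeout explicitly, and the success path returns the provider transform directly rather than pre-building an empty ResponsesAPIResponse. A rough sketch of how such extras are typically merged into an outgoing httpx call; the function name and merge order here are illustrative assumptions, not the handler's actual internals:

from typing import Any, Dict, Optional, Union

import httpx


async def post_with_extras(
    url: str,
    data: Dict[str, Any],
    headers: Dict[str, str],
    extra_headers: Optional[Dict[str, Any]] = None,
    extra_body: Optional[Dict[str, Any]] = None,
    timeout: Optional[Union[float, httpx.Timeout]] = None,
) -> httpx.Response:
    # caller-supplied extras are merged last, so they override the defaults
    merged_headers = {**headers, **(extra_headers or {})}
    merged_body = {**data, **(extra_body or {})}
    async with httpx.AsyncClient(timeout=timeout) as session:
        return await session.post(url, json=merged_body, headers=merged_headers)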

litellm/llms/openai/responses/transformation.py

@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 
 import httpx
@@ -55,39 +55,17 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
     def map_openai_params(
         self,
-        optional_params: dict,
+        response_api_optional_params: ResponsesAPIOptionalRequestParams,
         model: str,
         drop_params: bool,
-    ) -> ResponsesAPIOptionalRequestParams:
-        return ResponsesAPIOptionalRequestParams(
-            include=optional_params.get("include"),
-            instructions=optional_params.get("instructions"),
-            max_output_tokens=optional_params.get("max_output_tokens"),
-            metadata=optional_params.get("metadata"),
-            parallel_tool_calls=optional_params.get("parallel_tool_calls"),
-            previous_response_id=optional_params.get("previous_response_id"),
-            reasoning=optional_params.get("reasoning"),
-            store=optional_params.get("store"),
-            stream=optional_params.get("stream"),
-            temperature=optional_params.get("temperature"),
-            text=optional_params.get("text"),
-            tool_choice=optional_params.get("tool_choice"),
-            tools=optional_params.get("tools"),
-            top_p=optional_params.get("top_p"),
-            truncation=optional_params.get("truncation"),
-            user=optional_params.get("user"),
-            extra_headers=optional_params.get("extra_headers"),
-            extra_query=optional_params.get("extra_query"),
-            extra_body=optional_params.get("extra_body"),
-            timeout=optional_params.get("timeout"),
-        )
+    ) -> Dict:
+        return dict(response_api_optional_params)
 
     def transform_responses_api_request(
         self,
         model: str,
         input: Union[str, ResponseInputParam],
-        response_api_optional_request_params: ResponsesAPIOptionalRequestParams,
+        response_api_optional_request_params: Dict,
         litellm_params: GenericLiteLLMParams,
         headers: dict,
     ) -> ResponsesAPIRequestParams:
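
The twenty explicit .get() copies collapse to dict(response_api_optional_params) because ResponsesAPIOptionalRequestParams is a TypedDict: at runtime it is already a plain dict, so the OpenAI config needs no per-field remapping. A quick demonstration of that runtime property, using a stand-in TypedDict:

from typing import Optional, TypedDict


class Params(TypedDict, total=False):  # stand-in for the litellm TypedDict
    temperature: Optional[float]
    stream: Optional[bool]


p: Params = {"temperature": 0.2, "stream": True}
print(type(p))  # <class 'dict'>: a TypedDict is a plain dict at runtime
print(dict(p))  # {'temperature': 0.2, 'stream': True}: a shallow copy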

litellm/responses/main.py

@@ -1,8 +1,9 @@
-from typing import Any, Dict, Iterable, List, Literal, Optional, Union
+from typing import Any, Dict, Iterable, List, Literal, Optional, Union, get_type_hints
 
 import httpx
 
 import litellm
+from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
 from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
 from litellm.llms.custom_httpx.llm_http_handler import BaseLLMHTTPHandler
 from litellm.responses.utils import (
@@ -13,12 +14,13 @@ from litellm.types.llms.openai import (
     Reasoning,
     ResponseIncludable,
     ResponseInputParam,
+    ResponsesAPIOptionalRequestParams,
     ResponseTextConfigParam,
     ToolChoice,
     ToolParam,
 )
 from litellm.types.router import GenericLiteLLMParams
-from litellm.utils import ProviderConfigManager
+from litellm.utils import ProviderConfigManager, client
 
 ####### ENVIRONMENT VARIABLES ###################
 # Initialize any necessary instances or variables here
@@ -26,6 +28,24 @@ base_llm_http_handler = BaseLLMHTTPHandler()
 #################################################
 
 
+def get_requested_response_api_optional_param(
+    params: Dict[str, Any]
+) -> ResponsesAPIOptionalRequestParams:
+    """
+    Filter parameters to only include those defined in ResponsesAPIOptionalRequestParams.
+
+    Args:
+        params: Dictionary of parameters to filter
+
+    Returns:
+        ResponsesAPIOptionalRequestParams instance with only the valid parameters
+    """
+    valid_keys = get_type_hints(ResponsesAPIOptionalRequestParams).keys()
+    filtered_params = {k: v for k, v in params.items() if k in valid_keys}
+    return ResponsesAPIOptionalRequestParams(**filtered_params)
+
+
+@client
 async def aresponses(
     input: Union[str, ResponseInputParam],
     model: str,
@@ -53,6 +73,8 @@ async def aresponses(
     timeout: Optional[Union[float, httpx.Timeout]] = None,
     **kwargs,
 ):
+    litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore
+    litellm_call_id: Optional[str] = kwargs.get("litellm_call_id", None)
+
     # get llm provider logic
     litellm_params = GenericLiteLLMParams(**kwargs)
@@ -81,22 +103,31 @@ async def aresponses(
     )
 
     # Get all parameters using locals() and combine with kwargs
-    all_params = {**locals(), **kwargs}
+    local_vars = locals()
+    local_vars.update(kwargs)
+    # Get ResponsesAPIOptionalRequestParams with only valid parameters
+    response_api_optional_params: ResponsesAPIOptionalRequestParams = (
+        get_requested_response_api_optional_param(local_vars)
+    )
 
     # Get optional parameters for the responses API
-    responses_api_request_params: ResponsesAPIRequestParams = (
-        get_optional_params_responses_api(
+    responses_api_request_params: Dict = get_optional_params_responses_api(
         model=model,
         responses_api_provider_config=responses_api_provider_config,
-        optional_params={**locals(), **kwargs},
-        )
+        response_api_optional_params=response_api_optional_params,
     )
 
     response = await base_llm_http_handler.async_response_api_handler(
         model=model,
         input=input,
         responses_api_provider_config=responses_api_provider_config,
-        responses_api_request_params=responses_api_request_params,
+        response_api_optional_request_params=responses_api_request_params,
         custom_llm_provider=custom_llm_provider,
         litellm_params=litellm_params,
+        logging_obj=litellm_logging_obj,
+        extra_headers=extra_headers,
+        extra_body=extra_body,
+        timeout=timeout,
     )
     return response
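
The new get_requested_response_api_optional_param helper is the key piece of this file: it derives the set of valid keys from the TypedDict via typing.get_type_hints, then filters the combined locals()/kwargs grab bag down to just those keys. The same pattern in isolation, with a stand-in TypedDict:

from typing import Any, Dict, Optional, TypedDict, get_type_hints


class OptionalParams(TypedDict, total=False):  # stand-in for the litellm TypedDict
    temperature: Optional[float]
    top_p: Optional[float]


def filter_to_typed_dict(params: Dict[str, Any]) -> OptionalParams:
    # get_type_hints returns {field_name: type} for every declared TypedDict key
    valid_keys = get_type_hints(OptionalParams).keys()
    return OptionalParams(**{k: v for k, v in params.items() if k in valid_keys})


# a locals()-style grab bag: unknown keys are dropped, declared keys survive
print(filter_to_typed_dict({"model": "gpt-4o", "temperature": 0.3, "kwargs": {}}))
# {'temperature': 0.3}

One behavioral note: the filter keeps keys whose value is None, and the None-stripping step is removed from utils in the next file, so unset aresponses arguments now flow through as explicit nulls unless a later layer drops them.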

litellm/responses/utils.py

@@ -1,15 +1,19 @@
 import json
 from typing import Any, Dict
 
 import litellm
 from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
-from litellm.types.llms.openai import ResponsesAPIRequestParams
+from litellm.types.llms.openai import (
+    ResponsesAPIOptionalRequestParams,
+    ResponsesAPIRequestParams,
+)
 
 
 def get_optional_params_responses_api(
     model: str,
     responses_api_provider_config: BaseResponsesAPIConfig,
-    optional_params: Dict[str, Any],
-) -> ResponsesAPIRequestParams:
+    response_api_optional_params: ResponsesAPIOptionalRequestParams,
+) -> Dict:
     """
     Get optional parameters for the responses API.
@@ -22,14 +26,13 @@ def get_optional_params_responses_api(
         A dictionary of supported parameters for the responses API
     """
-    # Remove None values and internal parameters
-    filtered_params = {k: v for k, v in optional_params.items() if v is not None}
-
     # Get supported parameters for the model
     supported_params = responses_api_provider_config.get_supported_openai_params(model)
 
     # Check for unsupported parameters
     unsupported_params = [
-        param for param in filtered_params if param not in supported_params
+        param for param in response_api_optional_params if param not in supported_params
     ]
 
     if unsupported_params:
@@ -40,7 +43,9 @@ def get_optional_params_responses_api(
     # Map parameters to provider-specific format
     mapped_params = responses_api_provider_config.map_openai_params(
-        optional_params=filtered_params, model=model, drop_params=litellm.drop_params
+        response_api_optional_params=response_api_optional_params,
+        model=model,
+        drop_params=litellm.drop_params,
     )
 
     return mapped_params
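
Taken together, get_optional_params_responses_api now receives the pre-filtered TypedDict, flags any keys the provider's get_supported_openai_params does not advertise, and hands the mapping to the provider config. A condensed sketch of that check-then-map pipeline; the supported-params list and the drop-versus-raise behavior are illustrative assumptions (the real function consults litellm.drop_params and the provider config):

from typing import Any, Dict, List

SUPPORTED_PARAMS: List[str] = ["temperature", "top_p", "stream"]  # illustrative


def get_optional_params_sketch(
    response_api_optional_params: Dict[str, Any],
    drop_params: bool,
) -> Dict[str, Any]:
    # flag anything the provider config does not advertise as supported
    unsupported = [
        p for p in response_api_optional_params if p not in SUPPORTED_PARAMS
    ]
    if unsupported and not drop_params:
        raise ValueError(f"unsupported parameters: {unsupported}")
    # drop the stragglers, then apply the provider's (here: identity) mapping
    return {k: v for k, v in response_api_optional_params.items() if k in SUPPORTED_PARAMS}


print(get_optional_params_sketch({"temperature": 0.5, "foo": 1}, drop_params=True))
# {'temperature': 0.5}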