add transform_request for OpenAI responses API

Ishaan Jaff 2025-03-11 16:33:26 -07:00
parent 5da3bcc9c5
commit 03765d334c
3 changed files with 48 additions and 22 deletions

litellm/llms/base_llm/responses/transformation.py

@@ -1,8 +1,13 @@
 import types
 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Optional, Union

-from litellm.types.llms.openai import ResponsesAPIRequestParams
+from litellm.types.llms.openai import (
+    ResponseInputParam,
+    ResponsesAPIOptionalRequestParams,
+    ResponsesAPIRequestParams,
+)
+from litellm.types.router import GenericLiteLLMParams
 from litellm.types.utils import ModelInfo

 from ..chat.transformation import BaseLLMException
@@ -48,7 +53,7 @@ class BaseResponsesAPIConfig(ABC):
         optional_params: dict,
         model: str,
         drop_params: bool,
-    ) -> ResponsesAPIRequestParams:
+    ) -> ResponsesAPIOptionalRequestParams:
        pass
@@ -66,7 +71,6 @@ class BaseResponsesAPIConfig(ABC):
         self,
         api_base: Optional[str],
         model: str,
-        optional_params: dict,
         stream: Optional[bool] = None,
     ) -> str:
         """
@@ -80,15 +84,16 @@ class BaseResponsesAPIConfig(ABC):
             raise ValueError("api_base is required")
         return api_base

-    # @abstractmethod
-    # def transform_request(
-    #     self,
-    #     model: str,
-    #     optional_params: dict,
-    #     litellm_params: dict,
-    #     headers: dict,
-    # ) -> dict:
-    #     pass
+    @abstractmethod
+    def transform_request(
+        self,
+        model: str,
+        input: Union[str, ResponseInputParam],
+        response_api_optional_request_params: ResponsesAPIOptionalRequestParams,
+        litellm_params: GenericLiteLLMParams,
+        headers: dict,
+    ) -> ResponsesAPIRequestParams:
+        pass

     # @abstractmethod
     # def transform_response(
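
The @abstractmethod on transform_request means every responses-API provider config must now supply this method. A minimal, hypothetical sketch of what such an implementation could look like (AcmeResponsesAPIConfig is illustrative only, and the remaining abstract methods of BaseResponsesAPIConfig are omitted):

from typing import Union

from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.types.llms.openai import (
    ResponseInputParam,
    ResponsesAPIOptionalRequestParams,
    ResponsesAPIRequestParams,
)
from litellm.types.router import GenericLiteLLMParams


class AcmeResponsesAPIConfig(BaseResponsesAPIConfig):
    """Hypothetical provider config; only transform_request is sketched."""

    def transform_request(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        response_api_optional_request_params: ResponsesAPIOptionalRequestParams,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> ResponsesAPIRequestParams:
        # Strip optional params that were never set, then attach the required
        # model/input fields to form the final request body.
        supported = {
            k: v
            for k, v in response_api_optional_request_params.items()
            if v is not None
        }
        return ResponsesAPIRequestParams(model=model, input=input, **supported)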

litellm/llms/openai/responses/transformation.py

@@ -1,9 +1,14 @@
-from typing import Optional
+from typing import Optional, Union

 import litellm
 from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import ResponsesAPIRequestParams
+from litellm.types.llms.openai import (
+    ResponseInputParam,
+    ResponsesAPIOptionalRequestParams,
+    ResponsesAPIRequestParams,
+)
+from litellm.types.router import GenericLiteLLMParams


 class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
@@ -41,9 +46,9 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
         optional_params: dict,
         model: str,
         drop_params: bool,
-    ) -> ResponsesAPIRequestParams:
-        return ResponsesAPIRequestParams(
+    ) -> ResponsesAPIOptionalRequestParams:
+        return ResponsesAPIOptionalRequestParams(
             include=optional_params.get("include"),
             instructions=optional_params.get("instructions"),
             max_output_tokens=optional_params.get("max_output_tokens"),
@@ -66,6 +71,18 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
             timeout=optional_params.get("timeout"),
         )

+    def transform_request(
+        self,
+        model: str,
+        input: Union[str, ResponseInputParam],
+        response_api_optional_request_params: ResponsesAPIOptionalRequestParams,
+        litellm_params: GenericLiteLLMParams,
+        headers: dict,
+    ) -> ResponsesAPIRequestParams:
+        return ResponsesAPIRequestParams(
+            model=model, input=input, **response_api_optional_request_params
+        )
+
     def validate_environment(
         self,
         headers: dict,
@@ -89,7 +106,6 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
         self,
         api_base: Optional[str],
         model: str,
-        optional_params: dict,
         stream: Optional[bool] = None,
     ) -> str:
         """

litellm/types/llms/openai.py

@@ -695,11 +695,9 @@ OpenAIAudioTranscriptionOptionalParams = Literal[
 OpenAIImageVariationOptionalParams = Literal["n", "size", "response_format", "user"]


-class ResponsesAPIRequestParams(TypedDict, total=False):
-    """TypedDict for parameters supported by the responses API."""
+class ResponsesAPIOptionalRequestParams(TypedDict, total=False):
+    """TypedDict for Optional parameters supported by the responses API."""

-    input: Union[str, ResponseInputParam]
-    model: str
     include: Optional[List[ResponseIncludable]]
     instructions: Optional[str]
     max_output_tokens: Optional[int]
@@ -720,3 +718,10 @@ class ResponsesAPIRequestParams(TypedDict, total=False):
     extra_query: Optional[Dict[str, Any]]
     extra_body: Optional[Dict[str, Any]]
     timeout: Optional[Union[float, httpx.Timeout]]
+
+
+class ResponsesAPIRequestParams(ResponsesAPIOptionalRequestParams, total=False):
+    """TypedDict for request parameters supported by the responses API."""
+
+    input: Union[str, ResponseInputParam]
+    model: str
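
Splitting the old ResponsesAPIRequestParams into an optional-only TypedDict plus a subclass that adds model and input mirrors the two transformation steps above. Since both classes are total=False, a partially filled optional-params dict can be spread straight into the full request type, as in this small sketch:

from litellm.types.llms.openai import (
    ResponsesAPIOptionalRequestParams,
    ResponsesAPIRequestParams,
)

optional: ResponsesAPIOptionalRequestParams = {"max_output_tokens": 128}
full: ResponsesAPIRequestParams = {
    "model": "gpt-4o",
    "input": "hello",
    **optional,  # every optional key is also a valid key of the subclass
}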