from typing import TYPE_CHECKING, Any, Dict, Optional, Union

import httpx

import litellm
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import (
    ResponseInputParam,
    ResponsesAPIOptionalRequestParams,
    ResponsesAPIRequestParams,
    ResponsesAPIResponse,
    ResponsesAPIStreamingResponse,
)
from litellm.types.router import GenericLiteLLMParams

from ..common_utils import OpenAIError

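# At type-check time, LiteLLMLoggingObj resolves to the real Logging class; at
# runtime it falls back to Any so this module avoids importing the logging
# machinery (and any circular-import risk) when the code actually executes.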
if TYPE_CHECKING:
    from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj

    LiteLLMLoggingObj = _LiteLLMLoggingObj
else:
    LiteLLMLoggingObj = Any


class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
    def get_supported_openai_params(self, model: str) -> list:
        """
        All OpenAI Responses API params are supported
        """
        return [
            "input",
            "model",
            "include",
            "instructions",
            "max_output_tokens",
            "metadata",
            "parallel_tool_calls",
            "previous_response_id",
            "reasoning",
            "store",
            "stream",
            "temperature",
            "text",
            "tool_choice",
            "tools",
            "top_p",
            "truncation",
            "user",
            "extra_headers",
            "extra_query",
            "extra_body",
            "timeout",
        ]

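    # Since every Responses API param is supported (see the list above), mapping
    # is a straight passthrough: the typed optional params become a plain dict.
    # drop_params is accepted for interface compatibility but has no effect here.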
    def map_openai_params(
        self,
        response_api_optional_params: ResponsesAPIOptionalRequestParams,
        model: str,
        drop_params: bool,
    ) -> Dict:
        return dict(response_api_optional_params)

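    # model and input are the required fields; everything else is forwarded
    # verbatim from the mapped optional params into the typed request.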
    def transform_responses_api_request(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        response_api_optional_request_params: Dict,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> ResponsesAPIRequestParams:
        return ResponsesAPIRequestParams(
            model=model, input=input, **response_api_optional_request_params
        )

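    # If the provider returns a non-JSON body (e.g. an HTML error page), raise
    # OpenAIError carrying the raw text and HTTP status code instead of failing
    # with an opaque JSON decode error.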
    def transform_response_api_response(
        self,
        model: str,
        raw_response: httpx.Response,
        logging_obj: LiteLLMLoggingObj,
    ) -> ResponsesAPIResponse:
        try:
            raw_response_json = raw_response.json()
        except Exception:
            raise OpenAIError(
                message=raw_response.text, status_code=raw_response.status_code
            )
        return ResponsesAPIResponse(**raw_response_json)

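    # API key resolution order: explicit argument, then litellm.api_key, then
    # litellm.openai_key, then the OPENAI_API_KEY secret / env var.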
    def validate_environment(
        self,
        headers: dict,
        model: str,
        api_key: Optional[str] = None,
    ) -> dict:
        api_key = (
            api_key
            or litellm.api_key
            or litellm.openai_key
            or get_secret_str("OPENAI_API_KEY")
        )
        headers.update(
            {
                "Authorization": f"Bearer {api_key}",
            }
        )
        return headers

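    # Base URL resolution mirrors the key lookup: explicit argument, then
    # litellm.api_base, then OPENAI_API_BASE, then the public default.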
    def get_complete_url(
        self,
        api_base: Optional[str],
        model: str,
        stream: Optional[bool] = None,
    ) -> str:
        """
        Get the endpoint for OpenAI responses API
        """
        api_base = (
            api_base
            or litellm.api_base
            or get_secret_str("OPENAI_API_BASE")
            or "https://api.openai.com/v1"
        )

        # Remove trailing slashes
        api_base = api_base.rstrip("/")

        return f"{api_base}/responses"

    def transform_streaming_response(
        self,
        model: str,
        parsed_chunk: dict,
        logging_obj: LiteLLMLoggingObj,
    ) -> ResponsesAPIStreamingResponse:
        """
        Transform a parsed streaming response chunk into a ResponsesAPIStreamingResponse
        """
        # Convert the dictionary to a properly typed ResponsesAPIStreamingResponse
        return ResponsesAPIStreamingResponse(**parsed_chunk)
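

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the library API: it exercises the
    # request-side helpers above. "gpt-4o" and the key are placeholders, and
    # GenericLiteLLMParams() is assumed to be constructible with defaults.
    config = OpenAIResponsesAPIConfig()

    # Typically prints https://api.openai.com/v1/responses, unless
    # litellm.api_base or the OPENAI_API_BASE env var overrides the default.
    print(config.get_complete_url(api_base=None, model="gpt-4o"))

    # The explicit (placeholder) key short-circuits the fallback lookup and
    # lands in the Authorization header.
    headers = config.validate_environment(
        headers={}, model="gpt-4o", api_key="sk-placeholder"
    )
    print(headers)

    # Builds the typed request body: model + input + passthrough params.
    request = config.transform_responses_api_request(
        model="gpt-4o",
        input="Hello!",
        response_api_optional_request_params={"temperature": 0.1},
        litellm_params=GenericLiteLLMParams(),
        headers=headers,
    )
    print(request)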