[Feat] Add support for GET Responses Endpoint - OpenAI, Azure OpenAI (#10235)

* Added get responses API (#10234)

* test_basic_openai_responses_get_endpoint

* transform_get_response_api_request

* test_basic_openai_responses_get_endpoint

---------

Co-authored-by: Prathamesh Saraf <pratamesh1867@gmail.com>
Author: Ishaan Jaff
Date: 2025-04-23 15:19:29 -07:00 (committed by GitHub)
Parent: 2e58e47b43
Commit: dc9b058dbd
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
7 changed files with 501 additions and 21 deletions
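
For orientation, a minimal usage sketch of what this commit enables (not part of the diff itself): it assumes an OpenAI API key is configured and that the response was created through LiteLLM, so the provider can be decoded from the returned id; the model name is illustrative.

    import litellm

    # Create a stored response via the Responses API.
    resp = litellm.responses(model="openai/gpt-4o", input="ping", max_output_tokens=20)

    # Retrieve it again by id using the new GET /v1/responses/{response_id} support.
    fetched = litellm.get_responses(response_id=resp.id)
    assert fetched.id == resp.id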


@@ -95,6 +95,35 @@ class AzureOpenAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
    #########################################################
    ########## DELETE RESPONSE API TRANSFORMATION ##############
    #########################################################
    def _construct_url_for_response_id_in_path(
        self, api_base: str, response_id: str
    ) -> str:
        """
        Constructs a URL for the API request with the response_id in the path.
        """
        from urllib.parse import urlparse, urlunparse

        # Parse the URL to separate its components
        parsed_url = urlparse(api_base)

        # Insert the response_id at the end of the path component
        # Remove trailing slash if present to avoid double slashes
        path = parsed_url.path.rstrip("/")
        new_path = f"{path}/{response_id}"

        # Reconstruct the URL with all original components but with the modified path
        constructed_url = urlunparse(
            (
                parsed_url.scheme,  # http, https
                parsed_url.netloc,  # domain name, port
                new_path,  # path with response_id added
                parsed_url.params,  # parameters
                parsed_url.query,  # query string
                parsed_url.fragment,  # fragment
            )
        )
        return constructed_url

    def transform_delete_response_api_request(
        self,
        response_id: str,
@@ -111,28 +140,33 @@ class AzureOpenAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
        This function handles URLs with query parameters by inserting the response_id
        at the correct location (before any query parameters).
        """
-        from urllib.parse import urlparse, urlunparse
-
-        # Parse the URL to separate its components
-        parsed_url = urlparse(api_base)
-
-        # Insert the response_id at the end of the path component
-        # Remove trailing slash if present to avoid double slashes
-        path = parsed_url.path.rstrip("/")
-        new_path = f"{path}/{response_id}"
-
-        # Reconstruct the URL with all original components but with the modified path
-        delete_url = urlunparse(
-            (
-                parsed_url.scheme,  # http, https
-                parsed_url.netloc,  # domain name, port
-                new_path,  # path with response_id added
-                parsed_url.params,  # parameters
-                parsed_url.query,  # query string
-                parsed_url.fragment,  # fragment
-            )
-        )
+        delete_url = self._construct_url_for_response_id_in_path(
+            api_base=api_base, response_id=response_id
+        )
        data: Dict = {}
        verbose_logger.debug(f"delete response url={delete_url}")

        return delete_url, data
+
+    #########################################################
+    ########## GET RESPONSE API TRANSFORMATION ###############
+    #########################################################
+    def transform_get_response_api_request(
+        self,
+        response_id: str,
+        api_base: str,
+        litellm_params: GenericLiteLLMParams,
+        headers: dict,
+    ) -> Tuple[str, Dict]:
+        """
+        Transform the get response API request into a URL and data
+
+        OpenAI API expects the following request
+        - GET /v1/responses/{response_id}
+        """
+        get_url = self._construct_url_for_response_id_in_path(
+            api_base=api_base, response_id=response_id
+        )
+        data: Dict = {}
+        verbose_logger.debug(f"get response url={get_url}")
+        return get_url, data
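
To illustrate what the shared helper above produces, here is a standalone sketch (plain urllib, not code from this commit): the response_id is appended to the path while any query string, such as an Azure api-version, is left intact. The endpoint, api-version and id below are made up.

    from urllib.parse import urlparse, urlunparse

    def add_id_to_path(api_base: str, response_id: str) -> str:
        # Append the id to the path; keep scheme, host, query and fragment unchanged.
        parsed = urlparse(api_base)
        return urlunparse(parsed._replace(path=parsed.path.rstrip("/") + f"/{response_id}"))

    print(add_id_to_path(
        "https://my-endpoint.openai.azure.com/openai/responses?api-version=2025-01-01",
        "resp_abc123",
    ))
    # -> https://my-endpoint.openai.azure.com/openai/responses/resp_abc123?api-version=2025-01-01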


@@ -141,7 +141,32 @@ class BaseResponsesAPIConfig(ABC):
        pass

    #########################################################
-    ########## END DELETE RESPONSE API TRANSFORMATION ##########
+    ########## END DELETE RESPONSE API TRANSFORMATION #######
    #########################################################

    #########################################################
    ########## GET RESPONSE API TRANSFORMATION ###############
    #########################################################
    @abstractmethod
    def transform_get_response_api_request(
        self,
        response_id: str,
        api_base: str,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Tuple[str, Dict]:
        pass

    @abstractmethod
    def transform_get_response_api_response(
        self,
        raw_response: httpx.Response,
        logging_obj: LiteLLMLoggingObj,
    ) -> ResponsesAPIResponse:
        pass

    #########################################################
    ########## END GET RESPONSE API TRANSFORMATION ##########
    #########################################################

    def get_error_class(
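
Any provider config that subclasses BaseResponsesAPIConfig must now implement both GET hooks. A rough, hypothetical sketch of the minimum shape (the class name and argument handling here are illustrative only; it omits imports, type hints, and the other abstract methods the base class requires — the real OpenAI and Azure implementations appear further down in this diff):

    class MyProviderResponsesAPIConfig(BaseResponsesAPIConfig):
        def transform_get_response_api_request(
            self, response_id, api_base, litellm_params, headers
        ):
            # GET {api_base}/{response_id} with no extra query params or body
            return f"{api_base}/{response_id}", {}

        def transform_get_response_api_response(self, raw_response, logging_obj):
            # Parse the provider's JSON body into LiteLLM's ResponsesAPIResponse model
            return ResponsesAPIResponse(**raw_response.json())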


@@ -1426,6 +1426,162 @@ class BaseLLMHTTPHandler:
            logging_obj=logging_obj,
        )

    def get_responses(
        self,
        response_id: str,
        responses_api_provider_config: BaseResponsesAPIConfig,
        litellm_params: GenericLiteLLMParams,
        logging_obj: LiteLLMLoggingObj,
        custom_llm_provider: Optional[str] = None,
        extra_headers: Optional[Dict[str, Any]] = None,
        extra_body: Optional[Dict[str, Any]] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
        _is_async: bool = False,
    ) -> Union[ResponsesAPIResponse, Coroutine[Any, Any, ResponsesAPIResponse]]:
        """
        Get a response by ID
        Uses GET /v1/responses/{response_id} endpoint in the responses API
        """
        if _is_async:
            return self.async_get_responses(
                response_id=response_id,
                responses_api_provider_config=responses_api_provider_config,
                litellm_params=litellm_params,
                logging_obj=logging_obj,
                custom_llm_provider=custom_llm_provider,
                extra_headers=extra_headers,
                extra_body=extra_body,
                timeout=timeout,
                client=client,
            )

        if client is None or not isinstance(client, HTTPHandler):
            sync_httpx_client = _get_httpx_client(
                params={"ssl_verify": litellm_params.get("ssl_verify", None)}
            )
        else:
            sync_httpx_client = client

        headers = responses_api_provider_config.validate_environment(
            api_key=litellm_params.api_key,
            headers=extra_headers or {},
            model="None",
        )

        if extra_headers:
            headers.update(extra_headers)

        api_base = responses_api_provider_config.get_complete_url(
            api_base=litellm_params.api_base,
            litellm_params=dict(litellm_params),
        )

        url, data = responses_api_provider_config.transform_get_response_api_request(
            response_id=response_id,
            api_base=api_base,
            litellm_params=litellm_params,
            headers=headers,
        )

        ## LOGGING
        logging_obj.pre_call(
            input="",
            api_key="",
            additional_args={
                "complete_input_dict": data,
                "api_base": api_base,
                "headers": headers,
            },
        )

        try:
            response = sync_httpx_client.get(
                url=url, headers=headers, params=data
            )
        except Exception as e:
            raise self._handle_error(
                e=e,
                provider_config=responses_api_provider_config,
            )

        return responses_api_provider_config.transform_get_response_api_response(
            raw_response=response,
            logging_obj=logging_obj,
        )

    async def async_get_responses(
        self,
        response_id: str,
        responses_api_provider_config: BaseResponsesAPIConfig,
        litellm_params: GenericLiteLLMParams,
        logging_obj: LiteLLMLoggingObj,
        custom_llm_provider: Optional[str] = None,
        extra_headers: Optional[Dict[str, Any]] = None,
        extra_body: Optional[Dict[str, Any]] = None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
    ) -> ResponsesAPIResponse:
        """
        Async version of get_responses
        """
        if client is None or not isinstance(client, AsyncHTTPHandler):
            async_httpx_client = get_async_httpx_client(
                llm_provider=litellm.LlmProviders(custom_llm_provider),
                params={"ssl_verify": litellm_params.get("ssl_verify", None)},
            )
        else:
            async_httpx_client = client

        headers = responses_api_provider_config.validate_environment(
            api_key=litellm_params.api_key,
            headers=extra_headers or {},
            model="None",
        )

        if extra_headers:
            headers.update(extra_headers)

        api_base = responses_api_provider_config.get_complete_url(
            api_base=litellm_params.api_base,
            litellm_params=dict(litellm_params),
        )

        url, data = responses_api_provider_config.transform_get_response_api_request(
            response_id=response_id,
            api_base=api_base,
            litellm_params=litellm_params,
            headers=headers,
        )

        ## LOGGING
        logging_obj.pre_call(
            input="",
            api_key="",
            additional_args={
                "complete_input_dict": data,
                "api_base": api_base,
                "headers": headers,
            },
        )

        try:
            response = await async_httpx_client.get(
                url=url, headers=headers, params=data
            )
        except Exception as e:
            verbose_logger.exception(f"Error retrieving response: {e}")
            raise self._handle_error(
                e=e,
                provider_config=responses_api_provider_config,
            )

        return responses_api_provider_config.transform_get_response_api_response(
            raw_response=response,
            logging_obj=logging_obj,
        )

    def create_file(
        self,
        create_file_data: CreateFileRequest,
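
A note on the control flow in the handler above: the synchronous entrypoint returns a coroutine when _is_async=True, so litellm.get_responses and litellm.aget_responses can share a single handler. The same dispatch pattern in isolation, as a generic sketch unrelated to LiteLLM's actual classes:

    import asyncio
    from typing import Any, Coroutine, Union

    class Handler:
        def get_thing(self, _is_async: bool = False) -> Union[str, Coroutine[Any, Any, str]]:
            if _is_async:
                # Return the coroutine; the async wrapper is responsible for awaiting it.
                return self.async_get_thing()
            return "sync result"

        async def async_get_thing(self) -> str:
            return "async result"

    handler = Handler()
    print(handler.get_thing())                             # sync path
    print(asyncio.run(handler.get_thing(_is_async=True)))  # async path, awaited here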


@@ -250,3 +250,39 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
                message=raw_response.text, status_code=raw_response.status_code
            )
        return DeleteResponseResult(**raw_response_json)

    #########################################################
    ########## GET RESPONSE API TRANSFORMATION ###############
    #########################################################
    def transform_get_response_api_request(
        self,
        response_id: str,
        api_base: str,
        litellm_params: GenericLiteLLMParams,
        headers: dict,
    ) -> Tuple[str, Dict]:
        """
        Transform the get response API request into a URL and data

        OpenAI API expects the following request
        - GET /v1/responses/{response_id}
        """
        url = f"{api_base}/{response_id}"
        data: Dict = {}
        return url, data

    def transform_get_response_api_response(
        self,
        raw_response: httpx.Response,
        logging_obj: LiteLLMLoggingObj,
    ) -> ResponsesAPIResponse:
        """
        Transform the get response API response into a ResponsesAPIResponse
        """
        try:
            raw_response_json = raw_response.json()
        except Exception:
            raise OpenAIError(
                message=raw_response.text, status_code=raw_response.status_code
            )
        return ResponsesAPIResponse(**raw_response_json)
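
For the plain OpenAI provider no URL surgery is needed: the request transform above simply appends the id to whatever base URL get_complete_url resolves to (typically the standard /v1/responses endpoint) and sends a GET with an empty query/body dict. A small illustration with a made-up id:

    api_base = "https://api.openai.com/v1/responses"
    response_id = "resp_abc123"
    url = f"{api_base}/{response_id}"
    # -> https://api.openai.com/v1/responses/resp_abc123, sent as a GET with data == {}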


@@ -434,3 +434,188 @@ def delete_responses(
            completion_kwargs=local_vars,
            extra_kwargs=kwargs,
        )

@client
async def aget_responses(
    response_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Optional[Dict[str, Any]] = None,
    extra_query: Optional[Dict[str, Any]] = None,
    extra_body: Optional[Dict[str, Any]] = None,
    timeout: Optional[Union[float, httpx.Timeout]] = None,
    # LiteLLM specific params,
    custom_llm_provider: Optional[str] = None,
    **kwargs,
) -> ResponsesAPIResponse:
    """
    Async: Fetch a response by its ID.

    GET /v1/responses/{response_id} endpoint in the responses API

    Args:
        response_id: The ID of the response to fetch.
        custom_llm_provider: Optional provider name. If not specified, will be decoded from response_id.

    Returns:
        The response object with complete information about the stored response.
    """
    local_vars = locals()
    try:
        loop = asyncio.get_event_loop()
        kwargs["aget_responses"] = True

        # get custom llm provider from response_id
        decoded_response_id: DecodedResponseId = (
            ResponsesAPIRequestUtils._decode_responses_api_response_id(
                response_id=response_id,
            )
        )
        response_id = decoded_response_id.get("response_id") or response_id
        custom_llm_provider = (
            decoded_response_id.get("custom_llm_provider") or custom_llm_provider
        )

        func = partial(
            get_responses,
            response_id=response_id,
            custom_llm_provider=custom_llm_provider,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
            **kwargs,
        )

        ctx = contextvars.copy_context()
        func_with_context = partial(ctx.run, func)
        init_response = await loop.run_in_executor(None, func_with_context)

        if asyncio.iscoroutine(init_response):
            response = await init_response
        else:
            response = init_response

        # Update the responses_api_response_id with the model_id
        if isinstance(response, ResponsesAPIResponse):
            response = ResponsesAPIRequestUtils._update_responses_api_response_id_with_model_id(
                responses_api_response=response,
                litellm_metadata=kwargs.get("litellm_metadata", {}),
                custom_llm_provider=custom_llm_provider,
            )
        return response
    except Exception as e:
        raise litellm.exception_type(
            model=None,
            custom_llm_provider=custom_llm_provider,
            original_exception=e,
            completion_kwargs=local_vars,
            extra_kwargs=kwargs,
        )


@client
def get_responses(
    response_id: str,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Optional[Dict[str, Any]] = None,
    extra_query: Optional[Dict[str, Any]] = None,
    extra_body: Optional[Dict[str, Any]] = None,
    timeout: Optional[Union[float, httpx.Timeout]] = None,
    # LiteLLM specific params,
    custom_llm_provider: Optional[str] = None,
    **kwargs,
) -> Union[ResponsesAPIResponse, Coroutine[Any, Any, ResponsesAPIResponse]]:
    """
    Fetch a response by its ID.

    GET /v1/responses/{response_id} endpoint in the responses API

    Args:
        response_id: The ID of the response to fetch.
        custom_llm_provider: Optional provider name. If not specified, will be decoded from response_id.

    Returns:
        The response object with complete information about the stored response.
    """
    local_vars = locals()
    try:
        litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj")  # type: ignore
        litellm_call_id: Optional[str] = kwargs.get("litellm_call_id", None)
        _is_async = kwargs.pop("aget_responses", False) is True

        # get llm provider logic
        litellm_params = GenericLiteLLMParams(**kwargs)

        # get custom llm provider from response_id
        decoded_response_id: DecodedResponseId = (
            ResponsesAPIRequestUtils._decode_responses_api_response_id(
                response_id=response_id,
            )
        )
        response_id = decoded_response_id.get("response_id") or response_id
        custom_llm_provider = (
            decoded_response_id.get("custom_llm_provider") or custom_llm_provider
        )

        if custom_llm_provider is None:
            raise ValueError("custom_llm_provider is required but passed as None")

        # get provider config
        responses_api_provider_config: Optional[BaseResponsesAPIConfig] = (
            ProviderConfigManager.get_provider_responses_api_config(
                model=None,
                provider=litellm.LlmProviders(custom_llm_provider),
            )
        )

        if responses_api_provider_config is None:
            raise ValueError(
                f"GET responses is not supported for {custom_llm_provider}"
            )

        local_vars.update(kwargs)

        # Pre Call logging
        litellm_logging_obj.update_environment_variables(
            model=None,
            optional_params={
                "response_id": response_id,
            },
            litellm_params={
                "litellm_call_id": litellm_call_id,
            },
            custom_llm_provider=custom_llm_provider,
        )

        # Call the handler with _is_async flag instead of directly calling the async handler
        response = base_llm_http_handler.get_responses(
            response_id=response_id,
            custom_llm_provider=custom_llm_provider,
            responses_api_provider_config=responses_api_provider_config,
            litellm_params=litellm_params,
            logging_obj=litellm_logging_obj,
            extra_headers=extra_headers,
            extra_body=extra_body,
            timeout=timeout or request_timeout,
            _is_async=_is_async,
            client=kwargs.get("client"),
        )

        # Update the responses_api_response_id with the model_id
        if isinstance(response, ResponsesAPIResponse):
            response = ResponsesAPIRequestUtils._update_responses_api_response_id_with_model_id(
                responses_api_response=response,
                litellm_metadata=kwargs.get("litellm_metadata", {}),
                custom_llm_provider=custom_llm_provider,
            )
        return response
    except Exception as e:
        raise litellm.exception_type(
            model=None,
            custom_llm_provider=custom_llm_provider,
            original_exception=e,
            completion_kwargs=local_vars,
            extra_kwargs=kwargs,
        )
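
Putting the two public entrypoints together, an async retrieval sketch: the provider is normally decoded from the LiteLLM-generated response id, but custom_llm_provider can be passed explicitly for ids that did not originate from LiteLLM; the model and ids here are illustrative.

    import asyncio
    import litellm

    async def main():
        resp = await litellm.aresponses(model="openai/gpt-4o", input="ping", max_output_tokens=20)
        fetched = await litellm.aget_responses(response_id=resp.id)
        # For an id not produced by LiteLLM, name the provider explicitly:
        # fetched = await litellm.aget_responses(response_id="resp_abc123", custom_llm_provider="openai")
        assert fetched.id == resp.id

    asyncio.run(main())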


@@ -274,5 +274,44 @@ class BaseResponsesAPITest(ABC):
            **base_completion_call_args
        )

    @pytest.mark.parametrize("sync_mode", [False, True])
    @pytest.mark.asyncio
    async def test_basic_openai_responses_get_endpoint(self, sync_mode):
        litellm._turn_on_debug()
        litellm.set_verbose = True
        base_completion_call_args = self.get_base_completion_call_args()
        if sync_mode:
            response = litellm.responses(
                input="Basic ping", max_output_tokens=20,
                **base_completion_call_args
            )

            # get the response
            if isinstance(response, ResponsesAPIResponse):
                result = litellm.get_responses(
                    response_id=response.id,
                    **base_completion_call_args
                )
                assert result is not None
                assert result.id == response.id
                assert result.output == response.output
            else:
                raise ValueError("response is not a ResponsesAPIResponse")
        else:
            response = await litellm.aresponses(
                input="Basic ping", max_output_tokens=20,
                **base_completion_call_args
            )

            # async get the response
            if isinstance(response, ResponsesAPIResponse):
                result = await litellm.aget_responses(
                    response_id=response.id,
                    **base_completion_call_args
                )
                assert result is not None
                assert result.id == response.id
                assert result.output == response.output
            else:
                raise ValueError("response is not a ResponsesAPIResponse")


@@ -36,6 +36,11 @@ class TestAnthropicResponsesAPITest(BaseResponsesAPITest):
    async def test_basic_openai_responses_streaming_delete_endpoint(self, sync_mode=False):
        pass

    async def test_basic_openai_responses_get_endpoint(self, sync_mode=False):
        pass


def test_multiturn_tool_calls():
    # Test streaming response with tools for Anthropic