feat: add responses input items api (#2239)

# What does this PR do?
TSIA — adds a new API for listing the input items of an OpenAI response.

## Test Plan
Added integration and unit tests.
ehhuang 2025-05-24 07:05:53 -07:00 committed by GitHub
parent 055f48b6a2
commit 15b0a67555
9 changed files with 546 additions and 12 deletions

@@ -31,6 +31,7 @@ from llama_stack.apis.tools import ToolDef
 from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
 
 from .openai_responses import (
+    ListOpenAIResponseInputItem,
     ListOpenAIResponseObject,
     OpenAIResponseInput,
     OpenAIResponseInputTool,
@@ -630,3 +631,25 @@ class Agents(Protocol):
         :returns: A ListOpenAIResponseObject.
         """
         ...
+
+    @webmethod(route="/openai/v1/responses/{response_id}/input_items", method="GET")
+    async def list_openai_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        """List input items for a given OpenAI response.
+
+        :param response_id: The ID of the response to retrieve input items for.
+        :param after: An item ID to list items after, used for pagination.
+        :param before: An item ID to list items before, used for pagination.
+        :param include: Additional fields to include in the response.
+        :param limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
+        :param order: The order to return the input items in. Default is desc.
+        :returns: A ListOpenAIResponseInputItem.
+        """
+        ...
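
For reference, exercising the new route over plain HTTP might look like the sketch below; the server address and response ID are placeholder assumptions, not values from this PR.

```python
# Minimal sketch of calling the new input_items endpoint over HTTP.
# BASE_URL and RESPONSE_ID are assumptions for illustration only.
import requests

BASE_URL = "http://localhost:8321"  # assumed local Llama Stack server
RESPONSE_ID = "resp_abc123"  # hypothetical response ID

resp = requests.get(
    f"{BASE_URL}/openai/v1/responses/{RESPONSE_ID}/input_items",
    params={"limit": 10, "order": "asc"},
)
resp.raise_for_status()
page = resp.json()
assert page["object"] == "list"
for item in page["data"]:
    print(item)
```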

@@ -216,7 +216,7 @@ OpenAIResponseInputTool = Annotated[
 register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
 
 
-class OpenAIResponseInputItemList(BaseModel):
+class ListOpenAIResponseInputItem(BaseModel):
     data: list[OpenAIResponseInput]
     object: Literal["list"] = "list"
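
For orientation, the renamed model keeps the OpenAI-style list envelope; a serialized payload would look roughly like the sketch below (the message item's fields are assumed for the example, not taken from the PR).

```python
# Illustrative JSON shape of a ListOpenAIResponseInputItem payload.
example_page = {
    "object": "list",  # fixed discriminator from the model above
    "data": [
        # one hypothetical input item; real items follow OpenAIResponseInput
        {"type": "message", "role": "user", "content": "What is 2 + 2?"},
    ],
}
```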

@@ -20,6 +20,7 @@ from llama_stack.apis.agents import (
     AgentTurnCreateRequest,
     AgentTurnResumeRequest,
     Document,
+    ListOpenAIResponseInputItem,
     ListOpenAIResponseObject,
     OpenAIResponseInput,
     OpenAIResponseInputTool,
@@ -337,3 +338,16 @@ class MetaReferenceAgentsImpl(Agents):
         order: Order | None = Order.desc,
     ) -> ListOpenAIResponseObject:
         return await self.openai_responses_impl.list_openai_responses(after, limit, model, order)
+
+    async def list_openai_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        return await self.openai_responses_impl.list_openai_response_input_items(
+            response_id, after, before, include, limit, order
+        )

@@ -14,10 +14,10 @@ from pydantic import BaseModel
 
 from llama_stack.apis.agents import Order
 from llama_stack.apis.agents.openai_responses import (
+    ListOpenAIResponseInputItem,
     ListOpenAIResponseObject,
     OpenAIResponseInput,
     OpenAIResponseInputFunctionToolCallOutput,
-    OpenAIResponseInputItemList,
     OpenAIResponseInputMessageContent,
     OpenAIResponseInputMessageContentImage,
     OpenAIResponseInputMessageContentText,
@@ -164,7 +164,7 @@ async def _get_message_type_by_role(role: str):
 
 
 class OpenAIResponsePreviousResponseWithInputItems(BaseModel):
-    input_items: OpenAIResponseInputItemList
+    input_items: ListOpenAIResponseInputItem
     response: OpenAIResponseObject
@@ -223,6 +223,27 @@ class OpenAIResponsesImpl:
     ) -> ListOpenAIResponseObject:
         return await self.responses_store.list_responses(after, limit, model, order)
 
+    async def list_openai_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        """List input items for a given OpenAI response.
+
+        :param response_id: The ID of the response to retrieve input items for.
+        :param after: An item ID to list items after, used for pagination.
+        :param before: An item ID to list items before, used for pagination.
+        :param include: Additional fields to include in the response.
+        :param limit: A limit on the number of objects to be returned.
+        :param order: The order to return the input items in.
+        :returns: A ListOpenAIResponseInputItem.
+        """
+        return await self.responses_store.list_response_input_items(response_id, after, before, include, limit, order)
+
     async def create_openai_response(
         self,
         input: str | list[OpenAIResponseInput],

@@ -7,6 +7,7 @@ from llama_stack.apis.agents import (
     Order,
 )
 from llama_stack.apis.agents.openai_responses import (
+    ListOpenAIResponseInputItem,
     ListOpenAIResponseObject,
     OpenAIResponseInput,
     OpenAIResponseObject,
@@ -96,3 +97,39 @@ class ResponsesStore:
         if not row:
             raise ValueError(f"Response with id {response_id} not found") from None
         return OpenAIResponseObjectWithInput(**row["response_object"])
+
+    async def list_response_input_items(
+        self,
+        response_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        include: list[str] | None = None,
+        limit: int | None = 20,
+        order: Order | None = Order.desc,
+    ) -> ListOpenAIResponseInputItem:
+        """
+        List input items for a given response.
+
+        :param response_id: The ID of the response to retrieve input items for.
+        :param after: An item ID to list items after, used for pagination.
+        :param before: An item ID to list items before, used for pagination.
+        :param include: Additional fields to include in the response.
+        :param limit: A limit on the number of objects to be returned.
+        :param order: The order to return the input items in.
+        """
+        # TODO: support after/before pagination
+        if after or before:
+            raise NotImplementedError("After/before pagination is not supported yet")
+        if include:
+            raise NotImplementedError("Include is not supported yet")
+
+        response_with_input = await self.get_response_object(response_id)
+        input_items = response_with_input.input
+
+        if order == Order.desc:
+            input_items = list(reversed(input_items))
+
+        if limit is not None and len(input_items) > limit:
+            input_items = input_items[:limit]
+
+        return ListOpenAIResponseInputItem(data=input_items)
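
The ordering and truncation semantics above can be summarized with a self-contained sketch: plain lists stand in for stored input items, and Order is re-declared locally so the example runs on its own.

```python
# Self-contained sketch of the list semantics implemented above:
# reverse for newest-first when order is desc, then truncate to `limit`.
from enum import Enum


class Order(Enum):  # local stand-in for llama_stack.apis.agents.Order
    asc = "asc"
    desc = "desc"


def select_input_items(items: list, limit: int | None = 20, order: Order = Order.desc) -> list:
    out = list(reversed(items)) if order == Order.desc else list(items)
    if limit is not None and len(out) > limit:
        out = out[:limit]
    return out


assert select_input_items([1, 2, 3], limit=2) == [3, 2]  # newest first, truncated
assert select_input_items([1, 2, 3], order=Order.asc) == [1, 2, 3]
```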