mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-29 15:23:51 +00:00
b
This commit is contained in:
parent
f822495ab0
commit
c81f36198c
6 changed files with 984 additions and 201 deletions
|
@ -210,7 +210,7 @@ Before finalizing documentation, verify:
|
|||
4. Move to next file
|
||||
|
||||
**COMPLETION CRITERIA:**
|
||||
- All 22 files have been processed
|
||||
- All 29 files have been processed
|
||||
- All @json_schema_type classes have comprehensive docstrings with complete `:param` field documentation
|
||||
- All @webmethod functions have comprehensive docstrings with parameter and return documentation
|
||||
- All other BaseModel classes have comprehensive docstrings with `:param` field documentation
|
||||
|
@ -247,6 +247,7 @@ Before finalizing documentation, verify:
|
|||
[x] 26. `/Users/saip/Documents/GitHub/llama-stack-sai/llama_stack/apis/common/training_types.py` - Training metrics and checkpoint types
|
||||
[x] 27. `/Users/saip/Documents/GitHub/llama-stack-sai/llama_stack/apis/common/responses.py` - Generic response types (well documented)
|
||||
[x] 28. `/Users/saip/Documents/GitHub/llama-stack-sai/llama_stack/apis/common/type_system.py` - Parameter type system definitions
|
||||
[x] 29. `/Users/saip/Documents/GitHub/llama-stack-sai/llama_stack/apis/agents/openai_responses.py` - OpenAI Response API compatibility types
|
||||
|
||||
**NOTE:** `/Users/saip/Documents/GitHub/llama-stack-sai/llama_stack/apis/common/errors.py` contains only Python exception classes (no @json_schema_type), so it's not included in the documentation update scope.
|
||||
|
||||
|
@ -630,7 +631,7 @@ After processing all 22 files:
|
|||
4. Check that the documentation is comprehensive and consistent
|
||||
|
||||
## Success Indicators
|
||||
- [ ] All 22 API files processed
|
||||
- [ ] All 29 API files processed
|
||||
- [ ] All @json_schema_type classes have comprehensive docstrings with `:param` field documentation
|
||||
- [ ] All @webmethod functions have comprehensive docstrings with parameter and return documentation
|
||||
- [ ] OpenAPI spec contains comprehensive field descriptions
|
||||
|
|
579
docs/_static/llama-stack-spec.html
vendored
579
docs/_static/llama-stack-spec.html
vendored
File diff suppressed because it is too large
Load diff
350
docs/_static/llama-stack-spec.yaml
vendored
350
docs/_static/llama-stack-spec.yaml
vendored
File diff suppressed because it is too large
Load diff
|
@ -18,18 +18,34 @@ from llama_stack.schema_utils import json_schema_type, register_schema
|
|||
|
||||
@json_schema_type
class OpenAIResponseError(BaseModel):
    """Error details for failed OpenAI response requests.

    :param code: Error code identifying the type of failure
    :param message: Human-readable error message describing the failure
    """

    # Machine-readable error code; value set is not constrained here.
    code: str
    # Human-readable description suitable for display or logging.
    message: str
|
||||
|
||||
|
||||
@json_schema_type
class OpenAIResponseInputMessageContentText(BaseModel):
    """Text content for input messages in OpenAI response format.

    :param text: The text content of the input message
    :param type: Content type identifier, always "input_text"
    """

    text: str
    # Fixed literal; distinguishes this variant from other input-content types
    # when the union is deserialized.
    type: Literal["input_text"] = "input_text"
|
||||
|
||||
|
||||
@json_schema_type
|
||||
class OpenAIResponseInputMessageContentImage(BaseModel):
|
||||
"""Image content for input messages in OpenAI response format.
|
||||
|
||||
:param detail: Level of detail for image processing, can be "low", "high", or "auto"
|
||||
:param type: Content type identifier, always "input_image"
|
||||
:param image_url: (Optional) URL of the image content
|
||||
"""
|
||||
detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
|
||||
type: Literal["input_image"] = "input_image"
|
||||
# TODO: handle file_id
|
||||
|
@ -46,6 +62,13 @@ register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMess
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseAnnotationFileCitation(BaseModel):
|
||||
"""File citation annotation for referencing specific files in response content.
|
||||
|
||||
:param type: Annotation type identifier, always "file_citation"
|
||||
:param file_id: Unique identifier of the referenced file
|
||||
:param filename: Name of the referenced file
|
||||
:param index: Position index of the citation within the content
|
||||
"""
|
||||
type: Literal["file_citation"] = "file_citation"
|
||||
file_id: str
|
||||
filename: str
|
||||
|
@ -54,6 +77,14 @@ class OpenAIResponseAnnotationFileCitation(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseAnnotationCitation(BaseModel):
|
||||
"""URL citation annotation for referencing external web resources.
|
||||
|
||||
:param type: Annotation type identifier, always "url_citation"
|
||||
:param end_index: End position of the citation span in the content
|
||||
:param start_index: Start position of the citation span in the content
|
||||
:param title: Title of the referenced web resource
|
||||
:param url: URL of the referenced web resource
|
||||
"""
|
||||
type: Literal["url_citation"] = "url_citation"
|
||||
end_index: int
|
||||
start_index: int
|
||||
|
@ -122,6 +153,12 @@ class OpenAIResponseMessage(BaseModel):
|
|||
|
||||
@json_schema_type
class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
    """Web search tool call output message for OpenAI responses.

    :param id: Unique identifier for this tool call
    :param status: Current status of the web search operation
    :param type: Tool call type identifier, always "web_search_call"
    """

    id: str
    # Free-form status string — not constrained to a Literal here;
    # NOTE(review): confirm the set of expected values against the producer.
    status: str
    type: Literal["web_search_call"] = "web_search_call"
|
||||
|
@ -129,6 +166,14 @@ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
|
||||
"""File search tool call output message for OpenAI responses.
|
||||
|
||||
:param id: Unique identifier for this tool call
|
||||
:param queries: List of search queries executed
|
||||
:param status: Current status of the file search operation
|
||||
:param type: Tool call type identifier, always "file_search_call"
|
||||
:param results: (Optional) Search results returned by the file search operation
|
||||
"""
|
||||
id: str
|
||||
queries: list[str]
|
||||
status: str
|
||||
|
@ -138,6 +183,15 @@ class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
|
||||
"""Function tool call output message for OpenAI responses.
|
||||
|
||||
:param call_id: Unique identifier for the function call
|
||||
:param name: Name of the function being called
|
||||
:param arguments: JSON string containing the function arguments
|
||||
:param type: Tool call type identifier, always "function_call"
|
||||
:param id: (Optional) Additional identifier for the tool call
|
||||
:param status: (Optional) Current status of the function call execution
|
||||
"""
|
||||
call_id: str
|
||||
name: str
|
||||
arguments: str
|
||||
|
@ -148,6 +202,16 @@ class OpenAIResponseOutputMessageFunctionToolCall(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseOutputMessageMCPCall(BaseModel):
|
||||
"""Model Context Protocol (MCP) call output message for OpenAI responses.
|
||||
|
||||
:param id: Unique identifier for this MCP call
|
||||
:param type: Tool call type identifier, always "mcp_call"
|
||||
:param arguments: JSON string containing the MCP call arguments
|
||||
:param name: Name of the MCP method being called
|
||||
:param server_label: Label identifying the MCP server handling the call
|
||||
:param error: (Optional) Error message if the MCP call failed
|
||||
:param output: (Optional) Output result from the successful MCP call
|
||||
"""
|
||||
id: str
|
||||
type: Literal["mcp_call"] = "mcp_call"
|
||||
arguments: str
|
||||
|
@ -158,6 +222,12 @@ class OpenAIResponseOutputMessageMCPCall(BaseModel):
|
|||
|
||||
|
||||
class MCPListToolsTool(BaseModel):
    """Tool definition returned by MCP list tools operation.

    :param input_schema: JSON schema defining the tool's input parameters
    :param name: Name of the tool
    :param description: (Optional) Description of what the tool does
    """

    # Raw JSON-schema dict; its structure is determined by the MCP server.
    input_schema: dict[str, Any]
    name: str
    description: str | None = None
|
||||
|
@ -165,6 +235,13 @@ class MCPListToolsTool(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseOutputMessageMCPListTools(BaseModel):
|
||||
"""MCP list tools output message containing available tools from an MCP server.
|
||||
|
||||
:param id: Unique identifier for this MCP list tools operation
|
||||
:param type: Tool call type identifier, always "mcp_list_tools"
|
||||
:param server_label: Label identifying the MCP server providing the tools
|
||||
:param tools: List of available tools provided by the MCP server
|
||||
"""
|
||||
id: str
|
||||
type: Literal["mcp_list_tools"] = "mcp_list_tools"
|
||||
server_label: str
|
||||
|
@ -206,11 +283,32 @@ class OpenAIResponseTextFormat(TypedDict, total=False):
|
|||
|
||||
@json_schema_type
class OpenAIResponseText(BaseModel):
    """Text response configuration for OpenAI responses.

    :param format: (Optional) Text format configuration specifying output format requirements
    """

    # None = no explicit format requested; presumably treated as plain text
    # by the handler — TODO confirm.
    format: OpenAIResponseTextFormat | None = None
|
||||
|
||||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObject(BaseModel):
|
||||
"""Complete OpenAI response object containing generation results and metadata.
|
||||
|
||||
:param created_at: Unix timestamp when the response was created
|
||||
:param error: (Optional) Error details if the response generation failed
|
||||
:param id: Unique identifier for this response
|
||||
:param model: Model identifier used for generation
|
||||
:param object: Object type identifier, always "response"
|
||||
:param output: List of generated output items (messages, tool calls, etc.)
|
||||
:param parallel_tool_calls: Whether tool calls can be executed in parallel
|
||||
:param previous_response_id: (Optional) ID of the previous response in a conversation
|
||||
:param status: Current status of the response generation
|
||||
:param temperature: (Optional) Sampling temperature used for generation
|
||||
:param text: Text formatting configuration for the response
|
||||
:param top_p: (Optional) Nucleus sampling parameter used for generation
|
||||
:param truncation: (Optional) Truncation strategy applied to the response
|
||||
:param user: (Optional) User identifier associated with the request
|
||||
"""
|
||||
created_at: int
|
||||
error: OpenAIResponseError | None = None
|
||||
id: str
|
||||
|
@ -231,6 +329,12 @@ class OpenAIResponseObject(BaseModel):
|
|||
|
||||
@json_schema_type
class OpenAIDeleteResponseObject(BaseModel):
    """Response object confirming deletion of an OpenAI response.

    :param id: Unique identifier of the deleted response
    :param object: Object type identifier, always "response"
    :param deleted: Deletion confirmation flag, always True
    """

    id: str
    object: Literal["response"] = "response"
    # Defaults to True; declared as plain bool, so a False value is
    # technically representable — NOTE(review): confirm producers never emit it.
    deleted: bool = True
|
||||
|
@ -238,18 +342,36 @@ class OpenAIDeleteResponseObject(BaseModel):
|
|||
|
||||
@json_schema_type
class OpenAIResponseObjectStreamResponseCreated(BaseModel):
    """Streaming event indicating a new response has been created.

    :param response: The newly created response object
    :param type: Event type identifier, always "response.created"
    """

    # Full snapshot of the response at creation time.
    response: OpenAIResponseObject
    type: Literal["response.created"] = "response.created"
|
||||
|
||||
|
||||
@json_schema_type
class OpenAIResponseObjectStreamResponseCompleted(BaseModel):
    """Streaming event indicating a response has been completed.

    :param response: The completed response object
    :param type: Event type identifier, always "response.completed"
    """

    # Full snapshot of the finished response, including its output items.
    response: OpenAIResponseObject
    type: Literal["response.completed"] = "response.completed"
|
||||
|
||||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel):
|
||||
"""Streaming event for when a new output item is added to the response.
|
||||
|
||||
:param response_id: Unique identifier of the response containing this output
|
||||
:param item: The output item that was added (message, tool call, etc.)
|
||||
:param output_index: Index position of this item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.output_item.added"
|
||||
"""
|
||||
response_id: str
|
||||
item: OpenAIResponseOutput
|
||||
output_index: int
|
||||
|
@ -259,6 +381,14 @@ class OpenAIResponseObjectStreamResponseOutputItemAdded(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel):
|
||||
"""Streaming event for when an output item is completed.
|
||||
|
||||
:param response_id: Unique identifier of the response containing this output
|
||||
:param item: The completed output item (message, tool call, etc.)
|
||||
:param output_index: Index position of this item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.output_item.done"
|
||||
"""
|
||||
response_id: str
|
||||
item: OpenAIResponseOutput
|
||||
output_index: int
|
||||
|
@ -268,6 +398,15 @@ class OpenAIResponseObjectStreamResponseOutputItemDone(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel):
|
||||
"""Streaming event for incremental text content updates.
|
||||
|
||||
:param content_index: Index position within the text content
|
||||
:param delta: Incremental text content being added
|
||||
:param item_id: Unique identifier of the output item being updated
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.output_text.delta"
|
||||
"""
|
||||
content_index: int
|
||||
delta: str
|
||||
item_id: str
|
||||
|
@ -278,6 +417,15 @@ class OpenAIResponseObjectStreamResponseOutputTextDelta(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel):
|
||||
"""Streaming event for when text output is completed.
|
||||
|
||||
:param content_index: Index position within the text content
|
||||
:param text: Final complete text content of the output item
|
||||
:param item_id: Unique identifier of the completed output item
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.output_text.done"
|
||||
"""
|
||||
content_index: int
|
||||
text: str # final text of the output item
|
||||
item_id: str
|
||||
|
@ -288,6 +436,14 @@ class OpenAIResponseObjectStreamResponseOutputTextDone(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel):
|
||||
"""Streaming event for incremental function call argument updates.
|
||||
|
||||
:param delta: Incremental function call arguments being added
|
||||
:param item_id: Unique identifier of the function call being updated
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.function_call_arguments.delta"
|
||||
"""
|
||||
delta: str
|
||||
item_id: str
|
||||
output_index: int
|
||||
|
@ -297,6 +453,14 @@ class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel):
|
||||
"""Streaming event for when function call arguments are completed.
|
||||
|
||||
:param arguments: Final complete arguments JSON string for the function call
|
||||
:param item_id: Unique identifier of the completed function call
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.function_call_arguments.done"
|
||||
"""
|
||||
arguments: str # final arguments of the function call
|
||||
item_id: str
|
||||
output_index: int
|
||||
|
@ -306,6 +470,13 @@ class OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseWebSearchCallInProgress(BaseModel):
|
||||
"""Streaming event for web search calls in progress.
|
||||
|
||||
:param item_id: Unique identifier of the web search call
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.web_search_call.in_progress"
|
||||
"""
|
||||
item_id: str
|
||||
output_index: int
|
||||
sequence_number: int
|
||||
|
@ -322,6 +493,13 @@ class OpenAIResponseObjectStreamResponseWebSearchCallSearching(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseWebSearchCallCompleted(BaseModel):
|
||||
"""Streaming event for completed web search calls.
|
||||
|
||||
:param item_id: Unique identifier of the completed web search call
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.web_search_call.completed"
|
||||
"""
|
||||
item_id: str
|
||||
output_index: int
|
||||
sequence_number: int
|
||||
|
@ -366,6 +544,13 @@ class OpenAIResponseObjectStreamResponseMcpCallArgumentsDone(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel):
|
||||
"""Streaming event for MCP calls in progress.
|
||||
|
||||
:param item_id: Unique identifier of the MCP call
|
||||
:param output_index: Index position of the item in the output list
|
||||
:param sequence_number: Sequential number for ordering streaming events
|
||||
:param type: Event type identifier, always "response.mcp_call.in_progress"
|
||||
"""
|
||||
item_id: str
|
||||
output_index: int
|
||||
sequence_number: int
|
||||
|
@ -374,12 +559,22 @@ class OpenAIResponseObjectStreamResponseMcpCallInProgress(BaseModel):
|
|||
|
||||
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallFailed(BaseModel):
    """Streaming event for failed MCP calls.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.failed"
    """

    # Unlike most stream events, no item_id/output_index is carried here;
    # NOTE(review): consumers must correlate via ordering — confirm.
    sequence_number: int
    type: Literal["response.mcp_call.failed"] = "response.mcp_call.failed"
|
||||
|
||||
|
||||
@json_schema_type
class OpenAIResponseObjectStreamResponseMcpCallCompleted(BaseModel):
    """Streaming event for completed MCP calls.

    :param sequence_number: Sequential number for ordering streaming events
    :param type: Event type identifier, always "response.mcp_call.completed"
    """

    # Carries only ordering info; the call result arrives in the output item.
    sequence_number: int
    type: Literal["response.mcp_call.completed"] = "response.mcp_call.completed"
|
||||
|
||||
|
@ -442,6 +637,11 @@ WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_20
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseInputToolWebSearch(BaseModel):
|
||||
"""Web search tool configuration for OpenAI response inputs.
|
||||
|
||||
:param type: Web search tool type variant to use
|
||||
:param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high"
|
||||
"""
|
||||
# Must match values of WebSearchToolTypes above
|
||||
type: Literal["web_search"] | Literal["web_search_preview"] | Literal["web_search_preview_2025_03_11"] = (
|
||||
"web_search"
|
||||
|
@ -453,6 +653,14 @@ class OpenAIResponseInputToolWebSearch(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseInputToolFunction(BaseModel):
|
||||
"""Function tool configuration for OpenAI response inputs.
|
||||
|
||||
:param type: Tool type identifier, always "function"
|
||||
:param name: Name of the function that can be called
|
||||
:param description: (Optional) Description of what the function does
|
||||
:param parameters: (Optional) JSON schema defining the function's parameters
|
||||
:param strict: (Optional) Whether to enforce strict parameter validation
|
||||
"""
|
||||
type: Literal["function"] = "function"
|
||||
name: str
|
||||
description: str | None = None
|
||||
|
@ -462,6 +670,14 @@ class OpenAIResponseInputToolFunction(BaseModel):
|
|||
|
||||
@json_schema_type
|
||||
class OpenAIResponseInputToolFileSearch(BaseModel):
|
||||
"""File search tool configuration for OpenAI response inputs.
|
||||
|
||||
:param type: Tool type identifier, always "file_search"
|
||||
:param vector_store_ids: List of vector store identifiers to search within
|
||||
:param filters: (Optional) Additional filters to apply to the search
|
||||
:param max_num_results: (Optional) Maximum number of search results to return (1-50)
|
||||
:param ranking_options: (Optional) Options for ranking and scoring search results
|
||||
"""
|
||||
type: Literal["file_search"] = "file_search"
|
||||
vector_store_ids: list[str]
|
||||
filters: dict[str, Any] | None = None
|
||||
|
@ -470,16 +686,34 @@ class OpenAIResponseInputToolFileSearch(BaseModel):
|
|||
|
||||
|
||||
class ApprovalFilter(BaseModel):
    """Filter configuration for MCP tool approval requirements.

    :param always: (Optional) List of tool names that always require approval
    :param never: (Optional) List of tool names that never require approval
    """

    # NOTE(review): behavior when a tool name appears in both lists is not
    # defined by this model — confirm precedence in the executor.
    always: list[str] | None = None
    never: list[str] | None = None
|
||||
|
||||
|
||||
class AllowedToolsFilter(BaseModel):
    """Filter configuration for restricting which MCP tools can be used.

    :param tool_names: (Optional) List of specific tool names that are allowed
    """

    # None presumably means "no restriction" (all tools allowed) — TODO confirm.
    tool_names: list[str] | None = None
|
||||
|
||||
|
||||
@json_schema_type
|
||||
class OpenAIResponseInputToolMCP(BaseModel):
|
||||
"""Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
|
||||
|
||||
:param type: Tool type identifier, always "mcp"
|
||||
:param server_label: Label to identify this MCP server
|
||||
:param server_url: URL endpoint of the MCP server
|
||||
:param headers: (Optional) HTTP headers to include when connecting to the server
|
||||
:param require_approval: Approval requirement for tool calls ("always", "never", or filter)
|
||||
:param allowed_tools: (Optional) Restriction on which tools can be used from this server
|
||||
"""
|
||||
type: Literal["mcp"] = "mcp"
|
||||
server_label: str
|
||||
server_url: str
|
||||
|
@ -500,17 +734,34 @@ register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
|
|||
|
||||
|
||||
class ListOpenAIResponseInputItem(BaseModel):
    """List container for OpenAI response input items.

    :param data: List of input items
    :param object: Object type identifier, always "list"
    """

    data: list[OpenAIResponseInput]
    # Mirrors the OpenAI list-envelope shape; no pagination fields here.
    object: Literal["list"] = "list"
|
||||
|
||||
|
||||
@json_schema_type
class OpenAIResponseObjectWithInput(OpenAIResponseObject):
    """OpenAI response object extended with input context information.

    :param input: List of input items that led to this response
    """

    # All other response fields are inherited from OpenAIResponseObject.
    input: list[OpenAIResponseInput]
|
||||
|
||||
|
||||
@json_schema_type
|
||||
class ListOpenAIResponseObject(BaseModel):
|
||||
"""Paginated list of OpenAI response objects with navigation metadata.
|
||||
|
||||
:param data: List of response objects with their input context
|
||||
:param has_more: Whether there are more results available beyond this page
|
||||
:param first_id: Identifier of the first item in this page
|
||||
:param last_id: Identifier of the last item in this page
|
||||
:param object: Object type identifier, always "list"
|
||||
"""
|
||||
data: list[OpenAIResponseObjectWithInput]
|
||||
has_more: bool
|
||||
first_id: str
|
||||
|
|
0
todos.md
0
todos.md
Loading…
Add table
Add a link
Reference in a new issue