diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html
index d480ff592..e2c53d4b0 100644
--- a/docs/_static/llama-stack-spec.html
+++ b/docs/_static/llama-stack-spec.html
@@ -8293,28 +8293,60 @@
"type": "array",
"items": {
"type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "type": "null"
+ "properties": {
+ "attributes": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "type": "null"
+ },
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "array"
+ },
+ {
+ "type": "object"
+ }
+ ]
},
- {
- "type": "boolean"
- },
- {
- "type": "number"
- },
- {
- "type": "string"
- },
- {
- "type": "array"
- },
- {
- "type": "object"
- }
- ]
- }
+ "description": "(Optional) Key-value attributes associated with the file"
+ },
+ "file_id": {
+ "type": "string",
+ "description": "Unique identifier of the file containing the result"
+ },
+ "filename": {
+ "type": "string",
+ "description": "Name of the file containing the result"
+ },
+ "score": {
+ "type": "number",
+ "description": "Relevance score for this search result (between 0 and 1)"
+ },
+ "text": {
+ "type": "string",
+ "description": "Text content of the search result"
+ }
+ },
+ "additionalProperties": false,
+ "required": [
+ "attributes",
+ "file_id",
+ "filename",
+ "score",
+ "text"
+ ],
+ "title": "OpenAIResponseOutputMessageFileSearchToolCallResults",
+ "description": "Search results returned by the file search operation."
},
"description": "(Optional) Search results returned by the file search operation"
}
@@ -8515,6 +8547,13 @@
"$ref": "#/components/schemas/OpenAIResponseInputTool"
}
},
+ "include": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "(Optional) Additional fields to include in the response."
+ },
"max_infer_iters": {
"type": "integer"
}
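The HTML spec change above replaces the free-form result dictionaries with a closed object of five required keys. A minimal sketch of validating a payload against that shape, assuming the third-party `jsonschema` package and hypothetical sample values (the per-key union under `attributes` is trimmed to a bare `object` for brevity):

```python
# Validate a hypothetical file_search result against the new closed schema.
import jsonschema

result_schema = {
    "type": "object",
    "properties": {
        "attributes": {"type": "object"},   # trimmed from the spec's oneOf union
        "file_id": {"type": "string"},
        "filename": {"type": "string"},
        "score": {"type": "number"},
        "text": {"type": "string"},
    },
    "additionalProperties": False,
    "required": ["attributes", "file_id", "filename", "score", "text"],
}

sample = {
    "attributes": {},                # key-value metadata for the source file
    "file_id": "file-abc123",        # hypothetical file identifier
    "filename": "report.pdf",        # hypothetical filename
    "score": 0.87,                   # relevance, documented as between 0 and 1
    "text": "matched chunk text",
}

jsonschema.validate(instance=sample, schema=result_schema)  # raises ValidationError on mismatch
```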
diff --git a/docs/_static/llama-stack-spec.yaml b/docs/_static/llama-stack-spec.yaml
index 9c0fba554..85cec3a78 100644
--- a/docs/_static/llama-stack-spec.yaml
+++ b/docs/_static/llama-stack-spec.yaml
@@ -6021,14 +6021,44 @@ components:
type: array
items:
type: object
- additionalProperties:
- oneOf:
- - type: 'null'
- - type: boolean
- - type: number
- - type: string
- - type: array
- - type: object
+ properties:
+ attributes:
+ type: object
+ additionalProperties:
+ oneOf:
+ - type: 'null'
+ - type: boolean
+ - type: number
+ - type: string
+ - type: array
+ - type: object
+ description: >-
+ (Optional) Key-value attributes associated with the file
+ file_id:
+ type: string
+ description: >-
+ Unique identifier of the file containing the result
+ filename:
+ type: string
+ description: Name of the file containing the result
+ score:
+ type: number
+ description: >-
+ Relevance score for this search result (between 0 and 1)
+ text:
+ type: string
+ description: Text content of the search result
+ additionalProperties: false
+ required:
+ - attributes
+ - file_id
+ - filename
+ - score
+ - text
+ title: >-
+ OpenAIResponseOutputMessageFileSearchToolCallResults
+ description: >-
+ Search results returned by the file search operation.
description: >-
(Optional) Search results returned by the file search operation
additionalProperties: false
@@ -6188,6 +6218,12 @@ components:
type: array
items:
$ref: '#/components/schemas/OpenAIResponseInputTool'
+ include:
+ type: array
+ items:
+ type: string
+ description: >-
+ (Optional) Additional fields to include in the response.
max_infer_iters:
type: integer
additionalProperties: false
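The YAML spec mirrors the HTML change; the second hunk in each file adds the optional `include` array to the create-response request. A sketch of a request body carrying it, with hypothetical values — the `file_search_call.results` string follows OpenAI's convention, but this schema only constrains `include` to an array of strings:

```python
# Hypothetical create-response request body exercising the new "include" field.
import json

request_body = {
    "model": "example-model",                 # hypothetical model id
    "input": "What does the attached report conclude?",
    "include": ["file_search_call.results"],  # assumed include value
    "max_infer_iters": 10,
}

print(json.dumps(request_body, indent=2))
```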
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py
index e816da766..7dd3e9289 100644
--- a/llama_stack/apis/agents/agents.py
+++ b/llama_stack/apis/agents/agents.py
@@ -706,6 +706,7 @@ class Agents(Protocol):
temperature: float | None = None,
text: OpenAIResponseText | None = None,
tools: list[OpenAIResponseInputTool] | None = None,
+ include: list[str] | None = None,
max_infer_iters: int | None = 10, # this is an extension to the OpenAI API
) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
"""Create a new OpenAI response.
@@ -713,6 +714,7 @@ class Agents(Protocol):
:param input: Input message(s) to create the response.
:param model: The underlying LLM used for completions.
:param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses.
+ :param include: (Optional) Additional fields to include in the response.
:returns: An OpenAIResponseObject.
"""
...
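On the Python side the protocol gains the same optional keyword. A sketch of how a caller might pass it, assuming `agents` is any implementation of the `Agents` protocol; the model id and include value are placeholders:

```python
# Sketch: invoking the extended protocol method with the new keyword.
async def fetch_response(agents):
    return await agents.create_openai_response(
        input="Summarize the attached files.",
        model="example-model",                 # hypothetical model id
        include=["file_search_call.results"],  # assumed include value
        max_infer_iters=10,
    )
```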
diff --git a/llama_stack/apis/agents/openai_responses.py b/llama_stack/apis/agents/openai_responses.py
index 10cadf38f..8574104dc 100644
--- a/llama_stack/apis/agents/openai_responses.py
+++ b/llama_stack/apis/agents/openai_responses.py
@@ -170,6 +170,23 @@ class OpenAIResponseOutputMessageWebSearchToolCall(BaseModel):
type: Literal["web_search_call"] = "web_search_call"


+class OpenAIResponseOutputMessageFileSearchToolCallResults(BaseModel):
+ """Search results returned by the file search operation.
+
+ :param attributes: (Optional) Key-value attributes associated with the file
+ :param file_id: Unique identifier of the file containing the result
+ :param filename: Name of the file containing the result
+ :param score: Relevance score for this search result (between 0 and 1)
+ :param text: Text content of the search result
+ """
+
+ attributes: dict[str, Any]
+ file_id: str
+ filename: str
+ score: float
+ text: str
+
+
@json_schema_type
class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
"""File search tool call output message for OpenAI responses.
@@ -185,7 +202,7 @@ class OpenAIResponseOutputMessageFileSearchToolCall(BaseModel):
queries: list[str]
status: str
type: Literal["file_search_call"] = "file_search_call"
- results: list[dict[str, Any]] | None = None
+ results: list[OpenAIResponseOutputMessageFileSearchToolCallResults] | None = None


@json_schema_type
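With `results` now typed, malformed entries fail at construction time instead of flowing through as loose dicts. A sketch of building the tool call with a typed result, assuming the existing model's `id` field and hypothetical values:

```python
from llama_stack.apis.agents.openai_responses import (
    OpenAIResponseOutputMessageFileSearchToolCall,
    OpenAIResponseOutputMessageFileSearchToolCallResults,
)

# Pydantic validates each result on construction; all values are hypothetical.
call = OpenAIResponseOutputMessageFileSearchToolCall(
    id="fs_call_1",                # hypothetical call id
    queries=["quarterly revenue"],
    status="completed",
    results=[
        OpenAIResponseOutputMessageFileSearchToolCallResults(
            attributes={},
            file_id="file-abc123",
            filename="report.pdf",
            score=0.92,
            text="Revenue grew 12% quarter over quarter.",
        )
    ],
)
```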
diff --git a/llama_stack/providers/inline/agents/meta_reference/agents.py b/llama_stack/providers/inline/agents/meta_reference/agents.py
index 15695ec48..0f12a0865 100644
--- a/llama_stack/providers/inline/agents/meta_reference/agents.py
+++ b/llama_stack/providers/inline/agents/meta_reference/agents.py
@@ -327,10 +327,21 @@ class MetaReferenceAgentsImpl(Agents):
temperature: float | None = None,
text: OpenAIResponseText | None = None,
tools: list[OpenAIResponseInputTool] | None = None,
+ include: list[str] | None = None,
max_infer_iters: int | None = 10,
) -> OpenAIResponseObject:
return await self.openai_responses_impl.create_openai_response(
- input, model, instructions, previous_response_id, store, stream, temperature, text, tools, max_infer_iters
+ input,
+ model,
+ instructions,
+ previous_response_id,
+ store,
+ stream,
+ temperature,
+ text,
+ tools,
+ include,
+ max_infer_iters,
)

async def list_openai_responses(
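The forwarding call is reformatted to one argument per line but stays positional, so its order must track `create_openai_response`'s signature exactly. A hedged alternative sketch of the same method body using keyword arguments, which would survive a future reordering (this is not what the patch does):

```python
# Alternative sketch of the same forwarding body, keyword-based:
return await self.openai_responses_impl.create_openai_response(
    input=input,
    model=model,
    instructions=instructions,
    previous_response_id=previous_response_id,
    store=store,
    stream=stream,
    temperature=temperature,
    text=text,
    tools=tools,
    include=include,
    max_infer_iters=max_infer_iters,
)
```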
diff --git a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py
index 7eb2b3897..b98ca114f 100644
--- a/llama_stack/providers/inline/agents/meta_reference/openai_responses.py
+++ b/llama_stack/providers/inline/agents/meta_reference/openai_responses.py
@@ -38,6 +38,7 @@ from llama_stack.apis.agents.openai_responses import (
OpenAIResponseOutputMessageContent,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFileSearchToolCall,
+ OpenAIResponseOutputMessageFileSearchToolCallResults,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseOutputMessageWebSearchToolCall,
@@ -333,6 +334,7 @@ class OpenAIResponsesImpl:
temperature: float | None = None,
text: OpenAIResponseText | None = None,
tools: list[OpenAIResponseInputTool] | None = None,
+ include: list[str] | None = None,
max_infer_iters: int | None = 10,
):
stream = bool(stream)
@@ -826,12 +828,13 @@ class OpenAIResponsesImpl:
text = result.metadata["chunks"][i] if "chunks" in result.metadata else None
score = result.metadata["scores"][i] if "scores" in result.metadata else None
message.results.append(
- {
- "file_id": doc_id,
- "filename": doc_id,
- "text": text,
- "score": score,
- }
+ OpenAIResponseOutputMessageFileSearchToolCallResults(
+ file_id=doc_id,
+ filename=doc_id,
+ text=text,
+ score=score,
+ attributes={},
+ )
)
if error_exc or (result.error_code and result.error_code > 0) or result.error_message:
message.status = "failed"
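Note that the typed model declares `text: str` and `score: float`, while the surrounding loop still falls back to `None` when the provider metadata lacks `chunks` or `scores`; pydantic would reject those `None`s, so the patched code effectively assumes both keys are always present. A defensive sketch of the same mapping, where the empty string and `0.0` defaults are assumptions, not part of the patch:

```python
from llama_stack.apis.agents.openai_responses import (
    OpenAIResponseOutputMessageFileSearchToolCallResults,
)

def build_result(result, doc_id: str, i: int) -> OpenAIResponseOutputMessageFileSearchToolCallResults:
    # Mirrors the loop above: chunk text and score come from provider
    # metadata, with hypothetical defaults instead of None fallbacks.
    chunks = result.metadata.get("chunks", [])
    scores = result.metadata.get("scores", [])
    return OpenAIResponseOutputMessageFileSearchToolCallResults(
        file_id=doc_id,
        filename=doc_id,      # filename falls back to the document id
        text=chunks[i] if i < len(chunks) else "",
        score=scores[i] if i < len(scores) else 0.0,
        attributes={},        # file attributes are not yet threaded through
    )
```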