Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-11 05:38:38 +00:00)
feat: reuse previous mcp tool listings where possible (#3710)
# What does this PR do?

This PR checks whether, when a previous response is linked, there are mcp_list_tools objects that can be reused instead of listing the tools explicitly every time.

Closes #3106

## Test Plan

Tested manually. Added unit tests to cover the new behaviour.

---------

Signed-off-by: Gordon Sim <gsim@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
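The reuse check can be pictured roughly as follows. This is a minimal sketch with hypothetical helper names, not the code in this PR: it assumes a linked previous response exposes its output items, and that MCP tool listings appear there as mcp_list_tools items carrying a server label.

```python
# Rough sketch of the reuse idea (hypothetical names, not the PR's implementation).
def cached_tool_listings(previous_response):
    """Collect mcp_list_tools items from a linked previous response, keyed by server label."""
    cached = {}
    for item in getattr(previous_response, "output", None) or []:
        if getattr(item, "type", None) == "mcp_list_tools":
            cached[item.server_label] = item
    return cached


def tools_for_mcp_server(server_label, previous_response, list_tools):
    """Reuse a previous listing when one exists; otherwise query the MCP server."""
    cached = cached_tool_listings(previous_response) if previous_response else {}
    if server_label in cached:
        return cached[server_label].tools   # skip the list-tools round trip
    return list_tools(server_label)         # fall back to listing explicitly
```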
This commit is contained in: parent 0066d986c5, commit 8bf07f91cb.

12 changed files with 1835 additions and 983 deletions
docs/static/deprecated-llama-stack-spec.html (vendored): 411 lines changed

@@ -8975,6 +8975,168 @@
Between "OpenAIResponseInputMessageContentText" and "OpenAIResponseMCPApprovalRequest", three schema definitions are added:

  OpenAIResponseInputToolFileSearch: object (additionalProperties: false; required: type, vector_store_ids; title "OpenAIResponseInputToolFileSearch"; description "File search tool configuration for OpenAI response inputs.") with properties:
    type: string, const "file_search", default "file_search" ("Tool type identifier, always \"file_search\"")
    vector_store_ids: array of string ("List of vector store identifiers to search within")
    filters: object, additionalProperties oneOf [null, boolean, number, string, array, object] ("(Optional) Additional filters to apply to the search")
    max_num_results: integer, default 10 ("(Optional) Maximum number of search results to return (1-50)")
    ranking_options: object, additionalProperties: false ("(Optional) Options for ranking and scoring search results"), with
      ranker: string ("(Optional) Name of the ranking algorithm to use") and
      score_threshold: number, default 0.0 ("(Optional) Minimum relevance score threshold for results")

  OpenAIResponseInputToolFunction: object (additionalProperties: false; required: type, name; title "OpenAIResponseInputToolFunction"; description "Function tool configuration for OpenAI response inputs.") with properties:
    type: string, const "function", default "function" ("Tool type identifier, always \"function\"")
    name: string ("Name of the function that can be called")
    description: string ("(Optional) Description of what the function does")
    parameters: object, additionalProperties oneOf [null, boolean, number, string, array, object] ("(Optional) JSON schema defining the function's parameters")
    strict: boolean ("(Optional) Whether to enforce strict parameter validation")

  OpenAIResponseInputToolWebSearch: object (additionalProperties: false; required: type; title "OpenAIResponseInputToolWebSearch"; description "Web search tool configuration for OpenAI response inputs.") with properties:
    type: oneOf the string constants "web_search", "web_search_preview", "web_search_preview_2025_03_11", default "web_search" ("Web search tool type variant to use")
    search_context_size: string, default "medium" ("(Optional) Size of search context, must be \"low\", \"medium\", or \"high\"")

@@ -9157,6 +9319,13 @@
Immediately after the nucleus-sampling ("top_p") property and before "truncation", a new property is added to the enclosing schema:
    tools: array of "$ref": "#/components/schemas/OpenAIResponseTool" ("(Optional) An array of tools the model may call while generating a response.")

@@ -9610,6 +9779,79 @@
Between "OpenAIResponseText" and "OpenAIResponseUsage", two schema definitions are added:

  OpenAIResponseTool: oneOf OpenAIResponseInputToolWebSearch, OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFunction, OpenAIResponseToolMCP, with a discriminator on "type" mapping web_search, file_search, function, and mcp to those schemas respectively.

  OpenAIResponseToolMCP: object (additionalProperties: false; required: type, server_label; title "OpenAIResponseToolMCP"; description "Model Context Protocol (MCP) tool configuration for OpenAI response object.") with properties:
    type: string, const "mcp", default "mcp" ("Tool type identifier, always \"mcp\"")
    server_label: string ("Label to identify this MCP server")
    allowed_tools: oneOf an array of string, or an AllowedToolsFilter object (additionalProperties: false; description "Filter configuration for restricting which MCP tools can be used.") whose tool_names is an array of string ("(Optional) List of specific tool names that are allowed"); overall description "(Optional) Restriction on which tools can be used from this server"

@@ -9697,134 +9939,6 @@ and @@ -9941,40 +10055,6 @@
The previous copies of "OpenAIResponseInputToolFileSearch" and "OpenAIResponseInputToolFunction" (which sat just before "OpenAIResponseInputToolMCP") and of "OpenAIResponseInputToolWebSearch" (which sat between "OpenAIResponseInputToolMCP" and "CreateOpenaiResponseRequest") are removed. "OpenAIResponseInputToolMCP" itself is retained, keeping its description "Model Context Protocol (MCP) tool configuration for OpenAI response inputs.", and "CreateOpenaiResponseRequest" now follows it directly.

@@ -10096,6 +10176,13 @@
The same "tools" property (array of "$ref": "#/components/schemas/OpenAIResponseTool") is added in a later schema, again between the nucleus-sampling ("top_p") property and "truncation".
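For illustration, a tools array conforming to the schemas above could look like the following. This is a hypothetical example (the tool names, labels, and ids are made up), not content from the diff:

```python
# Hypothetical entries for the new "tools" array, one per OpenAIResponseTool variant.
tools = [
    {
        "type": "mcp",                      # OpenAIResponseToolMCP
        "server_label": "deepwiki",
        "allowed_tools": ["ask_question"],  # array-of-strings form; the object form
                                            # would be {"tool_names": ["ask_question"]}
    },
    {
        "type": "web_search",               # OpenAIResponseInputToolWebSearch
        "search_context_size": "medium",
    },
    {
        "type": "file_search",              # OpenAIResponseInputToolFileSearch
        "vector_store_ids": ["vs_123"],
        "max_num_results": 10,
    },
    {
        "type": "function",                 # OpenAIResponseInputToolFunction
        "name": "get_weather",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
]
```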
docs/static/deprecated-llama-stack-spec.yaml (vendored): 294 lines changed

The YAML variant of the deprecated spec receives the same change:

@@ -6661,6 +6661,122 @@
The same three schemas (OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFunction, OpenAIResponseInputToolWebSearch) are added in YAML form between OpenAIResponseInputMessageContentText and OpenAIResponseMCPApprovalRequest.

@@ -6802,6 +6918,12 @@
A tools property (type: array; items: $ref '#/components/schemas/OpenAIResponseTool'; description "(Optional) An array of tools the model may call while generating a response.") is added immediately before the truncation property.

@@ -7158,6 +7280,56 @@
OpenAIResponseTool (a oneOf over OpenAIResponseInputToolWebSearch, OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFunction, and OpenAIResponseToolMCP with a discriminator on type) and OpenAIResponseToolMCP are added between OpenAIResponseText and OpenAIResponseUsage.

@@ -7219,98 +7391,6 @@ and @@ -7392,30 +7472,6 @@
The old copies of OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFunction, and OpenAIResponseInputToolWebSearch are removed. The surrounding context shows a retained discriminator mapping (file_search, function, and mcp, with mcp still pointing at '#/components/schemas/OpenAIResponseInputToolMCP'), the retained OpenAIResponseInputToolMCP schema, and CreateOpenaiResponseRequest now following it directly.

@@ -7516,6 +7572,12 @@
The same tools property is added before truncation in a later schema.
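The OpenAIResponseTool union added above is a discriminated union on the type field. As a rough Python analogue (an assumption about shape, not the actual llama-stack models), it could be expressed with Pydantic like this:

```python
# Sketch of the OpenAIResponseTool union in Pydantic terms. Class and field names
# mirror the spec above; this is illustrative, not the llama-stack source.
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field


class OpenAIResponseInputToolWebSearch(BaseModel):
    type: Literal["web_search", "web_search_preview", "web_search_preview_2025_03_11"] = "web_search"
    search_context_size: str | None = "medium"


class OpenAIResponseInputToolFileSearch(BaseModel):
    type: Literal["file_search"] = "file_search"
    vector_store_ids: list[str]
    max_num_results: int | None = 10


class OpenAIResponseInputToolFunction(BaseModel):
    type: Literal["function"] = "function"
    name: str
    description: str | None = None
    parameters: dict | None = None
    strict: bool | None = None


class OpenAIResponseToolMCP(BaseModel):
    type: Literal["mcp"] = "mcp"
    server_label: str
    # The spec also allows an AllowedToolsFilter object here; a plain list is the simpler variant.
    allowed_tools: list[str] | None = None


OpenAIResponseTool = Annotated[
    Union[
        OpenAIResponseInputToolWebSearch,
        OpenAIResponseInputToolFileSearch,
        OpenAIResponseInputToolFunction,
        OpenAIResponseToolMCP,
    ],
    Field(discriminator="type"),
]
```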
docs/static/llama-stack-spec.html (vendored): 411 lines changed

The current HTML spec receives the same change at different offsets:

@@ -7445,6 +7445,168 @@
"OpenAIResponseInputToolFileSearch", "OpenAIResponseInputToolFunction", and "OpenAIResponseInputToolWebSearch" (identical to the definitions shown for the deprecated spec above) are added between "OpenAIResponseInputFunctionToolCallOutput" and "OpenAIResponseMCPApprovalRequest".

@@ -7564,6 +7726,13 @@
A "tools" property (array of "$ref": "#/components/schemas/OpenAIResponseTool"; "(Optional) An array of tools the model may call while generating a response.") is added immediately before "truncation".

@@ -7705,6 +7874,79 @@
"OpenAIResponseTool" and "OpenAIResponseToolMCP" are added between "OpenAIResponseText" and "OpenAIResponseUsage".

@@ -7792,134 +8034,6 @@ and @@ -8036,40 +8150,6 @@
The old copies of "OpenAIResponseInputToolFileSearch", "OpenAIResponseInputToolFunction", and "OpenAIResponseInputToolWebSearch" are removed; "OpenAIResponseInputToolMCP" is retained and "CreateOpenaiResponseRequest" now follows it directly.

@@ -8191,6 +8271,13 @@
The same "tools" property is added before "truncation" in a later schema.
docs/static/llama-stack-spec.yaml (vendored): 294 lines changed

The current YAML spec receives the same change:

@@ -5654,6 +5654,122 @@
OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFunction, and OpenAIResponseInputToolWebSearch are added in YAML form between OpenAIResponseInputFunctionToolCallOutput and OpenAIResponseMCPApprovalRequest.

@@ -5755,6 +5871,12 @@
A tools property (type: array; items: $ref '#/components/schemas/OpenAIResponseTool') is added immediately before truncation.

@@ -5854,6 +5976,56 @@
OpenAIResponseTool and OpenAIResponseToolMCP are added between OpenAIResponseText and OpenAIResponseUsage.

@@ -5915,98 +6087,6 @@ and @@ -6088,30 +6168,6 @@
The old copies of OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFunction, and OpenAIResponseInputToolWebSearch are removed; OpenAIResponseInputToolMCP is retained and CreateOpenaiResponseRequest now follows it directly.

@@ -6212,6 +6268,12 @@
The same tools property is added before truncation in a later schema.
411 docs/static/stainless-llama-stack-spec.html vendored

@@ -9454,6 +9454,168 @@
            "title": "OpenAIResponseInputFunctionToolCallOutput",
            "description": "This represents the output of a function call that gets passed back to the model."
          },
          "OpenAIResponseInputToolFileSearch": {
            "type": "object",
            "properties": {
              "type": {
                "type": "string",
                "const": "file_search",
                "default": "file_search",
                "description": "Tool type identifier, always \"file_search\""
              },
              "vector_store_ids": {
                "type": "array",
                "items": {
                  "type": "string"
                },
                "description": "List of vector store identifiers to search within"
              },
              "filters": {
                "type": "object",
                "additionalProperties": {
                  "oneOf": [
                    {
                      "type": "null"
                    },
                    {
                      "type": "boolean"
                    },
                    {
                      "type": "number"
                    },
                    {
                      "type": "string"
                    },
                    {
                      "type": "array"
                    },
                    {
                      "type": "object"
                    }
                  ]
                },
                "description": "(Optional) Additional filters to apply to the search"
              },
              "max_num_results": {
                "type": "integer",
                "default": 10,
                "description": "(Optional) Maximum number of search results to return (1-50)"
              },
              "ranking_options": {
                "type": "object",
                "properties": {
                  "ranker": {
                    "type": "string",
                    "description": "(Optional) Name of the ranking algorithm to use"
                  },
                  "score_threshold": {
                    "type": "number",
                    "default": 0.0,
                    "description": "(Optional) Minimum relevance score threshold for results"
                  }
                },
                "additionalProperties": false,
                "description": "(Optional) Options for ranking and scoring search results"
              }
            },
            "additionalProperties": false,
            "required": [
              "type",
              "vector_store_ids"
            ],
            "title": "OpenAIResponseInputToolFileSearch",
            "description": "File search tool configuration for OpenAI response inputs."
          },
          "OpenAIResponseInputToolFunction": {
            "type": "object",
            "properties": {
              "type": {
                "type": "string",
                "const": "function",
                "default": "function",
                "description": "Tool type identifier, always \"function\""
              },
              "name": {
                "type": "string",
                "description": "Name of the function that can be called"
              },
              "description": {
                "type": "string",
                "description": "(Optional) Description of what the function does"
              },
              "parameters": {
                "type": "object",
                "additionalProperties": {
                  "oneOf": [
                    {
                      "type": "null"
                    },
                    {
                      "type": "boolean"
                    },
                    {
                      "type": "number"
                    },
                    {
                      "type": "string"
                    },
                    {
                      "type": "array"
                    },
                    {
                      "type": "object"
                    }
                  ]
                },
                "description": "(Optional) JSON schema defining the function's parameters"
              },
              "strict": {
                "type": "boolean",
                "description": "(Optional) Whether to enforce strict parameter validation"
              }
            },
            "additionalProperties": false,
            "required": [
              "type",
              "name"
            ],
            "title": "OpenAIResponseInputToolFunction",
            "description": "Function tool configuration for OpenAI response inputs."
          },
          "OpenAIResponseInputToolWebSearch": {
            "type": "object",
            "properties": {
              "type": {
                "oneOf": [
                  {
                    "type": "string",
                    "const": "web_search"
                  },
                  {
                    "type": "string",
                    "const": "web_search_preview"
                  },
                  {
                    "type": "string",
                    "const": "web_search_preview_2025_03_11"
                  }
                ],
                "default": "web_search",
                "description": "Web search tool type variant to use"
              },
              "search_context_size": {
                "type": "string",
                "default": "medium",
                "description": "(Optional) Size of search context, must be \"low\", \"medium\", or \"high\""
              }
            },
            "additionalProperties": false,
            "required": [
              "type"
            ],
            "title": "OpenAIResponseInputToolWebSearch",
            "description": "Web search tool configuration for OpenAI response inputs."
          },
          "OpenAIResponseMCPApprovalRequest": {
            "type": "object",
            "properties": {
@@ -9573,6 +9735,13 @@
              "type": "number",
              "description": "(Optional) Nucleus sampling parameter used for generation"
            },
            "tools": {
              "type": "array",
              "items": {
                "$ref": "#/components/schemas/OpenAIResponseTool"
              },
              "description": "(Optional) An array of tools the model may call while generating a response."
            },
            "truncation": {
              "type": "string",
              "description": "(Optional) Truncation strategy applied to the response"
@@ -9714,6 +9883,79 @@
            "title": "OpenAIResponseText",
            "description": "Text response configuration for OpenAI responses."
          },
          "OpenAIResponseTool": {
            "oneOf": [
              {
                "$ref": "#/components/schemas/OpenAIResponseInputToolWebSearch"
              },
              {
                "$ref": "#/components/schemas/OpenAIResponseInputToolFileSearch"
              },
              {
                "$ref": "#/components/schemas/OpenAIResponseInputToolFunction"
              },
              {
                "$ref": "#/components/schemas/OpenAIResponseToolMCP"
              }
            ],
            "discriminator": {
              "propertyName": "type",
              "mapping": {
                "web_search": "#/components/schemas/OpenAIResponseInputToolWebSearch",
                "file_search": "#/components/schemas/OpenAIResponseInputToolFileSearch",
                "function": "#/components/schemas/OpenAIResponseInputToolFunction",
                "mcp": "#/components/schemas/OpenAIResponseToolMCP"
              }
            }
          },
          "OpenAIResponseToolMCP": {
            "type": "object",
            "properties": {
              "type": {
                "type": "string",
                "const": "mcp",
                "default": "mcp",
                "description": "Tool type identifier, always \"mcp\""
              },
              "server_label": {
                "type": "string",
                "description": "Label to identify this MCP server"
              },
              "allowed_tools": {
                "oneOf": [
                  {
                    "type": "array",
                    "items": {
                      "type": "string"
                    }
                  },
                  {
                    "type": "object",
                    "properties": {
                      "tool_names": {
                        "type": "array",
                        "items": {
                          "type": "string"
                        },
                        "description": "(Optional) List of specific tool names that are allowed"
                      }
                    },
                    "additionalProperties": false,
                    "title": "AllowedToolsFilter",
                    "description": "Filter configuration for restricting which MCP tools can be used."
                  }
                ],
                "description": "(Optional) Restriction on which tools can be used from this server"
              }
            },
            "additionalProperties": false,
            "required": [
              "type",
              "server_label"
            ],
            "title": "OpenAIResponseToolMCP",
            "description": "Model Context Protocol (MCP) tool configuration for OpenAI response object."
          },
          "OpenAIResponseUsage": {
            "type": "object",
            "properties": {
@@ -9801,134 +10043,6 @@
              }
            }
          },
          "OpenAIResponseInputToolMCP": {
            "type": "object",
            "properties": {
@@ -10045,40 +10159,6 @@
            "title": "OpenAIResponseInputToolMCP",
            "description": "Model Context Protocol (MCP) tool configuration for OpenAI response inputs."
          },
          "CreateOpenaiResponseRequest": {
            "type": "object",
            "properties": {
@@ -10200,6 +10280,13 @@
              "type": "number",
              "description": "(Optional) Nucleus sampling parameter used for generation"
            },
            "tools": {
              "type": "array",
              "items": {
                "$ref": "#/components/schemas/OpenAIResponseTool"
              },
              "description": "(Optional) An array of tools the model may call while generating a response."
            },
            "truncation": {
              "type": "string",
              "description": "(Optional) Truncation strategy applied to the response"
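For orientation, here is a hedged end-to-end sketch of the behaviour these spec changes support, written against the OpenAI-compatible Responses endpoint. The base URL, model id and MCP server details are assumptions for illustration, not values taken from this change.

```python
# Sketch only: exercising the new `tools` echo and the mcp_list_tools reuse
# path through the OpenAI-compatible Responses API exposed by a Llama Stack
# server. Base URL, model id and MCP server details are assumptions.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

tools = [
    {
        "type": "mcp",
        "server_label": "docs",
        "server_url": "http://localhost:8000/sse",
        "require_approval": "never",
    }
]

first = client.responses.create(
    model="llama3.2:3b",
    input="What tools do you have available?",
    tools=tools,
)

# Because the same tool definition is passed again, the stack can reuse the
# mcp_list_tools output stored with `first` instead of listing tools again.
follow_up = client.responses.create(
    model="llama3.2:3b",
    input="Use one of them to look up the installation guide.",
    tools=tools,
    previous_response_id=first.id,
)
print(follow_up.output_text)
```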
294 docs/static/stainless-llama-stack-spec.yaml vendored

@@ -7099,6 +7099,122 @@ components:
      description: >-
        This represents the output of a function call that gets passed back to the
        model.
    OpenAIResponseInputToolFileSearch:
      type: object
      properties:
        type:
          type: string
          const: file_search
          default: file_search
          description: >-
            Tool type identifier, always "file_search"
        vector_store_ids:
          type: array
          items:
            type: string
          description: >-
            List of vector store identifiers to search within
        filters:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) Additional filters to apply to the search
        max_num_results:
          type: integer
          default: 10
          description: >-
            (Optional) Maximum number of search results to return (1-50)
        ranking_options:
          type: object
          properties:
            ranker:
              type: string
              description: >-
                (Optional) Name of the ranking algorithm to use
            score_threshold:
              type: number
              default: 0.0
              description: >-
                (Optional) Minimum relevance score threshold for results
          additionalProperties: false
          description: >-
            (Optional) Options for ranking and scoring search results
      additionalProperties: false
      required:
        - type
        - vector_store_ids
      title: OpenAIResponseInputToolFileSearch
      description: >-
        File search tool configuration for OpenAI response inputs.
    OpenAIResponseInputToolFunction:
      type: object
      properties:
        type:
          type: string
          const: function
          default: function
          description: Tool type identifier, always "function"
        name:
          type: string
          description: Name of the function that can be called
        description:
          type: string
          description: >-
            (Optional) Description of what the function does
        parameters:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) JSON schema defining the function's parameters
        strict:
          type: boolean
          description: >-
            (Optional) Whether to enforce strict parameter validation
      additionalProperties: false
      required:
        - type
        - name
      title: OpenAIResponseInputToolFunction
      description: >-
        Function tool configuration for OpenAI response inputs.
    OpenAIResponseInputToolWebSearch:
      type: object
      properties:
        type:
          oneOf:
            - type: string
              const: web_search
            - type: string
              const: web_search_preview
            - type: string
              const: web_search_preview_2025_03_11
          default: web_search
          description: Web search tool type variant to use
        search_context_size:
          type: string
          default: medium
          description: >-
            (Optional) Size of search context, must be "low", "medium", or "high"
      additionalProperties: false
      required:
        - type
      title: OpenAIResponseInputToolWebSearch
      description: >-
        Web search tool configuration for OpenAI response inputs.
    OpenAIResponseMCPApprovalRequest:
      type: object
      properties:
@@ -7200,6 +7316,12 @@ components:
          type: number
          description: >-
            (Optional) Nucleus sampling parameter used for generation
        tools:
          type: array
          items:
            $ref: '#/components/schemas/OpenAIResponseTool'
          description: >-
            (Optional) An array of tools the model may call while generating a response.
        truncation:
          type: string
          description: >-
@@ -7299,6 +7421,56 @@ components:
      title: OpenAIResponseText
      description: >-
        Text response configuration for OpenAI responses.
    OpenAIResponseTool:
      oneOf:
        - $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch'
        - $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch'
        - $ref: '#/components/schemas/OpenAIResponseInputToolFunction'
        - $ref: '#/components/schemas/OpenAIResponseToolMCP'
      discriminator:
        propertyName: type
        mapping:
          web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch'
          file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
          function: '#/components/schemas/OpenAIResponseInputToolFunction'
          mcp: '#/components/schemas/OpenAIResponseToolMCP'
    OpenAIResponseToolMCP:
      type: object
      properties:
        type:
          type: string
          const: mcp
          default: mcp
          description: Tool type identifier, always "mcp"
        server_label:
          type: string
          description: Label to identify this MCP server
        allowed_tools:
          oneOf:
            - type: array
              items:
                type: string
            - type: object
              properties:
                tool_names:
                  type: array
                  items:
                    type: string
                  description: >-
                    (Optional) List of specific tool names that are allowed
              additionalProperties: false
              title: AllowedToolsFilter
              description: >-
                Filter configuration for restricting which MCP tools can be used.
          description: >-
            (Optional) Restriction on which tools can be used from this server
      additionalProperties: false
      required:
        - type
        - server_label
      title: OpenAIResponseToolMCP
      description: >-
        Model Context Protocol (MCP) tool configuration for OpenAI response object.
    OpenAIResponseUsage:
      type: object
      properties:
@@ -7360,98 +7532,6 @@ components:
          file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
          function: '#/components/schemas/OpenAIResponseInputToolFunction'
          mcp: '#/components/schemas/OpenAIResponseInputToolMCP'
    OpenAIResponseInputToolMCP:
      type: object
      properties:
@@ -7533,30 +7613,6 @@ components:
      title: OpenAIResponseInputToolMCP
      description: >-
        Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
    CreateOpenaiResponseRequest:
      type: object
      properties:
@@ -7657,6 +7713,12 @@ components:
          type: number
          description: >-
            (Optional) Nucleus sampling parameter used for generation
        tools:
          type: array
          items:
            $ref: '#/components/schemas/OpenAIResponseTool'
          description: >-
            (Optional) An array of tools the model may call while generating a response.
        truncation:
          type: string
          description: >-
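Before the Python-level changes below, a small illustrative sketch of how the two discriminated unions differ. The import path follows the modules touched later in this commit; the payloads themselves are made up.

```python
# Sketch only, not part of the change set. Shows the behaviour of the
# discriminated unions defined in llama_stack.apis.agents.openai_responses.
from pydantic import TypeAdapter

from llama_stack.apis.agents.openai_responses import (
    OpenAIResponseInputTool,
    OpenAIResponseTool,
)

# As an input tool, the "mcp" variant requires a server_url...
input_tool = TypeAdapter(OpenAIResponseInputTool).validate_python(
    {"type": "mcp", "server_label": "docs", "server_url": "http://localhost:8000/sse"}
)

# ...while in the response-object union the MCP variant only echoes the
# label and any allowed_tools restriction, so no server_url is needed.
response_tool = TypeAdapter(OpenAIResponseTool).validate_python(
    {"type": "mcp", "server_label": "docs", "allowed_tools": ["search_docs"]}
)

print(type(input_tool).__name__, type(response_tool).__name__)
```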
@ -346,6 +346,138 @@ class OpenAIResponseText(BaseModel):
|
||||||
format: OpenAIResponseTextFormat | None = None
|
format: OpenAIResponseTextFormat | None = None
|
||||||
|
|
||||||
|
|
||||||
|
# Must match type Literals of OpenAIResponseInputToolWebSearch below
|
||||||
|
WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_2025_03_11"]
|
||||||
|
|
||||||
|
|
||||||
|
@json_schema_type
|
||||||
|
class OpenAIResponseInputToolWebSearch(BaseModel):
|
||||||
|
"""Web search tool configuration for OpenAI response inputs.
|
||||||
|
|
||||||
|
:param type: Web search tool type variant to use
|
||||||
|
:param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high"
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Must match values of WebSearchToolTypes above
|
||||||
|
type: Literal["web_search"] | Literal["web_search_preview"] | Literal["web_search_preview_2025_03_11"] = (
|
||||||
|
"web_search"
|
||||||
|
)
|
||||||
|
# TODO: actually use search_context_size somewhere...
|
||||||
|
search_context_size: str | None = Field(default="medium", pattern="^low|medium|high$")
|
||||||
|
# TODO: add user_location
|
||||||
|
|
||||||
|
|
||||||
|
@json_schema_type
|
||||||
|
class OpenAIResponseInputToolFunction(BaseModel):
|
||||||
|
"""Function tool configuration for OpenAI response inputs.
|
||||||
|
|
||||||
|
:param type: Tool type identifier, always "function"
|
||||||
|
:param name: Name of the function that can be called
|
||||||
|
:param description: (Optional) Description of what the function does
|
||||||
|
:param parameters: (Optional) JSON schema defining the function's parameters
|
||||||
|
:param strict: (Optional) Whether to enforce strict parameter validation
|
||||||
|
"""
|
||||||
|
|
||||||
|
type: Literal["function"] = "function"
|
||||||
|
name: str
|
||||||
|
description: str | None = None
|
||||||
|
parameters: dict[str, Any] | None
|
||||||
|
strict: bool | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@json_schema_type
|
||||||
|
class OpenAIResponseInputToolFileSearch(BaseModel):
|
||||||
|
"""File search tool configuration for OpenAI response inputs.
|
||||||
|
|
||||||
|
:param type: Tool type identifier, always "file_search"
|
||||||
|
:param vector_store_ids: List of vector store identifiers to search within
|
||||||
|
:param filters: (Optional) Additional filters to apply to the search
|
||||||
|
:param max_num_results: (Optional) Maximum number of search results to return (1-50)
|
||||||
|
:param ranking_options: (Optional) Options for ranking and scoring search results
|
||||||
|
"""
|
||||||
|
|
||||||
|
type: Literal["file_search"] = "file_search"
|
||||||
|
vector_store_ids: list[str]
|
||||||
|
filters: dict[str, Any] | None = None
|
||||||
|
max_num_results: int | None = Field(default=10, ge=1, le=50)
|
||||||
|
ranking_options: FileSearchRankingOptions | None = None
|
||||||
|
|
||||||
|
|
||||||
|
class ApprovalFilter(BaseModel):
|
||||||
|
"""Filter configuration for MCP tool approval requirements.
|
||||||
|
|
||||||
|
:param always: (Optional) List of tool names that always require approval
|
||||||
|
:param never: (Optional) List of tool names that never require approval
|
||||||
|
"""
|
||||||
|
|
||||||
|
always: list[str] | None = None
|
||||||
|
never: list[str] | None = None
|
||||||
|
|
||||||
|
|
||||||
|
class AllowedToolsFilter(BaseModel):
|
||||||
|
"""Filter configuration for restricting which MCP tools can be used.
|
||||||
|
|
||||||
|
:param tool_names: (Optional) List of specific tool names that are allowed
|
||||||
|
"""
|
||||||
|
|
||||||
|
tool_names: list[str] | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@json_schema_type
|
||||||
|
class OpenAIResponseInputToolMCP(BaseModel):
|
||||||
|
"""Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
|
||||||
|
|
||||||
|
:param type: Tool type identifier, always "mcp"
|
||||||
|
:param server_label: Label to identify this MCP server
|
||||||
|
:param server_url: URL endpoint of the MCP server
|
||||||
|
:param headers: (Optional) HTTP headers to include when connecting to the server
|
||||||
|
:param require_approval: Approval requirement for tool calls ("always", "never", or filter)
|
||||||
|
:param allowed_tools: (Optional) Restriction on which tools can be used from this server
|
||||||
|
"""
|
||||||
|
|
||||||
|
type: Literal["mcp"] = "mcp"
|
||||||
|
server_label: str
|
||||||
|
server_url: str
|
||||||
|
headers: dict[str, Any] | None = None
|
||||||
|
|
||||||
|
require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
|
||||||
|
allowed_tools: list[str] | AllowedToolsFilter | None = None
|
||||||
|
|
||||||
|
|
||||||
|
OpenAIResponseInputTool = Annotated[
|
||||||
|
OpenAIResponseInputToolWebSearch
|
||||||
|
| OpenAIResponseInputToolFileSearch
|
||||||
|
| OpenAIResponseInputToolFunction
|
||||||
|
| OpenAIResponseInputToolMCP,
|
||||||
|
Field(discriminator="type"),
|
||||||
|
]
|
||||||
|
register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
|
||||||
|
|
||||||
|
|
||||||
|
@json_schema_type
|
||||||
|
class OpenAIResponseToolMCP(BaseModel):
|
||||||
|
"""Model Context Protocol (MCP) tool configuration for OpenAI response object.
|
||||||
|
|
||||||
|
:param type: Tool type identifier, always "mcp"
|
||||||
|
:param server_label: Label to identify this MCP server
|
||||||
|
:param allowed_tools: (Optional) Restriction on which tools can be used from this server
|
||||||
|
"""
|
||||||
|
|
||||||
|
type: Literal["mcp"] = "mcp"
|
||||||
|
server_label: str
|
||||||
|
allowed_tools: list[str] | AllowedToolsFilter | None = None
|
||||||
|
|
||||||
|
|
||||||
|
OpenAIResponseTool = Annotated[
|
||||||
|
OpenAIResponseInputToolWebSearch
|
||||||
|
| OpenAIResponseInputToolFileSearch
|
||||||
|
| OpenAIResponseInputToolFunction
|
||||||
|
| OpenAIResponseToolMCP, # The only type that differes from that in the inputs is the MCP tool
|
||||||
|
Field(discriminator="type"),
|
||||||
|
]
|
||||||
|
register_schema(OpenAIResponseTool, name="OpenAIResponseTool")
|
||||||
|
|
||||||
|
|
||||||
class OpenAIResponseUsageOutputTokensDetails(BaseModel):
|
class OpenAIResponseUsageOutputTokensDetails(BaseModel):
|
||||||
"""Token details for output tokens in OpenAI response usage.
|
"""Token details for output tokens in OpenAI response usage.
|
||||||
|
|
||||||
|
@ -398,6 +530,7 @@ class OpenAIResponseObject(BaseModel):
|
||||||
:param temperature: (Optional) Sampling temperature used for generation
|
:param temperature: (Optional) Sampling temperature used for generation
|
||||||
:param text: Text formatting configuration for the response
|
:param text: Text formatting configuration for the response
|
||||||
:param top_p: (Optional) Nucleus sampling parameter used for generation
|
:param top_p: (Optional) Nucleus sampling parameter used for generation
|
||||||
|
:param tools: (Optional) An array of tools the model may call while generating a response.
|
||||||
:param truncation: (Optional) Truncation strategy applied to the response
|
:param truncation: (Optional) Truncation strategy applied to the response
|
||||||
:param usage: (Optional) Token usage information for the response
|
:param usage: (Optional) Token usage information for the response
|
||||||
"""
|
"""
|
||||||
|
@ -416,6 +549,7 @@ class OpenAIResponseObject(BaseModel):
|
||||||
# before the field was added. New responses will have this set always.
|
# before the field was added. New responses will have this set always.
|
||||||
text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text"))
|
text: OpenAIResponseText = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text"))
|
||||||
top_p: float | None = None
|
top_p: float | None = None
|
||||||
|
tools: list[OpenAIResponseTool] | None = None
|
||||||
truncation: str | None = None
|
truncation: str | None = None
|
||||||
usage: OpenAIResponseUsage | None = None
|
usage: OpenAIResponseUsage | None = None
|
||||||
|
|
||||||
|
@ -878,114 +1012,6 @@ OpenAIResponseInput = Annotated[
|
||||||
register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
|
register_schema(OpenAIResponseInput, name="OpenAIResponseInput")
|
||||||
|
|
||||||
|
|
||||||
# Must match type Literals of OpenAIResponseInputToolWebSearch below
|
|
||||||
WebSearchToolTypes = ["web_search", "web_search_preview", "web_search_preview_2025_03_11"]
|
|
||||||
|
|
||||||
|
|
||||||
@json_schema_type
|
|
||||||
class OpenAIResponseInputToolWebSearch(BaseModel):
|
|
||||||
"""Web search tool configuration for OpenAI response inputs.
|
|
||||||
|
|
||||||
:param type: Web search tool type variant to use
|
|
||||||
:param search_context_size: (Optional) Size of search context, must be "low", "medium", or "high"
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Must match values of WebSearchToolTypes above
|
|
||||||
type: Literal["web_search"] | Literal["web_search_preview"] | Literal["web_search_preview_2025_03_11"] = (
|
|
||||||
"web_search"
|
|
||||||
)
|
|
||||||
# TODO: actually use search_context_size somewhere...
|
|
||||||
search_context_size: str | None = Field(default="medium", pattern="^low|medium|high$")
|
|
||||||
# TODO: add user_location
|
|
||||||
|
|
||||||
|
|
||||||
@json_schema_type
|
|
||||||
class OpenAIResponseInputToolFunction(BaseModel):
|
|
||||||
"""Function tool configuration for OpenAI response inputs.
|
|
||||||
|
|
||||||
:param type: Tool type identifier, always "function"
|
|
||||||
:param name: Name of the function that can be called
|
|
||||||
:param description: (Optional) Description of what the function does
|
|
||||||
:param parameters: (Optional) JSON schema defining the function's parameters
|
|
||||||
:param strict: (Optional) Whether to enforce strict parameter validation
|
|
||||||
"""
|
|
||||||
|
|
||||||
type: Literal["function"] = "function"
|
|
||||||
name: str
|
|
||||||
description: str | None = None
|
|
||||||
parameters: dict[str, Any] | None
|
|
||||||
strict: bool | None = None
|
|
||||||
|
|
||||||
|
|
||||||
@json_schema_type
|
|
||||||
class OpenAIResponseInputToolFileSearch(BaseModel):
|
|
||||||
"""File search tool configuration for OpenAI response inputs.
|
|
||||||
|
|
||||||
:param type: Tool type identifier, always "file_search"
|
|
||||||
:param vector_store_ids: List of vector store identifiers to search within
|
|
||||||
:param filters: (Optional) Additional filters to apply to the search
|
|
||||||
:param max_num_results: (Optional) Maximum number of search results to return (1-50)
|
|
||||||
:param ranking_options: (Optional) Options for ranking and scoring search results
|
|
||||||
"""
|
|
||||||
|
|
||||||
type: Literal["file_search"] = "file_search"
|
|
||||||
vector_store_ids: list[str]
|
|
||||||
filters: dict[str, Any] | None = None
|
|
||||||
max_num_results: int | None = Field(default=10, ge=1, le=50)
|
|
||||||
ranking_options: FileSearchRankingOptions | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class ApprovalFilter(BaseModel):
|
|
||||||
"""Filter configuration for MCP tool approval requirements.
|
|
||||||
|
|
||||||
:param always: (Optional) List of tool names that always require approval
|
|
||||||
:param never: (Optional) List of tool names that never require approval
|
|
||||||
"""
|
|
||||||
|
|
||||||
always: list[str] | None = None
|
|
||||||
never: list[str] | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class AllowedToolsFilter(BaseModel):
|
|
||||||
"""Filter configuration for restricting which MCP tools can be used.
|
|
||||||
|
|
||||||
:param tool_names: (Optional) List of specific tool names that are allowed
|
|
||||||
"""
|
|
||||||
|
|
||||||
tool_names: list[str] | None = None
|
|
||||||
|
|
||||||
|
|
||||||
@json_schema_type
|
|
||||||
class OpenAIResponseInputToolMCP(BaseModel):
|
|
||||||
"""Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
|
|
||||||
|
|
||||||
:param type: Tool type identifier, always "mcp"
|
|
||||||
:param server_label: Label to identify this MCP server
|
|
||||||
:param server_url: URL endpoint of the MCP server
|
|
||||||
:param headers: (Optional) HTTP headers to include when connecting to the server
|
|
||||||
:param require_approval: Approval requirement for tool calls ("always", "never", or filter)
|
|
||||||
:param allowed_tools: (Optional) Restriction on which tools can be used from this server
|
|
||||||
"""
|
|
||||||
|
|
||||||
type: Literal["mcp"] = "mcp"
|
|
||||||
server_label: str
|
|
||||||
server_url: str
|
|
||||||
headers: dict[str, Any] | None = None
|
|
||||||
|
|
||||||
require_approval: Literal["always"] | Literal["never"] | ApprovalFilter = "never"
|
|
||||||
allowed_tools: list[str] | AllowedToolsFilter | None = None
|
|
||||||
|
|
||||||
|
|
||||||
OpenAIResponseInputTool = Annotated[
|
|
||||||
OpenAIResponseInputToolWebSearch
|
|
||||||
| OpenAIResponseInputToolFileSearch
|
|
||||||
| OpenAIResponseInputToolFunction
|
|
||||||
| OpenAIResponseInputToolMCP,
|
|
||||||
Field(discriminator="type"),
|
|
||||||
]
|
|
||||||
register_schema(OpenAIResponseInputTool, name="OpenAIResponseInputTool")
|
|
||||||
|
|
||||||
|
|
||||||
class ListOpenAIResponseInputItem(BaseModel):
|
class ListOpenAIResponseInputItem(BaseModel):
|
||||||
"""List container for OpenAI response input items.
|
"""List container for OpenAI response input items.
|
||||||
|
|
||||||
|
|
|
@ -39,7 +39,7 @@ from llama_stack.providers.utils.responses.responses_store import (
|
||||||
|
|
||||||
from .streaming import StreamingResponseOrchestrator
|
from .streaming import StreamingResponseOrchestrator
|
||||||
from .tool_executor import ToolExecutor
|
from .tool_executor import ToolExecutor
|
||||||
from .types import ChatCompletionContext
|
from .types import ChatCompletionContext, ToolContext
|
||||||
from .utils import (
|
from .utils import (
|
||||||
convert_response_input_to_chat_messages,
|
convert_response_input_to_chat_messages,
|
||||||
convert_response_text_to_chat_response_format,
|
convert_response_text_to_chat_response_format,
|
||||||
|
@ -91,13 +91,15 @@ class OpenAIResponsesImpl:
|
||||||
async def _process_input_with_previous_response(
|
async def _process_input_with_previous_response(
|
||||||
self,
|
self,
|
||||||
input: str | list[OpenAIResponseInput],
|
input: str | list[OpenAIResponseInput],
|
||||||
|
tools: list[OpenAIResponseInputTool] | None,
|
||||||
previous_response_id: str | None,
|
previous_response_id: str | None,
|
||||||
) -> tuple[str | list[OpenAIResponseInput], list[OpenAIMessageParam]]:
|
) -> tuple[str | list[OpenAIResponseInput], list[OpenAIMessageParam]]:
|
||||||
"""Process input with optional previous response context.
|
"""Process input with optional previous response context.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
tuple: (all_input for storage, messages for chat completion)
|
tuple: (all_input for storage, messages for chat completion, tool context)
|
||||||
"""
|
"""
|
||||||
|
tool_context = ToolContext(tools)
|
||||||
if previous_response_id:
|
if previous_response_id:
|
||||||
previous_response: _OpenAIResponseObjectWithInputAndMessages = (
|
previous_response: _OpenAIResponseObjectWithInputAndMessages = (
|
||||||
await self.responses_store.get_response_object(previous_response_id)
|
await self.responses_store.get_response_object(previous_response_id)
|
||||||
|
@ -113,11 +115,13 @@ class OpenAIResponsesImpl:
|
||||||
else:
|
else:
|
||||||
# Backward compatibility: reconstruct from inputs
|
# Backward compatibility: reconstruct from inputs
|
||||||
messages = await convert_response_input_to_chat_messages(all_input)
|
messages = await convert_response_input_to_chat_messages(all_input)
|
||||||
|
|
||||||
|
tool_context.recover_tools_from_previous_response(previous_response)
|
||||||
else:
|
else:
|
||||||
all_input = input
|
all_input = input
|
||||||
messages = await convert_response_input_to_chat_messages(input)
|
messages = await convert_response_input_to_chat_messages(input)
|
||||||
|
|
||||||
return all_input, messages
|
return all_input, messages, tool_context
|
||||||
|
|
||||||
async def _prepend_instructions(self, messages, instructions):
|
async def _prepend_instructions(self, messages, instructions):
|
||||||
if instructions:
|
if instructions:
|
||||||
|
@ -273,7 +277,9 @@ class OpenAIResponsesImpl:
|
||||||
max_infer_iters: int | None = 10,
|
max_infer_iters: int | None = 10,
|
||||||
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||||
# Input preprocessing
|
# Input preprocessing
|
||||||
all_input, messages = await self._process_input_with_previous_response(input, previous_response_id)
|
all_input, messages, tool_context = await self._process_input_with_previous_response(
|
||||||
|
input, tools, previous_response_id
|
||||||
|
)
|
||||||
await self._prepend_instructions(messages, instructions)
|
await self._prepend_instructions(messages, instructions)
|
||||||
|
|
||||||
# Structured outputs
|
# Structured outputs
|
||||||
|
@ -285,6 +291,7 @@ class OpenAIResponsesImpl:
|
||||||
response_tools=tools,
|
response_tools=tools,
|
||||||
temperature=temperature,
|
temperature=temperature,
|
||||||
response_format=response_format,
|
response_format=response_format,
|
||||||
|
tool_context=tool_context,
|
||||||
inputs=all_input,
|
inputs=all_input,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -99,7 +99,7 @@ class StreamingResponseOrchestrator:
|
||||||
self.tool_executor = tool_executor
|
self.tool_executor = tool_executor
|
||||||
self.sequence_number = 0
|
self.sequence_number = 0
|
||||||
# Store MCP tool mapping that gets built during tool processing
|
# Store MCP tool mapping that gets built during tool processing
|
||||||
self.mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] = {}
|
self.mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] = ctx.tool_context.previous_tools or {}
|
||||||
# Track final messages after all tool executions
|
# Track final messages after all tool executions
|
||||||
self.final_messages: list[OpenAIMessageParam] = []
|
self.final_messages: list[OpenAIMessageParam] = []
|
||||||
# mapping for annotations
|
# mapping for annotations
|
||||||
|
@ -129,6 +129,7 @@ class StreamingResponseOrchestrator:
|
||||||
status=status,
|
status=status,
|
||||||
output=self._clone_outputs(outputs),
|
output=self._clone_outputs(outputs),
|
||||||
text=self.text,
|
text=self.text,
|
||||||
|
tools=self.ctx.available_tools(),
|
||||||
error=error,
|
error=error,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -146,10 +147,8 @@ class StreamingResponseOrchestrator:
|
||||||
sequence_number=self.sequence_number,
|
sequence_number=self.sequence_number,
|
||||||
)
|
)
|
||||||
|
|
||||||
# Process all tools (including MCP tools) and emit streaming events
|
async for stream_event in self._process_tools(output_messages):
|
||||||
if self.ctx.response_tools:
|
yield stream_event
|
||||||
async for stream_event in self._process_tools(self.ctx.response_tools, output_messages):
|
|
||||||
yield stream_event
|
|
||||||
|
|
||||||
n_iter = 0
|
n_iter = 0
|
||||||
messages = self.ctx.messages.copy()
|
messages = self.ctx.messages.copy()
|
||||||
|
@ -590,7 +589,7 @@ class StreamingResponseOrchestrator:
|
||||||
sequence_number=self.sequence_number,
|
sequence_number=self.sequence_number,
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _process_tools(
|
async def _process_new_tools(
|
||||||
self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
|
self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
|
||||||
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||||
"""Process all tools and emit appropriate streaming events."""
|
"""Process all tools and emit appropriate streaming events."""
|
||||||
|
@ -645,7 +644,6 @@ class StreamingResponseOrchestrator:
|
||||||
yield OpenAIResponseObjectStreamResponseMcpListToolsInProgress(
|
yield OpenAIResponseObjectStreamResponseMcpListToolsInProgress(
|
||||||
sequence_number=self.sequence_number,
|
sequence_number=self.sequence_number,
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# Parse allowed/never allowed tools
|
# Parse allowed/never allowed tools
|
||||||
always_allowed = None
|
always_allowed = None
|
||||||
|
@ -707,39 +705,26 @@ class StreamingResponseOrchestrator:
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
async for stream_event in self._add_mcp_list_tools(mcp_list_message, output_messages):
|
||||||
# Add the MCP list message to output
|
yield stream_event
|
||||||
output_messages.append(mcp_list_message)
|
|
||||||
|
|
||||||
# Emit output_item.added for the MCP list tools message
|
|
||||||
self.sequence_number += 1
|
|
||||||
yield OpenAIResponseObjectStreamResponseOutputItemAdded(
|
|
||||||
response_id=self.response_id,
|
|
||||||
item=mcp_list_message,
|
|
||||||
output_index=len(output_messages) - 1,
|
|
||||||
sequence_number=self.sequence_number,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Emit mcp_list_tools.completed
|
|
||||||
self.sequence_number += 1
|
|
||||||
yield OpenAIResponseObjectStreamResponseMcpListToolsCompleted(
|
|
||||||
sequence_number=self.sequence_number,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Emit output_item.done for the MCP list tools message
|
|
||||||
self.sequence_number += 1
|
|
||||||
yield OpenAIResponseObjectStreamResponseOutputItemDone(
|
|
||||||
response_id=self.response_id,
|
|
||||||
item=mcp_list_message,
|
|
||||||
output_index=len(output_messages) - 1,
|
|
||||||
sequence_number=self.sequence_number,
|
|
||||||
)
|
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
# TODO: Emit mcp_list_tools.failed event if needed
|
# TODO: Emit mcp_list_tools.failed event if needed
|
||||||
logger.exception(f"Failed to list MCP tools from {mcp_tool.server_url}: {e}")
|
logger.exception(f"Failed to list MCP tools from {mcp_tool.server_url}: {e}")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
async def _process_tools(
|
||||||
|
self, output_messages: list[OpenAIResponseOutput]
|
||||||
|
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||||
|
# Handle all mcp tool lists from previous response that are still valid:
|
||||||
|
for tool in self.ctx.tool_context.previous_tool_listings:
|
||||||
|
async for evt in self._reuse_mcp_list_tools(tool, output_messages):
|
||||||
|
yield evt
|
||||||
|
# Process all remaining tools (including MCP tools) and emit streaming events
|
||||||
|
if self.ctx.tool_context.tools_to_process:
|
||||||
|
async for stream_event in self._process_new_tools(self.ctx.tool_context.tools_to_process, output_messages):
|
||||||
|
yield stream_event
|
||||||
|
|
||||||
def _approval_required(self, tool_name: str) -> bool:
|
def _approval_required(self, tool_name: str) -> bool:
|
||||||
if tool_name not in self.mcp_tool_to_server:
|
if tool_name not in self.mcp_tool_to_server:
|
||||||
return False
|
return False
|
||||||
|
@ -774,7 +759,6 @@ class StreamingResponseOrchestrator:
|
||||||
output_index=len(output_messages) - 1,
|
output_index=len(output_messages) - 1,
|
||||||
sequence_number=self.sequence_number,
|
sequence_number=self.sequence_number,
|
||||||
)
|
)
|
||||||
|
|
||||||
self.sequence_number += 1
|
self.sequence_number += 1
|
||||||
yield OpenAIResponseObjectStreamResponseOutputItemDone(
|
yield OpenAIResponseObjectStreamResponseOutputItemDone(
|
||||||
response_id=self.response_id,
|
response_id=self.response_id,
|
||||||
|
@ -782,3 +766,60 @@ class StreamingResponseOrchestrator:
|
||||||
output_index=len(output_messages) - 1,
|
output_index=len(output_messages) - 1,
|
||||||
sequence_number=self.sequence_number,
|
sequence_number=self.sequence_number,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
async def _add_mcp_list_tools(
|
||||||
|
self, mcp_list_message: OpenAIResponseOutputMessageMCPListTools, output_messages: list[OpenAIResponseOutput]
|
||||||
|
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||||
|
# Add the MCP list message to output
|
||||||
|
output_messages.append(mcp_list_message)
|
||||||
|
|
||||||
|
# Emit output_item.added for the MCP list tools message
|
||||||
|
self.sequence_number += 1
|
||||||
|
yield OpenAIResponseObjectStreamResponseOutputItemAdded(
|
||||||
|
response_id=self.response_id,
|
||||||
|
item=mcp_list_message,
|
||||||
|
output_index=len(output_messages) - 1,
|
||||||
|
sequence_number=self.sequence_number,
|
||||||
|
)
|
||||||
|
# Emit mcp_list_tools.completed
|
||||||
|
self.sequence_number += 1
|
||||||
|
yield OpenAIResponseObjectStreamResponseMcpListToolsCompleted(
|
||||||
|
sequence_number=self.sequence_number,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Emit output_item.done for the MCP list tools message
|
||||||
|
self.sequence_number += 1
|
||||||
|
yield OpenAIResponseObjectStreamResponseOutputItemDone(
|
||||||
|
response_id=self.response_id,
|
||||||
|
item=mcp_list_message,
|
||||||
|
output_index=len(output_messages) - 1,
|
||||||
|
sequence_number=self.sequence_number,
|
||||||
|
)
|
||||||
|
|
||||||
|
async def _reuse_mcp_list_tools(
|
||||||
|
self, original: OpenAIResponseOutputMessageMCPListTools, output_messages: list[OpenAIResponseOutput]
|
||||||
|
) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||||
|
for t in original.tools:
|
||||||
|
from llama_stack.models.llama.datatypes import ToolDefinition
|
||||||
|
from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
|
||||||
|
|
||||||
|
# convert from input_schema to map of ToolParamDefinitions...
|
||||||
|
tool_def = ToolDefinition(
|
||||||
|
tool_name=t.name,
|
||||||
|
description=t.description,
|
||||||
|
input_schema=t.input_schema,
|
||||||
|
)
|
||||||
|
# ...then can convert that to openai completions tool
|
||||||
|
openai_tool = convert_tooldef_to_openai_tool(tool_def)
|
||||||
|
if self.ctx.chat_tools is None:
|
||||||
|
self.ctx.chat_tools = []
|
||||||
|
self.ctx.chat_tools.append(openai_tool)
|
||||||
|
|
||||||
|
mcp_list_message = OpenAIResponseOutputMessageMCPListTools(
|
||||||
|
id=f"mcp_list_{uuid.uuid4()}",
|
||||||
|
server_label=original.server_label,
|
||||||
|
tools=original.tools,
|
||||||
|
)
|
||||||
|
|
||||||
|
async for stream_event in self._add_mcp_list_tools(mcp_list_message, output_messages):
|
||||||
|
yield stream_event
|
||||||
|
|
|
@ -12,10 +12,18 @@ from pydantic import BaseModel
|
||||||
from llama_stack.apis.agents.openai_responses import (
|
from llama_stack.apis.agents.openai_responses import (
|
||||||
OpenAIResponseInput,
|
OpenAIResponseInput,
|
||||||
OpenAIResponseInputTool,
|
OpenAIResponseInputTool,
|
||||||
|
OpenAIResponseInputToolFileSearch,
|
||||||
|
OpenAIResponseInputToolFunction,
|
||||||
|
OpenAIResponseInputToolMCP,
|
||||||
|
OpenAIResponseInputToolWebSearch,
|
||||||
OpenAIResponseMCPApprovalRequest,
|
OpenAIResponseMCPApprovalRequest,
|
||||||
OpenAIResponseMCPApprovalResponse,
|
OpenAIResponseMCPApprovalResponse,
|
||||||
|
OpenAIResponseObject,
|
||||||
OpenAIResponseObjectStream,
|
OpenAIResponseObjectStream,
|
||||||
OpenAIResponseOutput,
|
OpenAIResponseOutput,
|
||||||
|
OpenAIResponseOutputMessageMCPListTools,
|
||||||
|
OpenAIResponseTool,
|
||||||
|
OpenAIResponseToolMCP,
|
||||||
)
|
)
|
||||||
from llama_stack.apis.inference import OpenAIChatCompletionToolCall, OpenAIMessageParam, OpenAIResponseFormatParam
|
from llama_stack.apis.inference import OpenAIChatCompletionToolCall, OpenAIMessageParam, OpenAIResponseFormatParam
|
||||||
|
|
||||||
|
@@ -55,6 +63,86 @@ class ChatCompletionResult:
         return bool(self.tool_calls)
 
 
+class ToolContext(BaseModel):
+    """Holds information about tools from this and (if relevant)
+    previous response in order to facilitate reuse of previous
+    listings where appropriate."""
+
+    # tools argument passed into current request:
+    current_tools: list[OpenAIResponseInputTool]
+    # reconstructed map of tool -> mcp server from previous response:
+    previous_tools: dict[str, OpenAIResponseInputToolMCP]
+    # reusable mcp-list-tools objects from previous response:
+    previous_tool_listings: list[OpenAIResponseOutputMessageMCPListTools]
+    # tool arguments from current request that still need to be processed:
+    tools_to_process: list[OpenAIResponseInputTool]
+
+    def __init__(
+        self,
+        current_tools: list[OpenAIResponseInputTool] | None,
+    ):
+        super().__init__(
+            current_tools=current_tools or [],
+            previous_tools={},
+            previous_tool_listings=[],
+            tools_to_process=current_tools or [],
+        )
+
+    def recover_tools_from_previous_response(
+        self,
+        previous_response: OpenAIResponseObject,
+    ):
+        """Determine which mcp_list_tools objects from previous response we can reuse."""
+
+        if self.current_tools and previous_response.tools:
+            previous_tools_by_label: dict[str, OpenAIResponseToolMCP] = {}
+            for tool in previous_response.tools:
+                if isinstance(tool, OpenAIResponseToolMCP):
+                    previous_tools_by_label[tool.server_label] = tool
+            # collect tool definitions which are the same in current and previous requests:
+            tools_to_process = []
+            matched: dict[str, OpenAIResponseInputToolMCP] = {}
+            for tool in self.current_tools:
+                if isinstance(tool, OpenAIResponseInputToolMCP) and tool.server_label in previous_tools_by_label:
+                    previous_tool = previous_tools_by_label[tool.server_label]
+                    if previous_tool.allowed_tools == tool.allowed_tools:
+                        matched[tool.server_label] = tool
+                    else:
+                        tools_to_process.append(tool)
+                else:
+                    tools_to_process.append(tool)
+            # tools that are not the same or were not previously defined need to be processed:
+            self.tools_to_process = tools_to_process
+            # for all matched definitions, get the mcp_list_tools objects from the previous output:
+            self.previous_tool_listings = [
+                obj for obj in previous_response.output if obj.type == "mcp_list_tools" and obj.server_label in matched
+            ]
+            # reconstruct the tool to server mappings that can be reused:
+            for listing in self.previous_tool_listings:
+                definition = matched[listing.server_label]
+                for tool in listing.tools:
+                    self.previous_tools[tool.name] = definition
+
+    def available_tools(self) -> list[OpenAIResponseTool]:
+        if not self.current_tools:
+            return []
+
+        def convert_tool(tool: OpenAIResponseInputTool) -> OpenAIResponseTool:
+            if isinstance(tool, OpenAIResponseInputToolWebSearch):
+                return tool
+            if isinstance(tool, OpenAIResponseInputToolFileSearch):
+                return tool
+            if isinstance(tool, OpenAIResponseInputToolFunction):
+                return tool
+            if isinstance(tool, OpenAIResponseInputToolMCP):
+                return OpenAIResponseToolMCP(
+                    server_label=tool.server_label,
+                    allowed_tools=tool.allowed_tools,
+                )
+
+        return [convert_tool(tool) for tool in self.current_tools]
+
+
 class ChatCompletionContext(BaseModel):
     model: str
     messages: list[OpenAIMessageParam]
@@ -62,6 +150,7 @@ class ChatCompletionContext(BaseModel):
     chat_tools: list[ChatCompletionToolParam] | None = None
     temperature: float | None
     response_format: OpenAIResponseFormatParam
+    tool_context: ToolContext | None
     approval_requests: list[OpenAIResponseMCPApprovalRequest] = []
     approval_responses: dict[str, OpenAIResponseMCPApprovalResponse] = {}
 
@@ -72,6 +161,7 @@ class ChatCompletionContext(BaseModel):
         response_tools: list[OpenAIResponseInputTool] | None,
         temperature: float | None,
         response_format: OpenAIResponseFormatParam,
+        tool_context: ToolContext,
         inputs: list[OpenAIResponseInput] | str,
     ):
         super().__init__(
@@ -80,6 +170,7 @@ class ChatCompletionContext(BaseModel):
             response_tools=response_tools,
             temperature=temperature,
             response_format=response_format,
+            tool_context=tool_context,
         )
         if not isinstance(inputs, str):
             self.approval_requests = [input for input in inputs if input.type == "mcp_approval_request"]
@@ -96,3 +187,8 @@ class ChatCompletionContext(BaseModel):
             if request.name == tool_name and request.arguments == arguments:
                 return request
         return None
+
+    def available_tools(self) -> list[OpenAIResponseTool]:
+        if not self.tool_context:
+            return []
+        return self.tool_context.available_tools()

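Pulling the types.py changes together, the intended flow of ToolContext is roughly the following. This is a minimal sketch modeled on the unit tests further down; the labels, IDs, URLs and model name are placeholders, and the previous response would normally be rehydrated from the responses store via previous_response_id.

    from llama_stack.apis.agents.openai_responses import (
        MCPListToolsTool,
        OpenAIResponseInputToolMCP,
        OpenAIResponseObject,
        OpenAIResponseOutputMessageMCPListTools,
        OpenAIResponseToolMCP,
    )
    from llama_stack.providers.inline.agents.meta_reference.responses.types import ToolContext

    # current request asks for the same MCP server ("alabel") as the previous one
    ctx = ToolContext([OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl")])

    # previous response already carries an mcp_list_tools output for that server
    previous = OpenAIResponseObject(
        created_at=1234,
        id="prev",
        model="mymodel",
        status="completed",
        output=[
            OpenAIResponseOutputMessageMCPListTools(
                id="mcp_list_prev",
                server_label="alabel",
                tools=[MCPListToolsTool(name="test_tool", input_schema={})],
            )
        ],
    )
    previous.tools = [OpenAIResponseToolMCP(server_label="alabel")]

    ctx.recover_tools_from_previous_response(previous)

    assert ctx.tools_to_process == []                  # nothing left to list afresh
    assert "test_tool" in ctx.previous_tools           # tool name -> current MCP tool config
    assert ctx.previous_tool_listings[0].server_label == "alabel"

Any tool whose server label or allowed_tools does not line up with the previous request stays in tools_to_process and is handled exactly as before, so the reuse is purely an optimisation.
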
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from unittest.mock import AsyncMock
+from unittest.mock import AsyncMock, patch
 
 import pytest
 from openai.types.chat.chat_completion_chunk import (
@@ -20,6 +20,7 @@ from llama_stack.apis.agents.openai_responses import (
     ListOpenAIResponseInputItem,
     OpenAIResponseInputMessageContentText,
     OpenAIResponseInputToolFunction,
+    OpenAIResponseInputToolMCP,
     OpenAIResponseInputToolWebSearch,
     OpenAIResponseMessage,
     OpenAIResponseOutputMessageContentOutputText,
@@ -38,7 +39,7 @@ from llama_stack.apis.inference import (
     OpenAIResponseFormatJSONSchema,
     OpenAIUserMessageParam,
 )
-from llama_stack.apis.tools.tools import ToolDef, ToolGroups, ToolInvocationResult, ToolRuntime
+from llama_stack.apis.tools.tools import ListToolDefsResponse, ToolDef, ToolGroups, ToolInvocationResult, ToolRuntime
 from llama_stack.core.access_control.access_control import default_policy
 from llama_stack.core.datatypes import ResponsesStoreConfig
 from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
@@ -963,6 +964,57 @@ async def test_store_response_uses_rehydrated_input_with_previous_response(
     assert result.status == "completed"
 
 
+@patch("llama_stack.providers.utils.tools.mcp.list_mcp_tools")
+async def test_reuse_mcp_tool_list(
+    mock_list_mcp_tools, openai_responses_impl, mock_responses_store, mock_inference_api
+):
+    """Test that mcp_list_tools can be reused where appropriate."""
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+    mock_list_mcp_tools.return_value = ListToolDefsResponse(
+        data=[ToolDef(name="test_tool", description="a test tool", input_schema={}, output_schema={})]
+    )
+
+    res1 = await openai_responses_impl.create_openai_response(
+        input="What is 2+2?",
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        store=True,
+        tools=[
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ],
+    )
+    args = mock_responses_store.store_response_object.call_args
+    data = args.kwargs["response_object"].model_dump()
+    data["input"] = [input_item.model_dump() for input_item in args.kwargs["input"]]
+    data["messages"] = [msg.model_dump() for msg in args.kwargs["messages"]]
+    stored = _OpenAIResponseObjectWithInputAndMessages(**data)
+    mock_responses_store.get_response_object.return_value = stored
+
+    res2 = await openai_responses_impl.create_openai_response(
+        previous_response_id=res1.id,
+        input="Now what is 3+3?",
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        store=True,
+        tools=[
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ],
+    )
+    assert len(mock_inference_api.openai_chat_completion.call_args_list) == 2
+    second_call = mock_inference_api.openai_chat_completion.call_args_list[1]
+    tools_seen = second_call.kwargs["tools"]
+    assert len(tools_seen) == 1
+    assert tools_seen[0]["function"]["name"] == "test_tool"
+    assert tools_seen[0]["function"]["description"] == "a test tool"
+
+    assert mock_list_mcp_tools.call_count == 1
+    listings = [obj for obj in res2.output if obj.type == "mcp_list_tools"]
+    assert len(listings) == 1
+    assert listings[0].server_label == "alabel"
+    assert len(listings[0].tools) == 1
+    assert listings[0].tools[0].name == "test_tool"
+
+
 @pytest.mark.parametrize(
     "text_format, response_format",
     [
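From a client's point of view the reuse is transparent: linking the second request to the first via previous_response_id is enough. Below is a minimal sketch against a locally running Llama Stack distribution using the OpenAI-compatible client; the base URL, API key and MCP server URL are placeholders for your own deployment, not values taken from this commit.

    from openai import OpenAI

    # placeholder endpoint for a local Llama Stack distribution; adjust to your setup
    client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")
    mcp_tool = {"type": "mcp", "server_label": "alabel", "server_url": "http://localhost:8000/sse"}

    first = client.responses.create(
        model="meta-llama/Llama-3.1-8B-Instruct",
        input="What is 2+2?",
        tools=[mcp_tool],
    )
    # same MCP tool config plus previous_response_id: the stack can replay the
    # cached mcp_list_tools output instead of listing the server's tools again
    second = client.responses.create(
        model="meta-llama/Llama-3.1-8B-Instruct",
        input="Now what is 3+3?",
        tools=[mcp_tool],
        previous_response_id=first.id,
    )

Whether the listing was actually reused can be checked by looking for the replayed mcp_list_tools item in second.output.
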
@@ -0,0 +1,183 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


from llama_stack.apis.agents.openai_responses import (
    MCPListToolsTool,
    OpenAIResponseInputToolFileSearch,
    OpenAIResponseInputToolFunction,
    OpenAIResponseInputToolMCP,
    OpenAIResponseInputToolWebSearch,
    OpenAIResponseObject,
    OpenAIResponseOutputMessageMCPListTools,
    OpenAIResponseToolMCP,
)
from llama_stack.providers.inline.agents.meta_reference.responses.types import ToolContext


class TestToolContext:
    def test_no_tools(self):
        tools = []
        context = ToolContext(tools)
        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="mymodel", output=[], status="")
        context.recover_tools_from_previous_response(previous_response)

        assert len(context.tools_to_process) == 0
        assert len(context.previous_tools) == 0
        assert len(context.previous_tool_listings) == 0

    def test_no_previous_tools(self):
        tools = [
            OpenAIResponseInputToolFileSearch(vector_store_ids=["fake"]),
            OpenAIResponseInputToolMCP(server_label="label", server_url="url"),
        ]
        context = ToolContext(tools)
        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="mymodel", output=[], status="")
        context.recover_tools_from_previous_response(previous_response)

        assert len(context.tools_to_process) == 2
        assert len(context.previous_tools) == 0
        assert len(context.previous_tool_listings) == 0

    def test_reusable_server(self):
        tools = [
            OpenAIResponseInputToolFileSearch(vector_store_ids=["fake"]),
            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
        ]
        context = ToolContext(tools)
        output = [
            OpenAIResponseOutputMessageMCPListTools(
                id="test", server_label="alabel", tools=[MCPListToolsTool(name="test_tool", input_schema={})]
            )
        ]
        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
        previous_response.tools = [
            OpenAIResponseInputToolFileSearch(vector_store_ids=["fake"]),
            OpenAIResponseToolMCP(server_label="alabel"),
        ]
        context.recover_tools_from_previous_response(previous_response)

        assert len(context.tools_to_process) == 1
        assert context.tools_to_process[0].type == "file_search"
        assert len(context.previous_tools) == 1
        assert context.previous_tools["test_tool"].server_label == "alabel"
        assert context.previous_tools["test_tool"].server_url == "aurl"
        assert len(context.previous_tool_listings) == 1
        assert len(context.previous_tool_listings[0].tools) == 1
        assert context.previous_tool_listings[0].server_label == "alabel"

    def test_multiple_reusable_servers(self):
        tools = [
            OpenAIResponseInputToolFunction(name="fake", parameters=None),
            OpenAIResponseInputToolMCP(server_label="anotherlabel", server_url="anotherurl"),
            OpenAIResponseInputToolWebSearch(),
            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
        ]
        context = ToolContext(tools)
        output = [
            OpenAIResponseOutputMessageMCPListTools(
                id="test1", server_label="alabel", tools=[MCPListToolsTool(name="test_tool", input_schema={})]
            ),
            OpenAIResponseOutputMessageMCPListTools(
                id="test2",
                server_label="anotherlabel",
                tools=[MCPListToolsTool(name="some_other_tool", input_schema={})],
            ),
        ]
        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
        previous_response.tools = [
            OpenAIResponseInputToolFunction(name="fake", parameters=None),
            OpenAIResponseToolMCP(server_label="anotherlabel", server_url="anotherurl"),
            OpenAIResponseInputToolWebSearch(type="web_search"),
            OpenAIResponseToolMCP(server_label="alabel", server_url="aurl"),
        ]
        context.recover_tools_from_previous_response(previous_response)

        assert len(context.tools_to_process) == 2
        assert context.tools_to_process[0].type == "function"
        assert context.tools_to_process[1].type == "web_search"
        assert len(context.previous_tools) == 2
        assert context.previous_tools["test_tool"].server_label == "alabel"
        assert context.previous_tools["test_tool"].server_url == "aurl"
        assert context.previous_tools["some_other_tool"].server_label == "anotherlabel"
        assert context.previous_tools["some_other_tool"].server_url == "anotherurl"
        assert len(context.previous_tool_listings) == 2
        assert len(context.previous_tool_listings[0].tools) == 1
        assert context.previous_tool_listings[0].server_label == "alabel"
        assert len(context.previous_tool_listings[1].tools) == 1
        assert context.previous_tool_listings[1].server_label == "anotherlabel"

    def test_multiple_servers_only_one_reusable(self):
        tools = [
            OpenAIResponseInputToolFunction(name="fake", parameters=None),
            OpenAIResponseInputToolMCP(server_label="anotherlabel", server_url="anotherurl"),
            OpenAIResponseInputToolWebSearch(type="web_search"),
            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
        ]
        context = ToolContext(tools)
        output = [
            OpenAIResponseOutputMessageMCPListTools(
                id="test2",
                server_label="anotherlabel",
                tools=[MCPListToolsTool(name="some_other_tool", input_schema={})],
            )
        ]
        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
        previous_response.tools = [
            OpenAIResponseInputToolFunction(name="fake", parameters=None),
            OpenAIResponseToolMCP(server_label="anotherlabel", server_url="anotherurl"),
            OpenAIResponseInputToolWebSearch(type="web_search"),
        ]
        context.recover_tools_from_previous_response(previous_response)

        assert len(context.tools_to_process) == 3
        assert context.tools_to_process[0].type == "function"
        assert context.tools_to_process[1].type == "web_search"
        assert context.tools_to_process[2].type == "mcp"
        assert len(context.previous_tools) == 1
        assert context.previous_tools["some_other_tool"].server_label == "anotherlabel"
        assert context.previous_tools["some_other_tool"].server_url == "anotherurl"
        assert len(context.previous_tool_listings) == 1
        assert len(context.previous_tool_listings[0].tools) == 1
        assert context.previous_tool_listings[0].server_label == "anotherlabel"

    def test_mismatched_allowed_tools(self):
        tools = [
            OpenAIResponseInputToolFunction(name="fake", parameters=None),
            OpenAIResponseInputToolMCP(server_label="anotherlabel", server_url="anotherurl"),
            OpenAIResponseInputToolWebSearch(type="web_search"),
            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl", allowed_tools=["test_tool_2"]),
        ]
        context = ToolContext(tools)
        output = [
            OpenAIResponseOutputMessageMCPListTools(
                id="test1", server_label="alabel", tools=[MCPListToolsTool(name="test_tool_1", input_schema={})]
            ),
            OpenAIResponseOutputMessageMCPListTools(
                id="test2",
                server_label="anotherlabel",
                tools=[MCPListToolsTool(name="some_other_tool", input_schema={})],
            ),
        ]
        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
        previous_response.tools = [
            OpenAIResponseInputToolFunction(name="fake", parameters=None),
            OpenAIResponseToolMCP(server_label="anotherlabel", server_url="anotherurl"),
            OpenAIResponseInputToolWebSearch(type="web_search"),
            OpenAIResponseToolMCP(server_label="alabel", server_url="aurl"),
        ]
        context.recover_tools_from_previous_response(previous_response)

        assert len(context.tools_to_process) == 3
        assert context.tools_to_process[0].type == "function"
        assert context.tools_to_process[1].type == "web_search"
        assert context.tools_to_process[2].type == "mcp"
        assert len(context.previous_tools) == 1
        assert context.previous_tools["some_other_tool"].server_label == "anotherlabel"
        assert context.previous_tools["some_other_tool"].server_url == "anotherurl"
        assert len(context.previous_tool_listings) == 1
        assert len(context.previous_tool_listings[0].tools) == 1
        assert context.previous_tool_listings[0].server_label == "anotherlabel"
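The test_mismatched_allowed_tools case above comes down to a single rule in recover_tools_from_previous_response: a previous listing is only reused when the allowed_tools restriction is identical across the two requests. A tiny illustration with placeholder values:

    from llama_stack.apis.agents.openai_responses import OpenAIResponseInputToolMCP, OpenAIResponseToolMCP

    # current request restricts allowed_tools; the previous request recorded no restriction
    current = OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl", allowed_tools=["test_tool_2"])
    previous = OpenAIResponseToolMCP(server_label="alabel")

    # allowed_tools differ, so the "alabel" listing is not reused and the tool
    # stays in tools_to_process for a fresh mcp_list_tools round trip
    assert current.allowed_tools != previous.allowed_tools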