mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-11 19:56:03 +00:00
feat(responses)!: introduce OpenAI compatible prompts to Responses API
This commit is contained in:
parent
d10bfb5121
commit
d94efaaac4
12 changed files with 593 additions and 8 deletions
|
|
@ -5474,11 +5474,44 @@ components:
|
|||
oneOf:
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
discriminator:
|
||||
propertyName: type
|
||||
mapping:
|
||||
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
OpenAIResponseInputMessageContentFile:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
const: input_file
|
||||
default: input_file
|
||||
description: >-
|
||||
The type of the input item. Always `input_file`.
|
||||
file_data:
|
||||
type: string
|
||||
description: >-
|
||||
The data of the file to be sent to the model.
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
file_url:
|
||||
type: string
|
||||
description: >-
|
||||
The URL of the file to be sent to the model.
|
||||
filename:
|
||||
type: string
|
||||
description: >-
|
||||
The name of the file to be sent to the model.
|
||||
additionalProperties: false
|
||||
required:
|
||||
- type
|
||||
title: OpenAIResponseInputMessageContentFile
|
||||
description: >-
|
||||
File content for input messages in OpenAI response format.
|
||||
OpenAIResponseInputMessageContentImage:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -5499,6 +5532,10 @@ components:
|
|||
default: input_image
|
||||
description: >-
|
||||
Content type identifier, always "input_image"
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
image_url:
|
||||
type: string
|
||||
description: (Optional) URL of the image content
|
||||
|
|
@ -6893,6 +6930,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
@ -6966,6 +7007,30 @@ components:
|
|||
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
||||
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
||||
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
||||
OpenAIResponsePrompt:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier of the prompt template
|
||||
variables:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/OpenAIResponseInputMessageContent'
|
||||
description: >-
|
||||
Dictionary of variable names to OpenAIResponseInputMessageContent structure
|
||||
for template substitution. The substitution values can either be strings,
|
||||
or other Response input types like images or files.
|
||||
version:
|
||||
type: string
|
||||
description: >-
|
||||
Version number of the prompt to use (defaults to latest if not specified)
|
||||
additionalProperties: false
|
||||
required:
|
||||
- id
|
||||
title: OpenAIResponsePrompt
|
||||
description: >-
|
||||
OpenAI compatible Prompt object that is used in OpenAI responses.
|
||||
OpenAIResponseText:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -7223,6 +7288,10 @@ components:
|
|||
model:
|
||||
type: string
|
||||
description: The underlying LLM used for completions.
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Prompt object with ID, version, and variables.
|
||||
instructions:
|
||||
type: string
|
||||
previous_response_id:
|
||||
|
|
@ -7300,6 +7369,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
|
|||
81
docs/static/deprecated-llama-stack-spec.html
vendored
81
docs/static/deprecated-llama-stack-spec.html
vendored
|
|
@ -8576,16 +8576,53 @@
|
|||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile"
|
||||
}
|
||||
],
|
||||
"discriminator": {
|
||||
"propertyName": "type",
|
||||
"mapping": {
|
||||
"input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
|
||||
"input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
|
||||
"input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage",
|
||||
"input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile"
|
||||
}
|
||||
}
|
||||
},
|
||||
"OpenAIResponseInputMessageContentFile": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "input_file",
|
||||
"default": "input_file",
|
||||
"description": "The type of the input item. Always `input_file`."
|
||||
},
|
||||
"file_data": {
|
||||
"type": "string",
|
||||
"description": "The data of the file to be sent to the model."
|
||||
},
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "(Optional) The ID of the file to be sent to the model."
|
||||
},
|
||||
"file_url": {
|
||||
"type": "string",
|
||||
"description": "The URL of the file to be sent to the model."
|
||||
},
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to be sent to the model."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "OpenAIResponseInputMessageContentFile",
|
||||
"description": "File content for input messages in OpenAI response format."
|
||||
},
|
||||
"OpenAIResponseInputMessageContentImage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -8613,6 +8650,10 @@
|
|||
"default": "input_image",
|
||||
"description": "Content type identifier, always \"input_image\""
|
||||
},
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "(Optional) The ID of the file to be sent to the model."
|
||||
},
|
||||
"image_url": {
|
||||
"type": "string",
|
||||
"description": "(Optional) URL of the image content"
|
||||
|
|
@ -8976,6 +9017,10 @@
|
|||
"type": "string",
|
||||
"description": "(Optional) ID of the previous response in a conversation"
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Reference to a prompt template and its variables."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the response generation"
|
||||
|
|
@ -9400,6 +9445,32 @@
|
|||
"title": "OpenAIResponseOutputMessageWebSearchToolCall",
|
||||
"description": "Web search tool call output message for OpenAI responses."
|
||||
},
|
||||
"OpenAIResponsePrompt": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier of the prompt template"
|
||||
},
|
||||
"variables": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
|
||||
},
|
||||
"description": "Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types like images or files."
|
||||
},
|
||||
"version": {
|
||||
"type": "string",
|
||||
"description": "Version number of the prompt to use (defaults to latest if not specified)"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"id"
|
||||
],
|
||||
"title": "OpenAIResponsePrompt",
|
||||
"description": "OpenAI compatible Prompt object that is used in OpenAI responses."
|
||||
},
|
||||
"OpenAIResponseText": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -9770,6 +9841,10 @@
|
|||
"type": "string",
|
||||
"description": "The underlying LLM used for completions."
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Prompt object with ID, version, and variables."
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
|
|
@ -9858,6 +9933,10 @@
|
|||
"type": "string",
|
||||
"description": "(Optional) ID of the previous response in a conversation"
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Reference to a prompt template and its variables."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the response generation"
|
||||
|
|
|
|||
73
docs/static/deprecated-llama-stack-spec.yaml
vendored
73
docs/static/deprecated-llama-stack-spec.yaml
vendored
|
|
@ -6402,11 +6402,44 @@ components:
|
|||
oneOf:
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
discriminator:
|
||||
propertyName: type
|
||||
mapping:
|
||||
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
OpenAIResponseInputMessageContentFile:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
const: input_file
|
||||
default: input_file
|
||||
description: >-
|
||||
The type of the input item. Always `input_file`.
|
||||
file_data:
|
||||
type: string
|
||||
description: >-
|
||||
The data of the file to be sent to the model.
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
file_url:
|
||||
type: string
|
||||
description: >-
|
||||
The URL of the file to be sent to the model.
|
||||
filename:
|
||||
type: string
|
||||
description: >-
|
||||
The name of the file to be sent to the model.
|
||||
additionalProperties: false
|
||||
required:
|
||||
- type
|
||||
title: OpenAIResponseInputMessageContentFile
|
||||
description: >-
|
||||
File content for input messages in OpenAI response format.
|
||||
OpenAIResponseInputMessageContentImage:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -6427,6 +6460,10 @@ components:
|
|||
default: input_image
|
||||
description: >-
|
||||
Content type identifier, always "input_image"
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
image_url:
|
||||
type: string
|
||||
description: (Optional) URL of the image content
|
||||
|
|
@ -6697,6 +6734,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
@ -7036,6 +7077,30 @@ components:
|
|||
OpenAIResponseOutputMessageWebSearchToolCall
|
||||
description: >-
|
||||
Web search tool call output message for OpenAI responses.
|
||||
OpenAIResponsePrompt:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier of the prompt template
|
||||
variables:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/OpenAIResponseInputMessageContent'
|
||||
description: >-
|
||||
Dictionary of variable names to OpenAIResponseInputMessageContent structure
|
||||
for template substitution. The substitution values can either be strings,
|
||||
or other Response input types like images or files.
|
||||
version:
|
||||
type: string
|
||||
description: >-
|
||||
Version number of the prompt to use (defaults to latest if not specified)
|
||||
additionalProperties: false
|
||||
required:
|
||||
- id
|
||||
title: OpenAIResponsePrompt
|
||||
description: >-
|
||||
OpenAI compatible Prompt object that is used in OpenAI responses.
|
||||
OpenAIResponseText:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -7293,6 +7358,10 @@ components:
|
|||
model:
|
||||
type: string
|
||||
description: The underlying LLM used for completions.
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Prompt object with ID, version, and variables.
|
||||
instructions:
|
||||
type: string
|
||||
previous_response_id:
|
||||
|
|
@ -7370,6 +7439,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
|
|||
81
docs/static/llama-stack-spec.html
vendored
81
docs/static/llama-stack-spec.html
vendored
|
|
@ -5696,16 +5696,53 @@
|
|||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile"
|
||||
}
|
||||
],
|
||||
"discriminator": {
|
||||
"propertyName": "type",
|
||||
"mapping": {
|
||||
"input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
|
||||
"input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
|
||||
"input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage",
|
||||
"input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile"
|
||||
}
|
||||
}
|
||||
},
|
||||
"OpenAIResponseInputMessageContentFile": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "input_file",
|
||||
"default": "input_file",
|
||||
"description": "The type of the input item. Always `input_file`."
|
||||
},
|
||||
"file_data": {
|
||||
"type": "string",
|
||||
"description": "The data of the file to be sent to the model."
|
||||
},
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "(Optional) The ID of the file to be sent to the model."
|
||||
},
|
||||
"file_url": {
|
||||
"type": "string",
|
||||
"description": "The URL of the file to be sent to the model."
|
||||
},
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to be sent to the model."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "OpenAIResponseInputMessageContentFile",
|
||||
"description": "File content for input messages in OpenAI response format."
|
||||
},
|
||||
"OpenAIResponseInputMessageContentImage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -5733,6 +5770,10 @@
|
|||
"default": "input_image",
|
||||
"description": "Content type identifier, always \"input_image\""
|
||||
},
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "(Optional) The ID of the file to be sent to the model."
|
||||
},
|
||||
"image_url": {
|
||||
"type": "string",
|
||||
"description": "(Optional) URL of the image content"
|
||||
|
|
@ -7521,6 +7562,10 @@
|
|||
"type": "string",
|
||||
"description": "(Optional) ID of the previous response in a conversation"
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Reference to a prompt template and its variables."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the response generation"
|
||||
|
|
@ -7616,6 +7661,32 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"OpenAIResponsePrompt": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier of the prompt template"
|
||||
},
|
||||
"variables": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
|
||||
},
|
||||
"description": "Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types like images or files."
|
||||
},
|
||||
"version": {
|
||||
"type": "string",
|
||||
"description": "Version number of the prompt to use (defaults to latest if not specified)"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"id"
|
||||
],
|
||||
"title": "OpenAIResponsePrompt",
|
||||
"description": "OpenAI compatible Prompt object that is used in OpenAI responses."
|
||||
},
|
||||
"OpenAIResponseText": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -7986,6 +8057,10 @@
|
|||
"type": "string",
|
||||
"description": "The underlying LLM used for completions."
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Prompt object with ID, version, and variables."
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
|
|
@ -8074,6 +8149,10 @@
|
|||
"type": "string",
|
||||
"description": "(Optional) ID of the previous response in a conversation"
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Reference to a prompt template and its variables."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the response generation"
|
||||
|
|
|
|||
73
docs/static/llama-stack-spec.yaml
vendored
73
docs/static/llama-stack-spec.yaml
vendored
|
|
@ -4261,11 +4261,44 @@ components:
|
|||
oneOf:
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
discriminator:
|
||||
propertyName: type
|
||||
mapping:
|
||||
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
OpenAIResponseInputMessageContentFile:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
const: input_file
|
||||
default: input_file
|
||||
description: >-
|
||||
The type of the input item. Always `input_file`.
|
||||
file_data:
|
||||
type: string
|
||||
description: >-
|
||||
The data of the file to be sent to the model.
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
file_url:
|
||||
type: string
|
||||
description: >-
|
||||
The URL of the file to be sent to the model.
|
||||
filename:
|
||||
type: string
|
||||
description: >-
|
||||
The name of the file to be sent to the model.
|
||||
additionalProperties: false
|
||||
required:
|
||||
- type
|
||||
title: OpenAIResponseInputMessageContentFile
|
||||
description: >-
|
||||
File content for input messages in OpenAI response format.
|
||||
OpenAIResponseInputMessageContentImage:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -4286,6 +4319,10 @@ components:
|
|||
default: input_image
|
||||
description: >-
|
||||
Content type identifier, always "input_image"
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
image_url:
|
||||
type: string
|
||||
description: (Optional) URL of the image content
|
||||
|
|
@ -5680,6 +5717,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
@ -5753,6 +5794,30 @@ components:
|
|||
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
||||
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
||||
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
||||
OpenAIResponsePrompt:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier of the prompt template
|
||||
variables:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/OpenAIResponseInputMessageContent'
|
||||
description: >-
|
||||
Dictionary of variable names to OpenAIResponseInputMessageContent structure
|
||||
for template substitution. The substitution values can either be strings,
|
||||
or other Response input types like images or files.
|
||||
version:
|
||||
type: string
|
||||
description: >-
|
||||
Version number of the prompt to use (defaults to latest if not specified)
|
||||
additionalProperties: false
|
||||
required:
|
||||
- id
|
||||
title: OpenAIResponsePrompt
|
||||
description: >-
|
||||
OpenAI compatible Prompt object that is used in OpenAI responses.
|
||||
OpenAIResponseText:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -6010,6 +6075,10 @@ components:
|
|||
model:
|
||||
type: string
|
||||
description: The underlying LLM used for completions.
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Prompt object with ID, version, and variables.
|
||||
instructions:
|
||||
type: string
|
||||
previous_response_id:
|
||||
|
|
@ -6087,6 +6156,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
|
|||
81
docs/static/stainless-llama-stack-spec.html
vendored
81
docs/static/stainless-llama-stack-spec.html
vendored
|
|
@ -7368,16 +7368,53 @@
|
|||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContentImage"
|
||||
},
|
||||
{
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContentFile"
|
||||
}
|
||||
],
|
||||
"discriminator": {
|
||||
"propertyName": "type",
|
||||
"mapping": {
|
||||
"input_text": "#/components/schemas/OpenAIResponseInputMessageContentText",
|
||||
"input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage"
|
||||
"input_image": "#/components/schemas/OpenAIResponseInputMessageContentImage",
|
||||
"input_file": "#/components/schemas/OpenAIResponseInputMessageContentFile"
|
||||
}
|
||||
}
|
||||
},
|
||||
"OpenAIResponseInputMessageContentFile": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "input_file",
|
||||
"default": "input_file",
|
||||
"description": "The type of the input item. Always `input_file`."
|
||||
},
|
||||
"file_data": {
|
||||
"type": "string",
|
||||
"description": "The data of the file to be sent to the model."
|
||||
},
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "(Optional) The ID of the file to be sent to the model."
|
||||
},
|
||||
"file_url": {
|
||||
"type": "string",
|
||||
"description": "The URL of the file to be sent to the model."
|
||||
},
|
||||
"filename": {
|
||||
"type": "string",
|
||||
"description": "The name of the file to be sent to the model."
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"type"
|
||||
],
|
||||
"title": "OpenAIResponseInputMessageContentFile",
|
||||
"description": "File content for input messages in OpenAI response format."
|
||||
},
|
||||
"OpenAIResponseInputMessageContentImage": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -7405,6 +7442,10 @@
|
|||
"default": "input_image",
|
||||
"description": "Content type identifier, always \"input_image\""
|
||||
},
|
||||
"file_id": {
|
||||
"type": "string",
|
||||
"description": "(Optional) The ID of the file to be sent to the model."
|
||||
},
|
||||
"image_url": {
|
||||
"type": "string",
|
||||
"description": "(Optional) URL of the image content"
|
||||
|
|
@ -9193,6 +9234,10 @@
|
|||
"type": "string",
|
||||
"description": "(Optional) ID of the previous response in a conversation"
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Reference to a prompt template and its variables."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the response generation"
|
||||
|
|
@ -9288,6 +9333,32 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"OpenAIResponsePrompt": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier of the prompt template"
|
||||
},
|
||||
"variables": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"$ref": "#/components/schemas/OpenAIResponseInputMessageContent"
|
||||
},
|
||||
"description": "Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types like images or files."
|
||||
},
|
||||
"version": {
|
||||
"type": "string",
|
||||
"description": "Version number of the prompt to use (defaults to latest if not specified)"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [
|
||||
"id"
|
||||
],
|
||||
"title": "OpenAIResponsePrompt",
|
||||
"description": "OpenAI compatible Prompt object that is used in OpenAI responses."
|
||||
},
|
||||
"OpenAIResponseText": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
|
@ -9658,6 +9729,10 @@
|
|||
"type": "string",
|
||||
"description": "The underlying LLM used for completions."
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Prompt object with ID, version, and variables."
|
||||
},
|
||||
"instructions": {
|
||||
"type": "string"
|
||||
},
|
||||
|
|
@ -9746,6 +9821,10 @@
|
|||
"type": "string",
|
||||
"description": "(Optional) ID of the previous response in a conversation"
|
||||
},
|
||||
"prompt": {
|
||||
"$ref": "#/components/schemas/OpenAIResponsePrompt",
|
||||
"description": "(Optional) Reference to a prompt template and its variables."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the response generation"
|
||||
|
|
|
|||
73
docs/static/stainless-llama-stack-spec.yaml
vendored
73
docs/static/stainless-llama-stack-spec.yaml
vendored
|
|
@ -5474,11 +5474,44 @@ components:
|
|||
oneOf:
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
discriminator:
|
||||
propertyName: type
|
||||
mapping:
|
||||
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
||||
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
||||
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
||||
OpenAIResponseInputMessageContentFile:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
const: input_file
|
||||
default: input_file
|
||||
description: >-
|
||||
The type of the input item. Always `input_file`.
|
||||
file_data:
|
||||
type: string
|
||||
description: >-
|
||||
The data of the file to be sent to the model.
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
file_url:
|
||||
type: string
|
||||
description: >-
|
||||
The URL of the file to be sent to the model.
|
||||
filename:
|
||||
type: string
|
||||
description: >-
|
||||
The name of the file to be sent to the model.
|
||||
additionalProperties: false
|
||||
required:
|
||||
- type
|
||||
title: OpenAIResponseInputMessageContentFile
|
||||
description: >-
|
||||
File content for input messages in OpenAI response format.
|
||||
OpenAIResponseInputMessageContentImage:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -5499,6 +5532,10 @@ components:
|
|||
default: input_image
|
||||
description: >-
|
||||
Content type identifier, always "input_image"
|
||||
file_id:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) The ID of the file to be sent to the model.
|
||||
image_url:
|
||||
type: string
|
||||
description: (Optional) URL of the image content
|
||||
|
|
@ -6893,6 +6930,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
@ -6966,6 +7007,30 @@ components:
|
|||
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
||||
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
||||
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
||||
OpenAIResponsePrompt:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: Unique identifier of the prompt template
|
||||
variables:
|
||||
type: object
|
||||
additionalProperties:
|
||||
$ref: '#/components/schemas/OpenAIResponseInputMessageContent'
|
||||
description: >-
|
||||
Dictionary of variable names to OpenAIResponseInputMessageContent structure
|
||||
for template substitution. The substitution values can either be strings,
|
||||
or other Response input types like images or files.
|
||||
version:
|
||||
type: string
|
||||
description: >-
|
||||
Version number of the prompt to use (defaults to latest if not specified)
|
||||
additionalProperties: false
|
||||
required:
|
||||
- id
|
||||
title: OpenAIResponsePrompt
|
||||
description: >-
|
||||
OpenAI compatible Prompt object that is used in OpenAI responses.
|
||||
OpenAIResponseText:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -7223,6 +7288,10 @@ components:
|
|||
model:
|
||||
type: string
|
||||
description: The underlying LLM used for completions.
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Prompt object with ID, version, and variables.
|
||||
instructions:
|
||||
type: string
|
||||
previous_response_id:
|
||||
|
|
@ -7300,6 +7369,10 @@ components:
|
|||
type: string
|
||||
description: >-
|
||||
(Optional) ID of the previous response in a conversation
|
||||
prompt:
|
||||
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
||||
description: >-
|
||||
(Optional) Reference to a prompt template and its variables.
|
||||
status:
|
||||
type: string
|
||||
description: >-
|
||||
|
|
|
|||
|
|
@ -38,6 +38,7 @@ from .openai_responses import (
|
|||
OpenAIResponseInputTool,
|
||||
OpenAIResponseObject,
|
||||
OpenAIResponseObjectStream,
|
||||
OpenAIResponsePrompt,
|
||||
OpenAIResponseText,
|
||||
)
|
||||
|
||||
|
|
@ -810,6 +811,7 @@ class Agents(Protocol):
|
|||
self,
|
||||
input: str | list[OpenAIResponseInput],
|
||||
model: str,
|
||||
prompt: OpenAIResponsePrompt | None = None,
|
||||
instructions: str | None = None,
|
||||
previous_response_id: str | None = None,
|
||||
conversation: str | None = None,
|
||||
|
|
@ -831,6 +833,7 @@ class Agents(Protocol):
|
|||
|
||||
:param input: Input message(s) to create the response.
|
||||
:param model: The underlying LLM used for completions.
|
||||
:param prompt: (Optional) Prompt object with ID, version, and variables.
|
||||
:param previous_response_id: (Optional) if specified, the new response will be a continuation of the previous response. This can be used to easily fork-off new responses from existing responses.
|
||||
:param conversation: (Optional) The ID of a conversation to add the response to. Must begin with 'conv_'. Input and output messages will be automatically added to the conversation.
|
||||
:param include: (Optional) Additional fields to include in the response.
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
from typing import Annotated, Any, Literal
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import BaseModel, Field, model_validator
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from llama_stack.apis.vector_io import SearchRankingOptions as FileSearchRankingOptions
|
||||
|
|
@ -46,23 +46,66 @@ class OpenAIResponseInputMessageContentImage(BaseModel):
|
|||
|
||||
:param detail: Level of detail for image processing, can be "low", "high", or "auto"
|
||||
:param type: Content type identifier, always "input_image"
|
||||
:param file_id: (Optional) The ID of the file to be sent to the model.
|
||||
:param image_url: (Optional) URL of the image content
|
||||
"""
|
||||
|
||||
detail: Literal["low"] | Literal["high"] | Literal["auto"] = "auto"
|
||||
type: Literal["input_image"] = "input_image"
|
||||
# TODO: handle file_id
|
||||
file_id: str | None = None
|
||||
image_url: str | None = None
|
||||
|
||||
|
||||
# TODO: handle file content types
@json_schema_type
class OpenAIResponseInputMessageContentFile(BaseModel):
    """File content for input messages in OpenAI response format.

    :param type: The type of the input item. Always `input_file`.
    :param file_data: The data of the file to be sent to the model.
    :param file_id: (Optional) The ID of the file to be sent to the model.
    :param file_url: The URL of the file to be sent to the model.
    :param filename: The name of the file to be sent to the model.
    """

    type: Literal["input_file"] = "input_file"
    file_data: str | None = None
    file_id: str | None = None
    file_url: str | None = None
    filename: str | None = None

    @model_validator(mode="after")
    def validate_file_source(self) -> "OpenAIResponseInputMessageContentFile":
        # Reject an empty content part: the model needs some way to locate or
        # receive the file. NOTE(review): `filename` alone satisfies this check
        # but carries no file content — confirm that is intentional.
        provided_sources = (self.file_data, self.file_id, self.file_url, self.filename)
        if not any(provided_sources):
            raise ValueError(
                "At least one of 'file_data', 'file_id', 'file_url', or 'filename' must be provided for file content"
            )
        return self
|
||||
|
||||
|
||||
# Discriminated union of the input-message content part types; the "type"
# field selects between text, image, and file parts.
OpenAIResponseInputMessageContent = Annotated[
    OpenAIResponseInputMessageContentText
    | OpenAIResponseInputMessageContentImage
    | OpenAIResponseInputMessageContentFile,
    Field(discriminator="type"),
]
register_schema(OpenAIResponseInputMessageContent, name="OpenAIResponseInputMessageContent")
|
||||
|
||||
|
||||
@json_schema_type
class OpenAIResponsePrompt(BaseModel):
    """OpenAI compatible Prompt object that is used in OpenAI responses.

    :param id: Unique identifier of the prompt template
    :param variables: Dictionary of variable names to OpenAIResponseInputMessageContent structure for template substitution. The substitution values can either be strings, or other Response input types
        like images or files.
    :param version: Version number of the prompt to use (defaults to latest if not specified)
    """

    # Unique identifier of the server-side prompt template to render.
    id: str
    # Substitution values keyed by variable name; values are content parts
    # (text/image/file), not bare strings.
    variables: dict[str, OpenAIResponseInputMessageContent] | None = None
    # Version selector for the template; None means "latest" per the field
    # description. NOTE(review): typed as str, not int — presumably versions
    # are opaque labels; confirm against the prompts service.
    version: str | None = None
|
||||
|
||||
|
||||
@json_schema_type
|
||||
class OpenAIResponseAnnotationFileCitation(BaseModel):
|
||||
"""File citation annotation for referencing specific files in response content.
|
||||
|
|
@ -538,6 +581,7 @@ class OpenAIResponseObject(BaseModel):
|
|||
:param output: List of generated output items (messages, tool calls, etc.)
|
||||
:param parallel_tool_calls: Whether tool calls can be executed in parallel
|
||||
:param previous_response_id: (Optional) ID of the previous response in a conversation
|
||||
:param prompt: (Optional) Reference to a prompt template and its variables.
|
||||
:param status: Current status of the response generation
|
||||
:param temperature: (Optional) Sampling temperature used for generation
|
||||
:param text: Text formatting configuration for the response
|
||||
|
|
@ -556,6 +600,7 @@ class OpenAIResponseObject(BaseModel):
|
|||
output: list[OpenAIResponseOutput]
|
||||
parallel_tool_calls: bool = False
|
||||
previous_response_id: str | None = None
|
||||
prompt: OpenAIResponsePrompt | None = None
|
||||
status: str
|
||||
temperature: float | None = None
|
||||
# Default to text format to avoid breaking the loading of old responses
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ from llama_stack.apis.agents import (
|
|||
Turn,
|
||||
)
|
||||
from llama_stack.apis.agents.agents import ResponseGuardrail
|
||||
from llama_stack.apis.agents.openai_responses import OpenAIResponseText
|
||||
from llama_stack.apis.agents.openai_responses import OpenAIResponsePrompt, OpenAIResponseText
|
||||
from llama_stack.apis.common.responses import PaginatedResponse
|
||||
from llama_stack.apis.conversations import Conversations
|
||||
from llama_stack.apis.inference import (
|
||||
|
|
@ -329,6 +329,7 @@ class MetaReferenceAgentsImpl(Agents):
|
|||
self,
|
||||
input: str | list[OpenAIResponseInput],
|
||||
model: str,
|
||||
prompt: OpenAIResponsePrompt | None = None,
|
||||
instructions: str | None = None,
|
||||
previous_response_id: str | None = None,
|
||||
conversation: str | None = None,
|
||||
|
|
@ -344,6 +345,7 @@ class MetaReferenceAgentsImpl(Agents):
|
|||
return await self.openai_responses_impl.create_openai_response(
|
||||
input,
|
||||
model,
|
||||
prompt,
|
||||
instructions,
|
||||
previous_response_id,
|
||||
conversation,
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ from llama_stack.apis.agents.openai_responses import (
|
|||
OpenAIResponseMessage,
|
||||
OpenAIResponseObject,
|
||||
OpenAIResponseObjectStream,
|
||||
OpenAIResponsePrompt,
|
||||
OpenAIResponseText,
|
||||
OpenAIResponseTextFormat,
|
||||
)
|
||||
|
|
@ -239,6 +240,7 @@ class OpenAIResponsesImpl:
|
|||
self,
|
||||
input: str | list[OpenAIResponseInput],
|
||||
model: str,
|
||||
prompt: OpenAIResponsePrompt | None = None,
|
||||
instructions: str | None = None,
|
||||
previous_response_id: str | None = None,
|
||||
conversation: str | None = None,
|
||||
|
|
|
|||
|
|
@ -49,6 +49,7 @@ from llama_stack.apis.agents.openai_responses import (
|
|||
OpenAIResponseOutputMessageMCPCall,
|
||||
OpenAIResponseOutputMessageMCPListTools,
|
||||
OpenAIResponseOutputMessageWebSearchToolCall,
|
||||
OpenAIResponsePrompt,
|
||||
OpenAIResponseText,
|
||||
OpenAIResponseUsage,
|
||||
OpenAIResponseUsageInputTokensDetails,
|
||||
|
|
@ -113,6 +114,7 @@ class StreamingResponseOrchestrator:
|
|||
instructions: str,
|
||||
safety_api,
|
||||
guardrail_ids: list[str] | None = None,
|
||||
prompt: OpenAIResponsePrompt | None = None,
|
||||
):
|
||||
self.inference_api = inference_api
|
||||
self.ctx = ctx
|
||||
|
|
@ -123,6 +125,7 @@ class StreamingResponseOrchestrator:
|
|||
self.tool_executor = tool_executor
|
||||
self.safety_api = safety_api
|
||||
self.guardrail_ids = guardrail_ids or []
|
||||
self.prompt = prompt
|
||||
self.sequence_number = 0
|
||||
# Store MCP tool mapping that gets built during tool processing
|
||||
self.mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] = ctx.tool_context.previous_tools or {}
|
||||
|
|
@ -180,6 +183,7 @@ class StreamingResponseOrchestrator:
|
|||
error=error,
|
||||
usage=self.accumulated_usage,
|
||||
instructions=self.instructions,
|
||||
prompt=self.prompt,
|
||||
)
|
||||
|
||||
async def create_response(self) -> AsyncIterator[OpenAIResponseObjectStream]:
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue