openapi: 3.1.0
info:
  title: Llama Stack API
  description: A comprehensive API for building and deploying AI applications
  version: 1.0.0
servers:
  - url: https://api.llamastack.com
    description: Production server
  - url: https://staging-api.llamastack.com
    description: Staging server
paths:
  /v1/batches:
    get:
      tags:
        - V1
      summary: List Batches
      description: Query endpoint for proper schema generation.
      operationId: list_batches_v1_batches_get
      parameters:
        - name: after
          in: query
          required: true
          schema:
            type: string
            title: After
        - name: limit
          in: query
          required: false
          schema:
            type: integer
            default: 20
            title: Limit
      responses:
        '200':
          description: A list of batch objects.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/ListBatchesResponse'
        '400':
          $ref: '#/components/responses/BadRequest400'
          description: Bad Request
        '429':
          $ref: '#/components/responses/TooManyRequests429'
          description: Too Many Requests
        '500':
          $ref: '#/components/responses/InternalServerError500'
          description: Internal Server Error
        default:
          $ref: '#/components/responses/DefaultError'
          description: Default Response
    post:
      tags:
        - V1
      summary: Create Batch
      description: Typed endpoint for proper schema generation.
      operationId: create_batch_v1_batches_post
      requestBody:
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/_batches_Request'
      responses:
        '200':
          description: The created batch object.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Batch'
        '400':
          $ref: '#/components/responses/BadRequest400'
          description: Bad Request
        '429':
          $ref: '#/components/responses/TooManyRequests429'
          description: Too Many Requests
        '500':
          $ref: '#/components/responses/InternalServerError500'
          description: Internal Server Error
        default:
          $ref: '#/components/responses/DefaultError'
          description: Default Response
  /v1/batches/{batch_id}:
    get:
      tags:
        - V1
      summary: Retrieve Batch
      description: Query endpoint for proper schema generation.
      operationId: retrieve_batch_v1_batches__batch_id__get
      parameters:
        - name: batch_id
          in: path
          required: true
          schema:
            type: string
            title: Batch Id
      responses:
        '200':
          description: The batch object.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Batch'
        '400':
          $ref: '#/components/responses/BadRequest400'
          description: Bad Request
        '429':
          $ref: '#/components/responses/TooManyRequests429'
          description: Too Many Requests
        '500':
          $ref: '#/components/responses/InternalServerError500'
          description: Internal Server Error
        default:
          $ref: '#/components/responses/DefaultError'
          description: Default Response
  /v1/batches/{batch_id}/cancel:
    post:
      tags:
        - V1
      summary: Cancel Batch
      description: Typed endpoint for proper schema generation.
      operationId: cancel_batch_v1_batches__batch_id__cancel_post
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/_batches_batch_id_cancel_Request'
        required: true
      responses:
        '200':
          description: The updated batch object.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Batch'
        '400':
          description: Bad Request
          $ref: '#/components/responses/BadRequest400'
        '429':
          description: Too Many Requests
          $ref: '#/components/responses/TooManyRequests429'
        '500':
          description: Internal Server Error
          $ref: '#/components/responses/InternalServerError500'
        default:
          description: Default Response
          $ref: '#/components/responses/DefaultError'
    parameters:
      - name: batch_id
        in: path
        required: true
        schema:
          type: string
        description: 'Path parameter: batch_id'
/v1/chat/completions:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Chat Completions
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_chat_completions_v1_chat_completions_get
|
|
parameters:
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: model
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Model
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 20
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
$ref: '#/components/schemas/Order'
|
|
default: desc
|
|
responses:
|
|
'200':
|
|
description: A ListOpenAIChatCompletionResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListOpenAIChatCompletionResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Chat Completion
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_chat_completion_v1_chat_completions_post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionRequestWithExtraBody'
|
|
responses:
|
|
'200':
|
|
description: An OpenAIChatCompletion.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIChatCompletion'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/chat/completions/{completion_id}:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Chat Completion
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_chat_completion_v1_chat_completions__completion_id__get
|
|
parameters:
|
|
- name: completion_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Completion Id
|
|
responses:
|
|
'200':
|
|
          description: An OpenAICompletionWithInputMessages.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAICompletionWithInputMessages'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/completions:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Completion
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_completion_v1_completions_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAICompletionRequestWithExtraBody'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: An OpenAICompletion.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAICompletion'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/conversations:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Create Conversation
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: create_conversation_v1_conversations_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_conversations_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: The created conversation object.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Conversation'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/conversations/{conversation_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Openai Delete Conversation
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_delete_conversation_v1_conversations__conversation_id__delete
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Conversation Id
|
|
responses:
|
|
'200':
|
|
description: The deleted conversation resource.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ConversationDeletedResource'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Conversation
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_conversation_v1_conversations__conversation_id__get
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Conversation Id
|
|
responses:
|
|
'200':
|
|
description: The conversation object.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Conversation'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Update Conversation
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: update_conversation_v1_conversations__conversation_id__post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_conversations_conversation_id_Request'
|
|
responses:
|
|
'200':
|
|
description: The updated conversation object.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Conversation'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: conversation_id'
|
|
/v1/conversations/{conversation_id}/items:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Items
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_items_v1_conversations__conversation_id__items_get
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Conversation Id
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: include
|
|
in: query
|
|
required: true
|
|
schema:
|
|
$ref: '#/components/schemas/ConversationItemInclude'
|
|
- name: limit
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: integer
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Order
|
|
responses:
|
|
'200':
|
|
description: List of conversation items.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ConversationItemList'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Add Items
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: add_items_v1_conversations__conversation_id__items_post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_conversations_conversation_id_items_Request'
|
|
responses:
|
|
'200':
|
|
description: List of created items.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ConversationItemList'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: conversation_id'
|
|
/v1/conversations/{conversation_id}/items/{item_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Openai Delete Conversation Item
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_delete_conversation_item_v1_conversations__conversation_id__items__item_id__delete
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Conversation Id
|
|
- name: item_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Item Id
|
|
responses:
|
|
'200':
|
|
description: The deleted item resource.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ConversationItemDeletedResource'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Retrieve
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: retrieve_v1_conversations__conversation_id__items__item_id__get
|
|
parameters:
|
|
- name: conversation_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Conversation Id
|
|
- name: item_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Item Id
|
|
responses:
|
|
'200':
|
|
description: The conversation item.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIResponseMessage'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/embeddings:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Embeddings
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_embeddings_v1_embeddings_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIEmbeddingsRequestWithExtraBody'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: An OpenAIEmbeddingsResponse containing the embeddings.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIEmbeddingsResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/files:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai List Files
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_list_files_v1_files_get
|
|
parameters:
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: purpose
|
|
in: query
|
|
required: true
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIFilePurpose'
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 10000
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
$ref: '#/components/schemas/Order'
|
|
default: desc
|
|
responses:
|
|
'200':
|
|
          description: A ListOpenAIFileResponse containing the list of files.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListOpenAIFileResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Upload File
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: openai_upload_file_v1_files_post
|
|
responses:
|
|
'200':
|
|
description: An OpenAIFileObject representing the uploaded file.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIFileObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/files/{file_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Openai Delete File
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_delete_file_v1_files__file_id__delete
|
|
parameters:
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: File Id
|
|
responses:
|
|
'200':
|
|
description: An OpenAIFileDeleteResponse indicating successful deletion.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIFileDeleteResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai Retrieve File
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_retrieve_file_v1_files__file_id__get
|
|
parameters:
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: File Id
|
|
responses:
|
|
'200':
|
|
description: An OpenAIFileObject containing file information.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIFileObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/files/{file_id}/content:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai Retrieve File Content
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: openai_retrieve_file_content_v1_files__file_id__content_get
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: file_id'
|
|
responses:
|
|
'200':
|
|
description: The raw file content as a binary response.
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/health:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Health
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: health_v1_health_get
|
|
responses:
|
|
'200':
|
|
description: Health information indicating if the service is operational.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/HealthInfo'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/inspect/routes:
|
|
get:
|
|
      tags:
        - V1
        - Inspect
      summary: List routes.
      description: >-
        List routes.

        List all available API routes with their methods and implementing providers.
operationId: list_routes_v1_inspect_routes_get
|
|
responses:
|
|
'200':
|
|
description: Response containing information about all available routes.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListRoutesResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
parameters:
|
|
- name: api_filter
|
|
in: query
|
|
description: >-
|
|
Optional filter to control which routes are returned. Can be an API level
|
|
('v1', 'v1alpha', 'v1beta') to show non-deprecated routes at that level,
|
|
or 'deprecated' to show deprecated routes across all levels. If not specified,
|
|
returns only non-deprecated v1 routes.
|
|
required: false
|
|
schema:
|
|
type: string
|
|
enum:
|
|
- v1
|
|
- v1alpha
|
|
- v1beta
|
|
- deprecated
|
|
deprecated: false
|
|
/v1/models:
|
|
get:
|
|
      tags:
        - V1
        - Models
      summary: List models using the OpenAI API.
      description: List models using the OpenAI API.
operationId: list_models_v1_models_get
|
|
responses:
|
|
'200':
|
|
          description: An OpenAIListModelsResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIListModelsResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
parameters: []
|
|
deprecated: false
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Register Model
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: register_model_v1_models_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_models_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A Model.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Model'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/models/{model_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Unregister Model
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: unregister_model_v1_models__model_id__delete
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
- name: model_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: model_id'
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Model
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_model_v1_models__model_id__get
|
|
parameters:
|
|
- name: model_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Model Id
|
|
responses:
|
|
'200':
|
|
description: A Model.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Model'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/moderations:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Run Moderation
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: run_moderation_v1_moderations_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_moderations_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A moderation object.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ModerationObject'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/prompts:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Prompts
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: list_prompts_v1_prompts_get
|
|
responses:
|
|
'200':
|
|
description: A ListPromptsResponse containing all prompts.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListPromptsResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Create Prompt
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: create_prompt_v1_prompts_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_prompts_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: The created Prompt resource.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Prompt'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/prompts/{prompt_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Delete Prompt
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: delete_prompt_v1_prompts__prompt_id__delete
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
- &id001
|
|
name: prompt_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: prompt_id'
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Prompt
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_prompt_v1_prompts__prompt_id__get
|
|
parameters:
|
|
- name: prompt_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Prompt Id
|
|
- name: version
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: integer
|
|
title: Version
|
|
responses:
|
|
'200':
|
|
description: A Prompt resource.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Prompt'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Update Prompt
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: update_prompt_v1_prompts__prompt_id__post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_prompts_prompt_id_Request'
|
|
responses:
|
|
'200':
|
|
description: The updated Prompt resource with incremented version.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Prompt'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
parameters:
|
|
- *id001
|
|
/v1/prompts/{prompt_id}/set-default-version:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Set Default Version
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: set_default_version_v1_prompts__prompt_id__set_default_version_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_prompts_prompt_id_set_default_version_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: The prompt with the specified version now set as default.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Prompt'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
parameters:
|
|
- name: prompt_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: prompt_id'
|
|
/v1/prompts/{prompt_id}/versions:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Prompt Versions
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_prompt_versions_v1_prompts__prompt_id__versions_get
|
|
parameters:
|
|
- name: prompt_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Prompt Id
|
|
responses:
|
|
'200':
|
|
description: A ListPromptsResponse containing all versions of the prompt.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListPromptsResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/providers:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Providers
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: list_providers_v1_providers_get
|
|
responses:
|
|
'200':
|
|
description: A ListProvidersResponse containing information about all providers.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListProvidersResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/providers/{provider_id}:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Inspect Provider
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: inspect_provider_v1_providers__provider_id__get
|
|
parameters:
|
|
- name: provider_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Provider Id
|
|
responses:
|
|
'200':
|
|
description: A ProviderInfo object containing the provider's details.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ProviderInfo'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/responses:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Openai Responses
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_openai_responses_v1_responses_get
|
|
parameters:
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: model
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Model
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 50
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
$ref: '#/components/schemas/Order'
|
|
default: desc
|
|
responses:
|
|
'200':
|
|
description: A ListOpenAIResponseObject.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListOpenAIResponseObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Create Openai Response
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: create_openai_response_v1_responses_post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_responses_Request'
|
|
responses:
|
|
'200':
|
|
description: An OpenAIResponseObject.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/responses/{response_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Delete Openai Response
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: delete_openai_response_v1_responses__response_id__delete
|
|
parameters:
|
|
- name: response_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Response Id
|
|
responses:
|
|
'200':
|
|
          description: An OpenAIDeleteResponseObject.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIDeleteResponseObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Openai Response
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_openai_response_v1_responses__response_id__get
|
|
parameters:
|
|
- name: response_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Response Id
|
|
responses:
|
|
'200':
|
|
description: An OpenAIResponseObject.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/responses/{response_id}/input_items:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Openai Response Input Items
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_openai_response_input_items_v1_responses__response_id__input_items_get
|
|
parameters:
|
|
- name: response_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Response Id
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: before
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Before
|
|
- name: include
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Include
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 20
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
$ref: '#/components/schemas/Order'
|
|
default: desc
|
|
responses:
|
|
'200':
|
|
          description: A ListOpenAIResponseInputItem.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListOpenAIResponseInputItem'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/safety/run-shield:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Run Shield
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: run_shield_v1_safety_run_shield_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_safety_run_shield_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A RunShieldResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/RunShieldResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/scoring-functions:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Scoring Functions
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: list_scoring_functions_v1_scoring_functions_get
|
|
responses:
|
|
'200':
|
|
description: A ListScoringFunctionsResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListScoringFunctionsResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Register Scoring Function
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: register_scoring_function_v1_scoring_functions_post
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/scoring-functions/{scoring_fn_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Unregister Scoring Function
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: unregister_scoring_function_v1_scoring_functions__scoring_fn_id__delete
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
- name: scoring_fn_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: scoring_fn_id'
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Scoring Function
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_scoring_function_v1_scoring_functions__scoring_fn_id__get
|
|
parameters:
|
|
- name: scoring_fn_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Scoring Fn Id
|
|
responses:
|
|
'200':
|
|
description: A ScoringFn.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ScoringFn'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/scoring/score:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Score
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: score_v1_scoring_score_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_scoring_score_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A ScoreResponse object containing rows and aggregated results.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ScoreResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/scoring/score-batch:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Score Batch
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: score_batch_v1_scoring_score_batch_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_scoring_score_batch_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A ScoreBatchResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ScoreBatchResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/shields:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Shields
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: list_shields_v1_shields_get
|
|
responses:
|
|
'200':
|
|
description: A ListShieldsResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListShieldsResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Register Shield
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: register_shield_v1_shields_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_shields_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A Shield.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/Shield'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
  /v1/shields/{identifier}:
    delete:
      tags:
        - V1
        - Shields
      summary: Unregister a shield.
      description: Unregister a shield.
      operationId: unregister_shield_v1_shields__identifier__delete
      parameters:
        - name: args
          in: query
          required: true
          schema:
            title: Args
        - name: kwargs
          in: query
          required: true
          schema:
            title: Kwargs
        - name: identifier
          in: path
          description: The identifier of the shield to unregister.
          required: true
          schema:
            type: string
      deprecated: false
      responses:
        '200':
          description: Successful Response
          content:
            application/json:
              schema: {}
        '400':
          $ref: '#/components/responses/BadRequest400'
          description: Bad Request
        '429':
          $ref: '#/components/responses/TooManyRequests429'
          description: Too Many Requests
        '500':
          $ref: '#/components/responses/InternalServerError500'
          description: Internal Server Error
        default:
          $ref: '#/components/responses/DefaultError'
          description: Default Response
    get:
      tags:
        - V1
        - Shields
      summary: Get a shield by its identifier.
      description: Get a shield by its identifier.
      operationId: get_shield_v1_shields__identifier__get
      parameters:
        - name: identifier
          in: path
          description: The identifier of the shield to get.
          required: true
          schema:
            type: string
            title: Identifier
      deprecated: false
      responses:
        '200':
          description: A Shield.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Shield'
        '400':
          $ref: '#/components/responses/BadRequest400'
          description: Bad Request
        '429':
          $ref: '#/components/responses/TooManyRequests429'
          description: Too Many Requests
        '500':
          $ref: '#/components/responses/InternalServerError500'
          description: Internal Server Error
        default:
          $ref: '#/components/responses/DefaultError'
/v1/tool-runtime/invoke:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Invoke Tool
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: invoke_tool_v1_tool_runtime_invoke_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_tool_runtime_invoke_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A ToolInvocationResult.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ToolInvocationResult'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/tool-runtime/list-tools:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Runtime Tools
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_runtime_tools_v1_tool_runtime_list_tools_get
|
|
parameters:
|
|
- name: tool_group_id
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Tool Group Id
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/URL'
|
|
responses:
|
|
'200':
|
|
description: A ListToolDefsResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListToolDefsResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/tool-runtime/rag-tool/insert:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Rag Tool.Insert
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: rag_tool_insert_v1_tool_runtime_rag_tool_insert_post
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/tool-runtime/rag-tool/query:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Rag Tool.Query
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: rag_tool_query_v1_tool_runtime_rag_tool_query_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_tool_runtime_rag_tool_query_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: RAGQueryResult containing the retrieved content and metadata
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/RAGQueryResult'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
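  # Illustrative sketch only, not part of the generated spec: a possible RAG
  # query call. The body fields "content" and "vector_store_ids" are assumptions
  # about the _tool_runtime_rag_tool_query_Request schema, not confirmed here.
  #
  #   POST /v1/tool-runtime/rag-tool/query
  #   {"content": "What is Llama Stack?", "vector_store_ids": ["vs_123"]}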
|
|
/v1/toolgroups:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Tool Groups
|
|
description: Response-only endpoint for proper schema generation.
|
|
operationId: list_tool_groups_v1_toolgroups_get
|
|
responses:
|
|
'200':
|
|
description: A ListToolGroupsResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListToolGroupsResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Register Tool Group
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: register_tool_group_v1_toolgroups_post
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/toolgroups/{toolgroup_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Unregister Toolgroup
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: unregister_toolgroup_v1_toolgroups__toolgroup_id__delete
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
- name: toolgroup_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: toolgroup_id'
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Tool Group
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_tool_group_v1_toolgroups__toolgroup_id__get
|
|
parameters:
|
|
- name: toolgroup_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Toolgroup Id
|
|
responses:
|
|
'200':
|
|
description: A ToolGroup.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ToolGroup'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/tools:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: List Tools
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: list_tools_v1_tools_get
|
|
parameters:
|
|
- name: toolgroup_id
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Toolgroup Id
|
|
responses:
|
|
'200':
|
|
description: A ListToolDefsResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ListToolDefsResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/tools/{tool_name}:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Get Tool
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: get_tool_v1_tools__tool_name__get
|
|
parameters:
|
|
- name: tool_name
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Tool Name
|
|
responses:
|
|
'200':
|
|
description: A ToolDef.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/ToolDef'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/vector-io/insert:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Insert Chunks
|
|
description: Generic endpoint - this would be replaced with actual implementation.
|
|
operationId: insert_chunks_v1_vector_io_insert_post
|
|
parameters:
|
|
- name: args
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Args
|
|
- name: kwargs
|
|
in: query
|
|
required: true
|
|
schema:
|
|
title: Kwargs
|
|
responses:
|
|
'200':
|
|
description: Successful Response
|
|
content:
|
|
application/json:
|
|
schema: {}
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/vector-io/query:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Query Chunks
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: query_chunks_v1_vector_io_query_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_vector_io_query_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
description: A QueryChunksResponse.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/QueryChunksResponse'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
/v1/vector_stores:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai List Vector Stores
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_list_vector_stores_v1_vector_stores_get
|
|
parameters:
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: before
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Before
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 20
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: string
|
|
default: desc
|
|
title: Order
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreListResponse containing the list of vector stores.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreListResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Create Vector Store
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_create_vector_store_v1_vector_stores_post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAICreateVectorStoreRequestWithExtraBody'
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreObject representing the created vector store.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
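  # Illustrative sketch only, not part of the generated spec: a possible create
  # call. The "name" field is an assumption about
  # OpenAICreateVectorStoreRequestWithExtraBody, not confirmed in this excerpt.
  #
  #   POST /v1/vector_stores
  #   {"name": "docs-index"}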
|
|
/v1/vector_stores/{vector_store_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Openai Delete Vector Store
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_delete_vector_store_v1_vector_stores__vector_store_id__delete
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreDeleteResponse indicating the deletion status.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreDeleteResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai Retrieve Vector Store
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_retrieve_vector_store_v1_vector_stores__vector_store_id__get
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreObject representing the vector store.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Update Vector Store
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_update_vector_store_v1_vector_stores__vector_store_id__post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_vector_stores_vector_store_id_Request'
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreObject representing the updated vector store.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: vector_store_id'
|
|
/v1/vector_stores/{vector_store_id}/file_batches:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Create Vector Store File Batch
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_create_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/OpenAICreateVectorStoreFileBatchRequestWithExtraBody'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
          description: >-
            A VectorStoreFileBatchObject representing the created file
            batch.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileBatchObject'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: vector_store_id'
|
|
/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai Retrieve Vector Store File Batch
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_retrieve_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__get
|
|
parameters:
|
|
- name: batch_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Batch Id
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreFileBatchObject representing the file batch.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileBatchObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Cancel Vector Store File Batch
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_cancel_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__cancel_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
          description: >-
            A VectorStoreFileBatchObject representing the cancelled file
            batch.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileBatchObject'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: vector_store_id'
|
|
- name: batch_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: batch_id'
|
|
/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai List Files In Vector Store File Batch
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_list_files_in_vector_store_file_batch_v1_vector_stores__vector_store_id__file_batches__batch_id__files_get
|
|
parameters:
|
|
- name: batch_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Batch Id
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: before
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Before
|
|
- name: filter
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Filter
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 20
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: string
|
|
default: desc
|
|
title: Order
|
|
responses:
|
|
'200':
|
|
          description: >-
            A VectorStoreFilesListInBatchResponse containing the list of
            files in the batch.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFilesListInBatchResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/vector_stores/{vector_store_id}/files:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai List Files In Vector Store
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_list_files_in_vector_store_v1_vector_stores__vector_store_id__files_get
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
- name: after
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: After
|
|
- name: before
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Before
|
|
- name: filter
|
|
in: query
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Filter
|
|
- name: limit
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: integer
|
|
default: 20
|
|
title: Limit
|
|
- name: order
|
|
in: query
|
|
required: false
|
|
schema:
|
|
type: string
|
|
default: desc
|
|
title: Order
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreListFilesResponse containing the list of files.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreListFilesResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Attach File To Vector Store
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_attach_file_to_vector_store_v1_vector_stores__vector_store_id__files_post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_vector_stores_vector_store_id_files_Request'
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreFileObject representing the attached file.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: vector_store_id'
|
|
/v1/vector_stores/{vector_store_id}/files/{file_id}:
|
|
delete:
|
|
tags:
|
|
- V1
|
|
summary: Openai Delete Vector Store File
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_delete_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__delete
|
|
parameters:
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: File Id
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreFileDeleteResponse indicating the deletion status.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileDeleteResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai Retrieve Vector Store File
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_retrieve_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__get
|
|
parameters:
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: File Id
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreFileObject representing the file.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Update Vector Store File
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_update_vector_store_file_v1_vector_stores__vector_store_id__files__file_id__post
|
|
requestBody:
|
|
required: true
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_vector_stores_vector_store_id_files_file_id_Request'
|
|
responses:
|
|
'200':
|
|
description: A VectorStoreFileObject representing the updated file.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileObject'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: vector_store_id'
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: file_id'
|
|
/v1/vector_stores/{vector_store_id}/files/{file_id}/content:
|
|
get:
|
|
tags:
|
|
- V1
|
|
summary: Openai Retrieve Vector Store File Contents
|
|
description: Query endpoint for proper schema generation.
|
|
operationId: openai_retrieve_vector_store_file_contents_v1_vector_stores__vector_store_id__files__file_id__content_get
|
|
parameters:
|
|
- name: file_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: File Id
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
title: Vector Store Id
|
|
responses:
|
|
'200':
|
|
          description: >-
            A VectorStoreFileContentsResponse containing the file contents as a
            list of InterleavedContent.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreFileContentsResponse'
|
|
'400':
|
|
$ref: '#/components/responses/BadRequest400'
|
|
description: Bad Request
|
|
'429':
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
description: Too Many Requests
|
|
'500':
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
description: Internal Server Error
|
|
default:
|
|
$ref: '#/components/responses/DefaultError'
|
|
description: Default Response
|
|
/v1/vector_stores/{vector_store_id}/search:
|
|
post:
|
|
tags:
|
|
- V1
|
|
summary: Openai Search Vector Store
|
|
description: Typed endpoint for proper schema generation.
|
|
operationId: openai_search_vector_store_v1_vector_stores__vector_store_id__search_post
|
|
requestBody:
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/_vector_stores_vector_store_id_search_Request'
|
|
required: true
|
|
responses:
|
|
'200':
|
|
          description: A VectorStoreSearchResponsePage containing the search results.
|
|
content:
|
|
application/json:
|
|
schema:
|
|
$ref: '#/components/schemas/VectorStoreSearchResponsePage'
|
|
'400':
|
|
description: Bad Request
|
|
$ref: '#/components/responses/BadRequest400'
|
|
'429':
|
|
description: Too Many Requests
|
|
$ref: '#/components/responses/TooManyRequests429'
|
|
'500':
|
|
description: Internal Server Error
|
|
$ref: '#/components/responses/InternalServerError500'
|
|
default:
|
|
description: Default Response
|
|
$ref: '#/components/responses/DefaultError'
|
|
parameters:
|
|
- name: vector_store_id
|
|
in: path
|
|
required: true
|
|
schema:
|
|
type: string
|
|
description: 'Path parameter: vector_store_id'
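  # Illustrative sketch only, not part of the generated spec: a possible search
  # call. The "query" field is an assumption about
  # _vector_stores_vector_store_id_search_Request; "vs_123" is a placeholder id.
  #
  #   POST /v1/vector_stores/vs_123/search
  #   {"query": "deployment options"}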
  /v1/version:
    get:
      tags:
        - V1
      summary: Version
      description: Response-only endpoint for proper schema generation.
      operationId: version_v1_version_get
      responses:
        '200':
          description: Version information containing the service version number.
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/VersionInfo'
        '400':
          description: Bad Request
          $ref: '#/components/responses/BadRequest400'
        '429':
          description: Too Many Requests
          $ref: '#/components/responses/TooManyRequests429'
        '500':
          description: Internal Server Error
          $ref: '#/components/responses/InternalServerError500'
        default:
          description: Default Response
          $ref: '#/components/responses/DefaultError'
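  # Illustrative sketch only, not part of the generated spec: the expected call
  # shape. The single "version" field is an assumption about VersionInfo.
  #
  #   GET /v1/version
  #   -> {"version": "0.x.y"}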
components:
  schemas:
    Error:
      type: object
      properties:
        status:
          type: integer
          description: HTTP status code
        title:
          type: string
          description: >-
            Error title, a short summary of the error which is invariant for an error
            type
        detail:
          type: string
          description: >-
            Error detail, a longer human-readable description of the error
        instance:
          type: string
          description: >-
            (Optional) A URL which can be used to retrieve more information about
            the specific occurrence of the error
      additionalProperties: false
      required:
        - status
        - title
        - detail
      title: Error
      description: >-
        Error response from the API. Roughly follows RFC 7807.
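    # Illustrative example only, not part of the generated spec: a payload
    # conforming to the Error schema above; the concrete values are made up.
    #
    #   {"status": 404, "title": "Not Found",
    #    "detail": "Shield 'my-shield' is not registered."}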
|
|
ListBatchesResponse:
|
|
type: object
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: list
|
|
default: list
|
|
data:
|
|
type: array
|
|
items:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
completion_window:
|
|
type: string
|
|
created_at:
|
|
type: integer
|
|
endpoint:
|
|
type: string
|
|
input_file_id:
|
|
type: string
|
|
object:
|
|
type: string
|
|
const: batch
|
|
status:
|
|
type: string
|
|
enum:
|
|
- validating
|
|
- failed
|
|
- in_progress
|
|
- finalizing
|
|
- completed
|
|
- expired
|
|
- cancelling
|
|
- cancelled
|
|
cancelled_at:
|
|
type: integer
|
|
cancelling_at:
|
|
type: integer
|
|
completed_at:
|
|
type: integer
|
|
error_file_id:
|
|
type: string
|
|
errors:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
type: object
|
|
properties:
|
|
code:
|
|
type: string
|
|
line:
|
|
type: integer
|
|
message:
|
|
type: string
|
|
param:
|
|
type: string
|
|
additionalProperties: false
|
|
title: BatchError
|
|
object:
|
|
type: string
|
|
additionalProperties: false
|
|
title: Errors
|
|
expired_at:
|
|
type: integer
|
|
expires_at:
|
|
type: integer
|
|
failed_at:
|
|
type: integer
|
|
finalizing_at:
|
|
type: integer
|
|
in_progress_at:
|
|
type: integer
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
type: string
|
|
model:
|
|
type: string
|
|
output_file_id:
|
|
type: string
|
|
request_counts:
|
|
type: object
|
|
properties:
|
|
completed:
|
|
type: integer
|
|
failed:
|
|
type: integer
|
|
total:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- completed
|
|
- failed
|
|
- total
|
|
title: BatchRequestCounts
|
|
usage:
|
|
type: object
|
|
properties:
|
|
input_tokens:
|
|
type: integer
|
|
input_tokens_details:
|
|
type: object
|
|
properties:
|
|
cached_tokens:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- cached_tokens
|
|
title: InputTokensDetails
|
|
output_tokens:
|
|
type: integer
|
|
output_tokens_details:
|
|
type: object
|
|
properties:
|
|
reasoning_tokens:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- reasoning_tokens
|
|
title: OutputTokensDetails
|
|
total_tokens:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- input_tokens
|
|
- input_tokens_details
|
|
- output_tokens
|
|
- output_tokens_details
|
|
- total_tokens
|
|
title: BatchUsage
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- completion_window
|
|
- created_at
|
|
- endpoint
|
|
- input_file_id
|
|
- object
|
|
- status
|
|
title: Batch
|
|
first_id:
|
|
type: string
|
|
last_id:
|
|
type: string
|
|
has_more:
|
|
type: boolean
|
|
default: false
|
|
additionalProperties: false
|
|
required:
|
|
- object
|
|
- data
|
|
- has_more
|
|
title: ListBatchesResponse
|
|
description: >-
|
|
Response containing a list of batch objects.
|
|
CreateBatchRequest:
|
|
type: object
|
|
properties:
|
|
input_file_id:
|
|
type: string
|
|
description: >-
|
|
The ID of an uploaded file containing requests for the batch.
|
|
endpoint:
|
|
type: string
|
|
description: >-
|
|
The endpoint to be used for all requests in the batch.
|
|
completion_window:
|
|
type: string
|
|
const: 24h
|
|
description: >-
|
|
The time window within which the batch should be processed.
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
type: string
|
|
description: Optional metadata for the batch.
|
|
idempotency_key:
|
|
type: string
|
|
description: >-
|
|
Optional idempotency key. When provided, enables idempotent behavior.
|
|
additionalProperties: false
|
|
required:
|
|
- input_file_id
|
|
- endpoint
|
|
- completion_window
|
|
title: CreateBatchRequest
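    # Illustrative example only, not part of the generated spec: a body
    # conforming to CreateBatchRequest; the file id and endpoint value are
    # placeholders.
    #
    #   {"input_file_id": "file-abc123", "endpoint": "/v1/chat/completions",
    #    "completion_window": "24h"}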
|
|
Batch:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
completion_window:
|
|
type: string
|
|
created_at:
|
|
type: integer
|
|
endpoint:
|
|
type: string
|
|
input_file_id:
|
|
type: string
|
|
object:
|
|
type: string
|
|
const: batch
|
|
status:
|
|
type: string
|
|
enum:
|
|
- validating
|
|
- failed
|
|
- in_progress
|
|
- finalizing
|
|
- completed
|
|
- expired
|
|
- cancelling
|
|
- cancelled
|
|
cancelled_at:
|
|
type: integer
|
|
cancelling_at:
|
|
type: integer
|
|
completed_at:
|
|
type: integer
|
|
error_file_id:
|
|
type: string
|
|
errors:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
type: object
|
|
properties:
|
|
code:
|
|
type: string
|
|
line:
|
|
type: integer
|
|
message:
|
|
type: string
|
|
param:
|
|
type: string
|
|
additionalProperties: false
|
|
title: BatchError
|
|
object:
|
|
type: string
|
|
additionalProperties: false
|
|
title: Errors
|
|
expired_at:
|
|
type: integer
|
|
expires_at:
|
|
type: integer
|
|
failed_at:
|
|
type: integer
|
|
finalizing_at:
|
|
type: integer
|
|
in_progress_at:
|
|
type: integer
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
type: string
|
|
model:
|
|
type: string
|
|
output_file_id:
|
|
type: string
|
|
request_counts:
|
|
type: object
|
|
properties:
|
|
completed:
|
|
type: integer
|
|
failed:
|
|
type: integer
|
|
total:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- completed
|
|
- failed
|
|
- total
|
|
title: BatchRequestCounts
|
|
usage:
|
|
type: object
|
|
properties:
|
|
input_tokens:
|
|
type: integer
|
|
input_tokens_details:
|
|
type: object
|
|
properties:
|
|
cached_tokens:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- cached_tokens
|
|
title: InputTokensDetails
|
|
output_tokens:
|
|
type: integer
|
|
output_tokens_details:
|
|
type: object
|
|
properties:
|
|
reasoning_tokens:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- reasoning_tokens
|
|
title: OutputTokensDetails
|
|
total_tokens:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- input_tokens
|
|
- input_tokens_details
|
|
- output_tokens
|
|
- output_tokens_details
|
|
- total_tokens
|
|
title: BatchUsage
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- completion_window
|
|
- created_at
|
|
- endpoint
|
|
- input_file_id
|
|
- object
|
|
- status
|
|
title: Batch
|
|
Order:
|
|
type: string
|
|
enum:
|
|
- asc
|
|
- desc
|
|
title: Order
|
|
description: Sort order for paginated responses.
|
|
ListOpenAIChatCompletionResponse:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: The ID of the chat completion
|
|
choices:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChoice'
|
|
description: List of choices
|
|
object:
|
|
type: string
|
|
const: chat.completion
|
|
default: chat.completion
|
|
description: >-
|
|
The object type, which will be "chat.completion"
|
|
created:
|
|
type: integer
|
|
description: >-
|
|
The Unix timestamp in seconds when the chat completion was created
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The model that was used to generate the chat completion
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionUsage'
|
|
description: >-
|
|
Token usage information for the completion
|
|
input_messages:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIMessageParam'
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- choices
|
|
- object
|
|
- created
|
|
- model
|
|
- input_messages
|
|
title: OpenAICompletionWithInputMessages
|
|
description: >-
|
|
List of chat completion objects with their input messages
|
|
has_more:
|
|
type: boolean
|
|
description: >-
|
|
Whether there are more completions available beyond this list
|
|
first_id:
|
|
type: string
|
|
description: ID of the first completion in this list
|
|
last_id:
|
|
type: string
|
|
description: ID of the last completion in this list
|
|
object:
|
|
type: string
|
|
const: list
|
|
default: list
|
|
description: >-
|
|
Must be "list" to identify this as a list response
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
- has_more
|
|
- first_id
|
|
- last_id
|
|
- object
|
|
title: ListOpenAIChatCompletionResponse
|
|
description: >-
|
|
Response from listing OpenAI-compatible chat completions.
|
|
OpenAIAssistantMessageParam:
|
|
type: object
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: assistant
|
|
default: assistant
|
|
description: >-
|
|
Must be "assistant" to identify this as the model's response
|
|
content:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
description: The content of the model's response
|
|
name:
|
|
type: string
|
|
description: >-
|
|
(Optional) The name of the assistant message participant.
|
|
tool_calls:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionToolCall'
|
|
description: >-
|
|
List of tool calls. Each tool call is an OpenAIChatCompletionToolCall
|
|
object.
|
|
additionalProperties: false
|
|
required:
|
|
- role
|
|
title: OpenAIAssistantMessageParam
|
|
description: >-
|
|
A message containing the model's (assistant) response in an OpenAI-compatible
|
|
chat completion request.
|
|
"OpenAIChatCompletionContentPartImageParam":
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: image_url
|
|
default: image_url
|
|
description: >-
|
|
Must be "image_url" to identify this as image content
|
|
image_url:
|
|
$ref: '#/components/schemas/OpenAIImageURL'
|
|
description: >-
|
|
Image URL specification and processing details
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- image_url
|
|
title: >-
|
|
OpenAIChatCompletionContentPartImageParam
|
|
description: >-
|
|
Image content part for OpenAI-compatible chat completion messages.
|
|
OpenAIChatCompletionContentPartParam:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
- $ref: '#/components/schemas/OpenAIFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
file: '#/components/schemas/OpenAIFile'
|
|
OpenAIChatCompletionContentPartTextParam:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: text
|
|
default: text
|
|
description: >-
|
|
Must be "text" to identify this as text content
|
|
text:
|
|
type: string
|
|
description: The text content of the message
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- text
|
|
title: OpenAIChatCompletionContentPartTextParam
|
|
description: >-
|
|
Text content part for OpenAI-compatible chat completion messages.
|
|
OpenAIChatCompletionToolCall:
|
|
type: object
|
|
properties:
|
|
index:
|
|
type: integer
|
|
description: >-
|
|
(Optional) Index of the tool call in the list
|
|
id:
|
|
type: string
|
|
description: >-
|
|
(Optional) Unique identifier for the tool call
|
|
type:
|
|
type: string
|
|
const: function
|
|
default: function
|
|
description: >-
|
|
Must be "function" to identify this as a function call
|
|
function:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction'
|
|
description: (Optional) Function call details
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
title: OpenAIChatCompletionToolCall
|
|
description: >-
|
|
Tool call specification for OpenAI-compatible chat completion responses.
|
|
OpenAIChatCompletionToolCallFunction:
|
|
type: object
|
|
properties:
|
|
name:
|
|
type: string
|
|
description: (Optional) Name of the function to call
|
|
arguments:
|
|
type: string
|
|
description: >-
|
|
(Optional) Arguments to pass to the function as a JSON string
|
|
additionalProperties: false
|
|
title: OpenAIChatCompletionToolCallFunction
|
|
description: >-
|
|
Function call details for OpenAI-compatible tool calls.
|
|
OpenAIChatCompletionUsage:
|
|
type: object
|
|
properties:
|
|
prompt_tokens:
|
|
type: integer
|
|
description: Number of tokens in the prompt
|
|
completion_tokens:
|
|
type: integer
|
|
description: Number of tokens in the completion
|
|
total_tokens:
|
|
type: integer
|
|
description: Total tokens used (prompt + completion)
|
|
prompt_tokens_details:
|
|
type: object
|
|
properties:
|
|
cached_tokens:
|
|
type: integer
|
|
description: Number of tokens retrieved from cache
|
|
additionalProperties: false
|
|
title: >-
|
|
OpenAIChatCompletionUsagePromptTokensDetails
|
|
description: >-
|
|
Token details for prompt tokens in OpenAI chat completion usage.
|
|
completion_tokens_details:
|
|
type: object
|
|
properties:
|
|
reasoning_tokens:
|
|
type: integer
|
|
description: >-
|
|
Number of tokens used for reasoning (o1/o3 models)
|
|
additionalProperties: false
|
|
title: >-
|
|
OpenAIChatCompletionUsageCompletionTokensDetails
|
|
description: >-
|
|
Token details for output tokens in OpenAI chat completion usage.
|
|
additionalProperties: false
|
|
required:
|
|
- prompt_tokens
|
|
- completion_tokens
|
|
- total_tokens
|
|
title: OpenAIChatCompletionUsage
|
|
description: >-
|
|
Usage information for OpenAI chat completion.
|
|
OpenAIChoice:
|
|
type: object
|
|
properties:
|
|
message:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIUserMessageParam'
|
|
- $ref: '#/components/schemas/OpenAISystemMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIToolMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
discriminator:
|
|
propertyName: role
|
|
mapping:
|
|
user: '#/components/schemas/OpenAIUserMessageParam'
|
|
system: '#/components/schemas/OpenAISystemMessageParam'
|
|
assistant: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
tool: '#/components/schemas/OpenAIToolMessageParam'
|
|
developer: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
description: The message from the model
|
|
finish_reason:
|
|
type: string
|
|
description: The reason the model stopped generating
|
|
index:
|
|
type: integer
|
|
description: The index of the choice
|
|
logprobs:
|
|
$ref: '#/components/schemas/OpenAIChoiceLogprobs'
|
|
description: >-
|
|
(Optional) The log probabilities for the tokens in the message
|
|
additionalProperties: false
|
|
required:
|
|
- message
|
|
- finish_reason
|
|
- index
|
|
title: OpenAIChoice
|
|
description: >-
|
|
A choice from an OpenAI-compatible chat completion response.
|
|
OpenAIChoiceLogprobs:
|
|
type: object
|
|
properties:
|
|
content:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAITokenLogProb'
|
|
description: >-
|
|
(Optional) The log probabilities for the tokens in the message
|
|
refusal:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAITokenLogProb'
|
|
description: >-
|
|
(Optional) The log probabilities for the tokens in the message
|
|
additionalProperties: false
|
|
title: OpenAIChoiceLogprobs
|
|
description: >-
|
|
The log probabilities for the tokens in the message from an OpenAI-compatible
|
|
chat completion response.
|
|
OpenAIDeveloperMessageParam:
|
|
type: object
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: developer
|
|
default: developer
|
|
description: >-
|
|
Must be "developer" to identify this as a developer message
|
|
content:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
description: The content of the developer message
|
|
name:
|
|
type: string
|
|
description: >-
|
|
(Optional) The name of the developer message participant.
|
|
additionalProperties: false
|
|
required:
|
|
- role
|
|
- content
|
|
title: OpenAIDeveloperMessageParam
|
|
description: >-
|
|
A message from the developer in an OpenAI-compatible chat completion request.
|
|
OpenAIFile:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file
|
|
default: file
|
|
file:
|
|
$ref: '#/components/schemas/OpenAIFileFile'
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- file
|
|
title: OpenAIFile
|
|
OpenAIFileFile:
|
|
type: object
|
|
properties:
|
|
file_data:
|
|
type: string
|
|
file_id:
|
|
type: string
|
|
filename:
|
|
type: string
|
|
additionalProperties: false
|
|
title: OpenAIFileFile
|
|
OpenAIImageURL:
|
|
type: object
|
|
properties:
|
|
url:
|
|
type: string
|
|
description: >-
|
|
URL of the image to include in the message
|
|
detail:
|
|
type: string
|
|
description: >-
|
|
(Optional) Level of detail for image processing. Can be "low", "high",
|
|
or "auto"
|
|
additionalProperties: false
|
|
required:
|
|
- url
|
|
title: OpenAIImageURL
|
|
description: >-
|
|
Image URL specification for OpenAI-compatible chat completion messages.
|
|
OpenAIMessageParam:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIUserMessageParam'
|
|
- $ref: '#/components/schemas/OpenAISystemMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIToolMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
discriminator:
|
|
propertyName: role
|
|
mapping:
|
|
user: '#/components/schemas/OpenAIUserMessageParam'
|
|
system: '#/components/schemas/OpenAISystemMessageParam'
|
|
assistant: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
tool: '#/components/schemas/OpenAIToolMessageParam'
|
|
developer: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
OpenAISystemMessageParam:
|
|
type: object
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: system
|
|
default: system
|
|
description: >-
|
|
Must be "system" to identify this as a system message
|
|
content:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
description: >-
|
|
The content of the "system prompt". If multiple system messages are provided,
|
|
they are concatenated. The underlying Llama Stack code may also add other
|
|
system messages (for example, for formatting tool definitions).
|
|
name:
|
|
type: string
|
|
description: >-
|
|
(Optional) The name of the system message participant.
|
|
additionalProperties: false
|
|
required:
|
|
- role
|
|
- content
|
|
title: OpenAISystemMessageParam
|
|
description: >-
|
|
A system message providing instructions or context to the model.
|
|
OpenAITokenLogProb:
|
|
type: object
|
|
properties:
|
|
token:
|
|
type: string
|
|
bytes:
|
|
type: array
|
|
items:
|
|
type: integer
|
|
logprob:
|
|
type: number
|
|
top_logprobs:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAITopLogProb'
|
|
additionalProperties: false
|
|
required:
|
|
- token
|
|
- logprob
|
|
- top_logprobs
|
|
title: OpenAITokenLogProb
|
|
description: >-
|
|
The log probability for a token from an OpenAI-compatible chat completion
|
|
response.
|
|
OpenAIToolMessageParam:
|
|
type: object
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: tool
|
|
default: tool
|
|
description: >-
|
|
Must be "tool" to identify this as a tool response
|
|
tool_call_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier for the tool call this response is for
|
|
content:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
description: The response content from the tool
|
|
additionalProperties: false
|
|
required:
|
|
- role
|
|
- tool_call_id
|
|
- content
|
|
title: OpenAIToolMessageParam
|
|
description: >-
|
|
A message representing the result of a tool invocation in an OpenAI-compatible
|
|
chat completion request.
|
|
OpenAITopLogProb:
|
|
type: object
|
|
properties:
|
|
token:
|
|
type: string
|
|
bytes:
|
|
type: array
|
|
items:
|
|
type: integer
|
|
logprob:
|
|
type: number
|
|
additionalProperties: false
|
|
required:
|
|
- token
|
|
- logprob
|
|
title: OpenAITopLogProb
|
|
description: >-
|
|
The top log probability for a token from an OpenAI-compatible chat completion
|
|
response.
|
|
OpenAIUserMessageParam:
|
|
type: object
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: user
|
|
default: user
|
|
description: >-
|
|
Must be "user" to identify this as a user message
|
|
content:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartParam'
|
|
description: >-
|
|
The content of the message, which can include text and other media
|
|
name:
|
|
type: string
|
|
description: >-
|
|
(Optional) The name of the user message participant.
|
|
additionalProperties: false
|
|
required:
|
|
- role
|
|
- content
|
|
title: OpenAIUserMessageParam
|
|
description: >-
|
|
A message from the user in an OpenAI-compatible chat completion request.
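    # Illustrative example only, not part of the generated spec: a user message
    # mixing text and image content parts, following OpenAIUserMessageParam and
    # the content-part schemas above; the URL is a placeholder.
    #
    #   {"role": "user",
    #    "content": [{"type": "text", "text": "Describe this image"},
    #                {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}]}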
|
|
OpenAIJSONSchema:
|
|
type: object
|
|
properties:
|
|
name:
|
|
type: string
|
|
description: Name of the schema
|
|
description:
|
|
type: string
|
|
description: (Optional) Description of the schema
|
|
strict:
|
|
type: boolean
|
|
description: >-
|
|
(Optional) Whether to enforce strict adherence to the schema
|
|
schema:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) The JSON schema definition
|
|
additionalProperties: false
|
|
required:
|
|
- name
|
|
title: OpenAIJSONSchema
|
|
description: >-
|
|
JSON schema specification for OpenAI-compatible structured response format.
|
|
OpenAIResponseFormatJSONObject:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: json_object
|
|
default: json_object
|
|
description: >-
|
|
Must be "json_object" to indicate generic JSON object response format
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
title: OpenAIResponseFormatJSONObject
|
|
description: >-
|
|
JSON object response format for OpenAI-compatible chat completion requests.
|
|
OpenAIResponseFormatJSONSchema:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: json_schema
|
|
default: json_schema
|
|
description: >-
|
|
Must be "json_schema" to indicate structured JSON response format
|
|
json_schema:
|
|
$ref: '#/components/schemas/OpenAIJSONSchema'
|
|
description: >-
|
|
The JSON schema specification for the response
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- json_schema
|
|
title: OpenAIResponseFormatJSONSchema
|
|
description: >-
|
|
JSON schema response format for OpenAI-compatible chat completion requests.
|
|
OpenAIResponseFormatParam:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseFormatText'
|
|
- $ref: '#/components/schemas/OpenAIResponseFormatJSONSchema'
|
|
- $ref: '#/components/schemas/OpenAIResponseFormatJSONObject'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
text: '#/components/schemas/OpenAIResponseFormatText'
|
|
json_schema: '#/components/schemas/OpenAIResponseFormatJSONSchema'
|
|
json_object: '#/components/schemas/OpenAIResponseFormatJSONObject'
|
|
OpenAIResponseFormatText:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: text
|
|
default: text
|
|
description: >-
|
|
Must be "text" to indicate plain text response format
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
title: OpenAIResponseFormatText
|
|
description: >-
|
|
Text response format for OpenAI-compatible chat completion requests.
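    # Illustrative example only, not part of the generated spec: a response_format
    # value using the json_schema variant defined above; the schema contents are
    # placeholders.
    #
    #   {"type": "json_schema",
    #    "json_schema": {"name": "weather_report", "schema": {"type": "object"}}}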
|
|
OpenAIChatCompletionRequestWithExtraBody:
|
|
type: object
|
|
properties:
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The identifier of the model to use. The model must be registered with
|
|
Llama Stack and available via the /models endpoint.
|
|
messages:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIMessageParam'
|
|
description: List of messages in the conversation.
|
|
frequency_penalty:
|
|
type: number
|
|
description: >-
|
|
(Optional) The penalty for repeated tokens.
|
|
function_call:
|
|
oneOf:
|
|
- type: string
|
|
- type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) The function call to use.
|
|
functions:
|
|
type: array
|
|
items:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) List of functions to use.
|
|
logit_bias:
|
|
type: object
|
|
additionalProperties:
|
|
type: number
|
|
description: (Optional) The logit bias to use.
|
|
logprobs:
|
|
type: boolean
|
|
description: (Optional) The log probabilities to use.
|
|
max_completion_tokens:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The maximum number of tokens to generate.
|
|
max_tokens:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The maximum number of tokens to generate.
|
|
n:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The number of completions to generate.
|
|
parallel_tool_calls:
|
|
type: boolean
|
|
description: >-
|
|
(Optional) Whether to parallelize tool calls.
|
|
presence_penalty:
|
|
type: number
|
|
description: >-
|
|
(Optional) The penalty for repeated tokens.
|
|
response_format:
|
|
$ref: '#/components/schemas/OpenAIResponseFormatParam'
|
|
description: (Optional) The response format to use.
|
|
seed:
|
|
type: integer
|
|
description: (Optional) The seed to use.
|
|
stop:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
type: string
|
|
description: (Optional) The stop tokens to use.
|
|
stream:
|
|
type: boolean
|
|
description: >-
|
|
(Optional) Whether to stream the response.
|
|
stream_options:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) The stream options to use.
|
|
temperature:
|
|
type: number
|
|
description: (Optional) The temperature to use.
|
|
tool_choice:
|
|
oneOf:
|
|
- type: string
|
|
- type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) The tool choice to use.
|
|
tools:
|
|
type: array
|
|
items:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) The tools to use.
|
|
top_logprobs:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The top log probabilities to use.
|
|
top_p:
|
|
type: number
|
|
description: (Optional) The top p to use.
|
|
user:
|
|
type: string
|
|
description: (Optional) The user to use.
|
|
additionalProperties: false
|
|
required:
|
|
- model
|
|
- messages
|
|
title: OpenAIChatCompletionRequestWithExtraBody
|
|
description: >-
|
|
Request parameters for OpenAI-compatible chat completion endpoint.
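    # Illustrative example only, not part of the generated spec: a minimal body
    # conforming to OpenAIChatCompletionRequestWithExtraBody; the model id is a
    # placeholder.
    #
    #   {"model": "meta-llama/Llama-3.3-70B-Instruct",
    #    "messages": [{"role": "user", "content": "Hello"}]}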
|
|
OpenAIChatCompletion:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: The ID of the chat completion
|
|
choices:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChoice'
|
|
description: List of choices
|
|
object:
|
|
type: string
|
|
const: chat.completion
|
|
default: chat.completion
|
|
description: >-
|
|
The object type, which will be "chat.completion"
|
|
created:
|
|
type: integer
|
|
description: >-
|
|
The Unix timestamp in seconds when the chat completion was created
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The model that was used to generate the chat completion
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionUsage'
|
|
description: >-
|
|
Token usage information for the completion
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- choices
|
|
- object
|
|
- created
|
|
- model
|
|
title: OpenAIChatCompletion
|
|
description: >-
|
|
Response from an OpenAI-compatible chat completion request.
|
|
OpenAIChatCompletionChunk:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: The ID of the chat completion
|
|
choices:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChunkChoice'
|
|
description: List of choices
|
|
object:
|
|
type: string
|
|
const: chat.completion.chunk
|
|
default: chat.completion.chunk
|
|
description: >-
|
|
The object type, which will be "chat.completion.chunk"
|
|
created:
|
|
type: integer
|
|
description: >-
|
|
The Unix timestamp in seconds when the chat completion was created
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The model that was used to generate the chat completion
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionUsage'
|
|
description: >-
|
|
Token usage information (typically included in final chunk with stream_options)
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- choices
|
|
- object
|
|
- created
|
|
- model
|
|
title: OpenAIChatCompletionChunk
|
|
description: >-
|
|
Chunk from a streaming response to an OpenAI-compatible chat completion request.
OpenAIChoiceDelta:
|
|
type: object
|
|
properties:
|
|
content:
|
|
type: string
|
|
description: (Optional) The content of the delta
|
|
refusal:
|
|
type: string
|
|
description: (Optional) The refusal of the delta
|
|
role:
|
|
type: string
|
|
description: (Optional) The role of the delta
|
|
tool_calls:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionToolCall'
|
|
description: (Optional) The tool calls of the delta
|
|
reasoning_content:
|
|
type: string
|
|
description: >-
|
|
(Optional) The reasoning content from the model (non-standard, for o1/o3
|
|
models)
|
|
additionalProperties: false
|
|
title: OpenAIChoiceDelta
|
|
description: >-
|
|
A delta from an OpenAI-compatible chat completion streaming response.
|
|
OpenAIChunkChoice:
|
|
type: object
|
|
properties:
|
|
delta:
|
|
$ref: '#/components/schemas/OpenAIChoiceDelta'
|
|
description: The delta from the chunk
|
|
finish_reason:
|
|
type: string
|
|
description: The reason the model stopped generating
|
|
index:
|
|
type: integer
|
|
description: The index of the choice
|
|
logprobs:
|
|
$ref: '#/components/schemas/OpenAIChoiceLogprobs'
|
|
description: >-
|
|
(Optional) The log probabilities for the tokens in the message
|
|
additionalProperties: false
|
|
required:
|
|
- delta
|
|
- finish_reason
|
|
- index
|
|
title: OpenAIChunkChoice
|
|
description: >-
|
|
A chunk choice from an OpenAI-compatible chat completion streaming response.
OpenAICompletionWithInputMessages:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: The ID of the chat completion
|
|
choices:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChoice'
|
|
description: List of choices
|
|
object:
|
|
type: string
|
|
const: chat.completion
|
|
default: chat.completion
|
|
description: >-
|
|
The object type, which will be "chat.completion"
|
|
created:
|
|
type: integer
|
|
description: >-
|
|
The Unix timestamp in seconds when the chat completion was created
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The model that was used to generate the chat completion
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionUsage'
|
|
description: >-
|
|
Token usage information for the completion
|
|
input_messages:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIMessageParam'
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- choices
|
|
- object
|
|
- created
|
|
- model
|
|
- input_messages
|
|
title: OpenAICompletionWithInputMessages
|
|
OpenAICompletionRequestWithExtraBody:
|
|
type: object
|
|
properties:
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The identifier of the model to use. The model must be registered with
|
|
Llama Stack and available via the /models endpoint.
|
|
prompt:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
type: string
|
|
- type: array
|
|
items:
|
|
type: integer
|
|
- type: array
|
|
items:
|
|
type: array
|
|
items:
|
|
type: integer
|
|
description: The prompt to generate a completion for.
|
|
best_of:
|
|
type: integer
|
|
description: >-
|
|
            (Optional) The number of candidate completions to generate server-side;
            the one with the highest log probability is returned.
|
|
echo:
|
|
type: boolean
|
|
description: (Optional) Whether to echo the prompt.
|
|
frequency_penalty:
|
|
type: number
|
|
description: >-
|
|
(Optional) The penalty for repeated tokens.
|
|
logit_bias:
|
|
type: object
|
|
additionalProperties:
|
|
type: number
|
|
description: (Optional) The logit bias to use.
|
|
logprobs:
|
|
type: boolean
|
|
          description: (Optional) Whether to return log probabilities of the output tokens.
|
|
max_tokens:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The maximum number of tokens to generate.
|
|
n:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The number of completions to generate.
|
|
presence_penalty:
|
|
type: number
|
|
description: >-
|
|
            (Optional) The penalty to apply to tokens based on whether they have
            already appeared in the text.
|
|
seed:
|
|
type: integer
|
|
          description: (Optional) The seed to use for reproducible generation.
|
|
stop:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
type: string
|
|
description: (Optional) The stop tokens to use.
|
|
stream:
|
|
type: boolean
|
|
description: >-
|
|
(Optional) Whether to stream the response.
|
|
stream_options:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) The stream options to use.
|
|
        temperature:
          type: number
          description: (Optional) The sampling temperature to use.
        top_p:
          type: number
          description: (Optional) The nucleus sampling (top-p) value to use.
        user:
          type: string
          description: >-
            (Optional) A unique identifier for the end user, used for tracking
            and abuse detection.
|
|
suffix:
|
|
type: string
|
|
description: >-
|
|
(Optional) The suffix that should be appended to the completion.
|
|
additionalProperties: false
|
|
required:
|
|
- model
|
|
- prompt
|
|
title: OpenAICompletionRequestWithExtraBody
|
|
description: >-
|
|
Request parameters for OpenAI-compatible completion endpoint.
|
|
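    # Illustrative sketch (not part of the generated spec): an example request body
    # conforming to the OpenAICompletionRequestWithExtraBody schema above. The model
    # identifier, prompt, and parameter values are hypothetical.
    #
    #   {
    #     "model": "llama3.2-3b-instruct",
    #     "prompt": "Write a haiku about the sea.",
    #     "max_tokens": 64,
    #     "temperature": 0.7,
    #     "stop": ["\n\n"]
    #   }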
OpenAICompletion:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
choices:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAICompletionChoice'
|
|
created:
|
|
type: integer
|
|
model:
|
|
type: string
|
|
object:
|
|
type: string
|
|
const: text_completion
|
|
default: text_completion
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- choices
|
|
- created
|
|
- model
|
|
- object
|
|
title: OpenAICompletion
|
|
description: >-
|
|
Response from an OpenAI-compatible completion request.
|
|
OpenAICompletionChoice:
|
|
type: object
|
|
properties:
|
|
finish_reason:
|
|
type: string
|
|
text:
|
|
type: string
|
|
index:
|
|
type: integer
|
|
logprobs:
|
|
$ref: '#/components/schemas/OpenAIChoiceLogprobs'
|
|
additionalProperties: false
|
|
required:
|
|
- finish_reason
|
|
- text
|
|
- index
|
|
title: OpenAICompletionChoice
|
|
description: >-
|
|
A choice from an OpenAI-compatible completion response.
|
|
ConversationItem:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
function_call_output: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
mcp_approval_response: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
OpenAIResponseAnnotationCitation:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: url_citation
|
|
default: url_citation
|
|
description: >-
|
|
Annotation type identifier, always "url_citation"
|
|
end_index:
|
|
type: integer
|
|
description: >-
|
|
End position of the citation span in the content
|
|
start_index:
|
|
type: integer
|
|
description: >-
|
|
Start position of the citation span in the content
|
|
title:
|
|
type: string
|
|
description: Title of the referenced web resource
|
|
url:
|
|
type: string
|
|
description: URL of the referenced web resource
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- end_index
|
|
- start_index
|
|
- title
|
|
- url
|
|
title: OpenAIResponseAnnotationCitation
|
|
description: >-
|
|
URL citation annotation for referencing external web resources.
|
|
"OpenAIResponseAnnotationContainerFileCitation":
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: container_file_citation
|
|
default: container_file_citation
|
|
container_id:
|
|
type: string
|
|
end_index:
|
|
type: integer
|
|
file_id:
|
|
type: string
|
|
filename:
|
|
type: string
|
|
start_index:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- container_id
|
|
- end_index
|
|
- file_id
|
|
- filename
|
|
- start_index
|
|
title: >-
|
|
OpenAIResponseAnnotationContainerFileCitation
|
|
OpenAIResponseAnnotationFileCitation:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file_citation
|
|
default: file_citation
|
|
description: >-
|
|
Annotation type identifier, always "file_citation"
|
|
file_id:
|
|
type: string
|
|
description: Unique identifier of the referenced file
|
|
filename:
|
|
type: string
|
|
description: Name of the referenced file
|
|
index:
|
|
type: integer
|
|
description: >-
|
|
Position index of the citation within the content
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- file_id
|
|
- filename
|
|
- index
|
|
title: OpenAIResponseAnnotationFileCitation
|
|
description: >-
|
|
File citation annotation for referencing specific files in response content.
|
|
OpenAIResponseAnnotationFilePath:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file_path
|
|
default: file_path
|
|
file_id:
|
|
type: string
|
|
index:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- file_id
|
|
- index
|
|
title: OpenAIResponseAnnotationFilePath
|
|
OpenAIResponseAnnotations:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
|
|
url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
|
|
container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
|
|
file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
|
|
OpenAIResponseContentPartRefusal:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: refusal
|
|
default: refusal
|
|
description: >-
|
|
Content part type identifier, always "refusal"
|
|
refusal:
|
|
type: string
|
|
description: Refusal text supplied by the model
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- refusal
|
|
title: OpenAIResponseContentPartRefusal
|
|
description: >-
|
|
Refusal content within a streamed response part.
|
|
"OpenAIResponseInputFunctionToolCallOutput":
|
|
type: object
|
|
properties:
|
|
call_id:
|
|
type: string
|
|
output:
|
|
type: string
|
|
type:
|
|
type: string
|
|
const: function_call_output
|
|
default: function_call_output
|
|
id:
|
|
type: string
|
|
status:
|
|
type: string
|
|
additionalProperties: false
|
|
required:
|
|
- call_id
|
|
- output
|
|
- type
|
|
title: >-
|
|
OpenAIResponseInputFunctionToolCallOutput
|
|
description: >-
|
|
This represents the output of a function call that gets passed back to the
|
|
model.
|
|
OpenAIResponseInputMessageContent:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
OpenAIResponseInputMessageContentFile:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: input_file
|
|
default: input_file
|
|
description: >-
|
|
The type of the input item. Always `input_file`.
|
|
        file_data:
          type: string
          description: >-
            (Optional) The data of the file to be sent to the model.
        file_id:
          type: string
          description: >-
            (Optional) The ID of the file to be sent to the model.
        file_url:
          type: string
          description: >-
            (Optional) The URL of the file to be sent to the model.
        filename:
          type: string
          description: >-
            (Optional) The name of the file to be sent to the model.
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
title: OpenAIResponseInputMessageContentFile
|
|
description: >-
|
|
File content for input messages in OpenAI response format.
|
|
OpenAIResponseInputMessageContentImage:
|
|
type: object
|
|
properties:
|
|
detail:
|
|
oneOf:
|
|
- type: string
|
|
const: low
|
|
- type: string
|
|
const: high
|
|
- type: string
|
|
const: auto
|
|
default: auto
|
|
description: >-
|
|
Level of detail for image processing, can be "low", "high", or "auto"
|
|
type:
|
|
type: string
|
|
const: input_image
|
|
default: input_image
|
|
description: >-
|
|
Content type identifier, always "input_image"
|
|
file_id:
|
|
type: string
|
|
description: >-
|
|
(Optional) The ID of the file to be sent to the model.
|
|
image_url:
|
|
type: string
|
|
description: (Optional) URL of the image content
|
|
additionalProperties: false
|
|
required:
|
|
- detail
|
|
- type
|
|
title: OpenAIResponseInputMessageContentImage
|
|
description: >-
|
|
Image content for input messages in OpenAI response format.
|
|
OpenAIResponseInputMessageContentText:
|
|
type: object
|
|
properties:
|
|
text:
|
|
type: string
|
|
description: The text content of the input message
|
|
type:
|
|
type: string
|
|
const: input_text
|
|
default: input_text
|
|
description: >-
|
|
Content type identifier, always "input_text"
|
|
additionalProperties: false
|
|
required:
|
|
- text
|
|
- type
|
|
title: OpenAIResponseInputMessageContentText
|
|
description: >-
|
|
Text content for input messages in OpenAI response format.
|
|
OpenAIResponseMCPApprovalRequest:
|
|
type: object
|
|
properties:
|
|
arguments:
|
|
type: string
|
|
id:
|
|
type: string
|
|
name:
|
|
type: string
|
|
server_label:
|
|
type: string
|
|
type:
|
|
type: string
|
|
const: mcp_approval_request
|
|
default: mcp_approval_request
|
|
additionalProperties: false
|
|
required:
|
|
- arguments
|
|
- id
|
|
- name
|
|
- server_label
|
|
- type
|
|
title: OpenAIResponseMCPApprovalRequest
|
|
description: >-
|
|
A request for human approval of a tool invocation.
|
|
OpenAIResponseMCPApprovalResponse:
|
|
type: object
|
|
properties:
|
|
approval_request_id:
|
|
type: string
|
|
approve:
|
|
type: boolean
|
|
type:
|
|
type: string
|
|
const: mcp_approval_response
|
|
default: mcp_approval_response
|
|
id:
|
|
type: string
|
|
reason:
|
|
type: string
|
|
additionalProperties: false
|
|
required:
|
|
- approval_request_id
|
|
- approve
|
|
- type
|
|
title: OpenAIResponseMCPApprovalResponse
|
|
description: A response to an MCP approval request.
|
|
OpenAIResponseMessage:
|
|
type: object
|
|
properties:
|
|
content:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseInputMessageContent'
|
|
- type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseOutputMessageContent'
|
|
role:
|
|
oneOf:
|
|
- type: string
|
|
const: system
|
|
- type: string
|
|
const: developer
|
|
- type: string
|
|
const: user
|
|
- type: string
|
|
const: assistant
|
|
type:
|
|
type: string
|
|
const: message
|
|
default: message
|
|
id:
|
|
type: string
|
|
status:
|
|
type: string
|
|
additionalProperties: false
|
|
required:
|
|
- content
|
|
- role
|
|
- type
|
|
title: OpenAIResponseMessage
|
|
description: >-
|
|
Corresponds to the various Message types in the Responses API. They are all
|
|
under one type because the Responses API gives them all the same "type" value,
|
|
and there is no way to tell them apart in certain scenarios.
|
|
OpenAIResponseOutputMessageContent:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
output_text: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
refusal: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
"OpenAIResponseOutputMessageContentOutputText":
|
|
type: object
|
|
properties:
|
|
text:
|
|
type: string
|
|
type:
|
|
type: string
|
|
const: output_text
|
|
default: output_text
|
|
annotations:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseAnnotations'
|
|
additionalProperties: false
|
|
required:
|
|
- text
|
|
- type
|
|
- annotations
|
|
title: >-
|
|
OpenAIResponseOutputMessageContentOutputText
|
|
"OpenAIResponseOutputMessageFileSearchToolCall":
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: Unique identifier for this tool call
|
|
queries:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: List of search queries executed
|
|
status:
|
|
type: string
|
|
description: >-
|
|
Current status of the file search operation
|
|
type:
|
|
type: string
|
|
const: file_search_call
|
|
default: file_search_call
|
|
description: >-
|
|
Tool call type identifier, always "file_search_call"
|
|
results:
|
|
type: array
|
|
items:
|
|
type: object
|
|
properties:
|
|
attributes:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
(Optional) Key-value attributes associated with the file
|
|
file_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the file containing the result
|
|
filename:
|
|
type: string
|
|
description: Name of the file containing the result
|
|
score:
|
|
type: number
|
|
description: >-
|
|
Relevance score for this search result (between 0 and 1)
|
|
text:
|
|
type: string
|
|
description: Text content of the search result
|
|
additionalProperties: false
|
|
required:
|
|
- attributes
|
|
- file_id
|
|
- filename
|
|
- score
|
|
- text
|
|
title: >-
|
|
OpenAIResponseOutputMessageFileSearchToolCallResults
|
|
description: >-
|
|
Search results returned by the file search operation.
|
|
description: >-
|
|
(Optional) Search results returned by the file search operation
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- queries
|
|
- status
|
|
- type
|
|
title: >-
|
|
OpenAIResponseOutputMessageFileSearchToolCall
|
|
description: >-
|
|
File search tool call output message for OpenAI responses.
|
|
"OpenAIResponseOutputMessageFunctionToolCall":
|
|
type: object
|
|
properties:
|
|
call_id:
|
|
type: string
|
|
description: Unique identifier for the function call
|
|
name:
|
|
type: string
|
|
description: Name of the function being called
|
|
arguments:
|
|
type: string
|
|
description: >-
|
|
JSON string containing the function arguments
|
|
type:
|
|
type: string
|
|
const: function_call
|
|
default: function_call
|
|
description: >-
|
|
Tool call type identifier, always "function_call"
|
|
id:
|
|
type: string
|
|
description: >-
|
|
(Optional) Additional identifier for the tool call
|
|
status:
|
|
type: string
|
|
description: >-
|
|
(Optional) Current status of the function call execution
|
|
additionalProperties: false
|
|
required:
|
|
- call_id
|
|
- name
|
|
- arguments
|
|
- type
|
|
title: >-
|
|
OpenAIResponseOutputMessageFunctionToolCall
|
|
description: >-
|
|
Function tool call output message for OpenAI responses.
|
|
OpenAIResponseOutputMessageMCPCall:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: Unique identifier for this MCP call
|
|
type:
|
|
type: string
|
|
const: mcp_call
|
|
default: mcp_call
|
|
description: >-
|
|
Tool call type identifier, always "mcp_call"
|
|
arguments:
|
|
type: string
|
|
description: >-
|
|
JSON string containing the MCP call arguments
|
|
name:
|
|
type: string
|
|
description: Name of the MCP method being called
|
|
server_label:
|
|
type: string
|
|
description: >-
|
|
Label identifying the MCP server handling the call
|
|
error:
|
|
type: string
|
|
description: >-
|
|
(Optional) Error message if the MCP call failed
|
|
output:
|
|
type: string
|
|
description: >-
|
|
(Optional) Output result from the successful MCP call
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- type
|
|
- arguments
|
|
- name
|
|
- server_label
|
|
title: OpenAIResponseOutputMessageMCPCall
|
|
description: >-
|
|
Model Context Protocol (MCP) call output message for OpenAI responses.
|
|
OpenAIResponseOutputMessageMCPListTools:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier for this MCP list tools operation
|
|
type:
|
|
type: string
|
|
const: mcp_list_tools
|
|
default: mcp_list_tools
|
|
description: >-
|
|
Tool call type identifier, always "mcp_list_tools"
|
|
server_label:
|
|
type: string
|
|
description: >-
|
|
Label identifying the MCP server providing the tools
|
|
tools:
|
|
type: array
|
|
items:
|
|
type: object
|
|
properties:
|
|
input_schema:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
JSON schema defining the tool's input parameters
|
|
name:
|
|
type: string
|
|
description: Name of the tool
|
|
description:
|
|
type: string
|
|
description: >-
|
|
(Optional) Description of what the tool does
|
|
additionalProperties: false
|
|
required:
|
|
- input_schema
|
|
- name
|
|
title: MCPListToolsTool
|
|
description: >-
|
|
Tool definition returned by MCP list tools operation.
|
|
description: >-
|
|
List of available tools provided by the MCP server
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- type
|
|
- server_label
|
|
- tools
|
|
title: OpenAIResponseOutputMessageMCPListTools
|
|
description: >-
|
|
MCP list tools output message containing available tools from an MCP server.
|
|
"OpenAIResponseOutputMessageWebSearchToolCall":
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: Unique identifier for this tool call
|
|
status:
|
|
type: string
|
|
description: >-
|
|
Current status of the web search operation
|
|
type:
|
|
type: string
|
|
const: web_search_call
|
|
default: web_search_call
|
|
description: >-
|
|
Tool call type identifier, always "web_search_call"
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- status
|
|
- type
|
|
title: >-
|
|
OpenAIResponseOutputMessageWebSearchToolCall
|
|
description: >-
|
|
Web search tool call output message for OpenAI responses.
|
|
CreateConversationRequest:
|
|
type: object
|
|
properties:
|
|
items:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/ConversationItem'
|
|
description: >-
|
|
Initial items to include in the conversation context.
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
type: string
|
|
description: >-
|
|
Set of key-value pairs that can be attached to an object.
|
|
additionalProperties: false
|
|
title: CreateConversationRequest
|
|
Conversation:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
object:
|
|
type: string
|
|
const: conversation
|
|
default: conversation
|
|
created_at:
|
|
type: integer
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
type: string
|
|
        items:
          type: array
          items:
            type: object
            title: dict
            description: >-
              (Optional) Items in the conversation, each represented as a free-form
              JSON object (dictionary).
additionalProperties: false
|
|
required:
|
|
- id
|
|
- object
|
|
- created_at
|
|
title: Conversation
|
|
description: OpenAI-compatible conversation object.
|
|
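    # Illustrative sketch: an example object conforming to the Conversation schema
    # above. The identifier, timestamp, and metadata values are hypothetical.
    #
    #   {
    #     "id": "conv_abc123",
    #     "object": "conversation",
    #     "created_at": 1730000000,
    #     "metadata": {"project": "demo"},
    #     "items": []
    #   }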
UpdateConversationRequest:
|
|
type: object
|
|
properties:
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
type: string
|
|
description: >-
|
|
Set of key-value pairs that can be attached to an object.
|
|
additionalProperties: false
|
|
required:
|
|
- metadata
|
|
title: UpdateConversationRequest
|
|
ConversationDeletedResource:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
object:
|
|
type: string
|
|
default: conversation.deleted
|
|
deleted:
|
|
type: boolean
|
|
default: true
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- object
|
|
- deleted
|
|
title: ConversationDeletedResource
|
|
description: Response for deleted conversation.
|
|
ConversationItemList:
|
|
type: object
|
|
properties:
|
|
object:
|
|
type: string
|
|
default: list
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/ConversationItem'
|
|
first_id:
|
|
type: string
|
|
last_id:
|
|
type: string
|
|
has_more:
|
|
type: boolean
|
|
default: false
|
|
additionalProperties: false
|
|
required:
|
|
- object
|
|
- data
|
|
- has_more
|
|
title: ConversationItemList
|
|
description: >-
|
|
List of conversation items with pagination.
|
|
AddItemsRequest:
|
|
type: object
|
|
properties:
|
|
items:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/ConversationItem'
|
|
description: >-
|
|
Items to include in the conversation context.
|
|
additionalProperties: false
|
|
required:
|
|
- items
|
|
title: AddItemsRequest
|
|
ConversationItemDeletedResource:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
object:
|
|
type: string
|
|
default: conversation.item.deleted
|
|
deleted:
|
|
type: boolean
|
|
default: true
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- object
|
|
- deleted
|
|
title: ConversationItemDeletedResource
|
|
description: Response for deleted conversation item.
|
|
OpenAIEmbeddingsRequestWithExtraBody:
|
|
type: object
|
|
properties:
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The identifier of the model to use. The model must be an embedding model
|
|
registered with Llama Stack and available via the /models endpoint.
|
|
input:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
Input text to embed, encoded as a string or array of strings. To embed
|
|
multiple inputs in a single request, pass an array of strings.
|
|
encoding_format:
|
|
type: string
|
|
default: float
|
|
description: >-
|
|
(Optional) The format to return the embeddings in. Can be either "float"
|
|
or "base64". Defaults to "float".
|
|
dimensions:
|
|
type: integer
|
|
description: >-
|
|
(Optional) The number of dimensions the resulting output embeddings should
|
|
have. Only supported in text-embedding-3 and later models.
|
|
user:
|
|
type: string
|
|
description: >-
|
|
            (Optional) A unique identifier representing your end-user, which can
            help the provider monitor and detect abuse.
|
|
additionalProperties: false
|
|
required:
|
|
- model
|
|
- input
|
|
title: OpenAIEmbeddingsRequestWithExtraBody
|
|
description: >-
|
|
Request parameters for OpenAI-compatible embeddings endpoint.
|
|
OpenAIEmbeddingData:
|
|
type: object
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: embedding
|
|
default: embedding
|
|
description: >-
|
|
The object type, which will be "embedding"
|
|
embedding:
|
|
oneOf:
|
|
- type: array
|
|
items:
|
|
type: number
|
|
- type: string
|
|
description: >-
|
|
The embedding vector as a list of floats (when encoding_format="float")
|
|
or as a base64-encoded string (when encoding_format="base64")
|
|
index:
|
|
type: integer
|
|
description: >-
|
|
The index of the embedding in the input list
|
|
additionalProperties: false
|
|
required:
|
|
- object
|
|
- embedding
|
|
- index
|
|
title: OpenAIEmbeddingData
|
|
description: >-
|
|
A single embedding data object from an OpenAI-compatible embeddings response.
|
|
OpenAIEmbeddingUsage:
|
|
type: object
|
|
properties:
|
|
prompt_tokens:
|
|
type: integer
|
|
description: The number of tokens in the input
|
|
total_tokens:
|
|
type: integer
|
|
description: The total number of tokens used
|
|
additionalProperties: false
|
|
required:
|
|
- prompt_tokens
|
|
- total_tokens
|
|
title: OpenAIEmbeddingUsage
|
|
description: >-
|
|
Usage information for an OpenAI-compatible embeddings response.
|
|
OpenAIEmbeddingsResponse:
|
|
type: object
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: list
|
|
default: list
|
|
description: The object type, which will be "list"
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIEmbeddingData'
|
|
description: List of embedding data objects
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The model that was used to generate the embeddings
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIEmbeddingUsage'
|
|
description: Usage information
|
|
additionalProperties: false
|
|
required:
|
|
- object
|
|
- data
|
|
- model
|
|
- usage
|
|
title: OpenAIEmbeddingsResponse
|
|
description: >-
|
|
Response from an OpenAI-compatible embeddings request.
|
|
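    # Illustrative sketch: an example payload conforming to the
    # OpenAIEmbeddingsResponse schema above (with encoding_format="float"). The model
    # name, vector values, and token counts are hypothetical; real embedding vectors
    # have hundreds of dimensions.
    #
    #   {
    #     "object": "list",
    #     "data": [
    #       {"object": "embedding", "embedding": [0.012, -0.034, 0.056], "index": 0}
    #     ],
    #     "model": "all-MiniLM-L6-v2",
    #     "usage": {"prompt_tokens": 5, "total_tokens": 5}
    #   }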
OpenAIFilePurpose:
|
|
type: string
|
|
enum:
|
|
- assistants
|
|
- batch
|
|
title: OpenAIFilePurpose
|
|
description: >-
|
|
Valid purpose values for OpenAI Files API.
|
|
ListOpenAIFileResponse:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIFileObject'
|
|
description: List of file objects
|
|
has_more:
|
|
type: boolean
|
|
description: >-
|
|
Whether there are more files available beyond this page
|
|
first_id:
|
|
type: string
|
|
description: >-
|
|
ID of the first file in the list for pagination
|
|
last_id:
|
|
type: string
|
|
description: >-
|
|
ID of the last file in the list for pagination
|
|
object:
|
|
type: string
|
|
const: list
|
|
default: list
|
|
description: The object type, which is always "list"
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
- has_more
|
|
- first_id
|
|
- last_id
|
|
- object
|
|
title: ListOpenAIFileResponse
|
|
description: >-
|
|
Response for listing files in OpenAI Files API.
|
|
OpenAIFileObject:
|
|
type: object
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: file
|
|
default: file
|
|
description: The object type, which is always "file"
|
|
id:
|
|
type: string
|
|
description: >-
|
|
The file identifier, which can be referenced in the API endpoints
|
|
bytes:
|
|
type: integer
|
|
description: The size of the file, in bytes
|
|
created_at:
|
|
type: integer
|
|
description: >-
|
|
The Unix timestamp (in seconds) for when the file was created
|
|
expires_at:
|
|
type: integer
|
|
description: >-
|
|
The Unix timestamp (in seconds) for when the file expires
|
|
filename:
|
|
type: string
|
|
description: The name of the file
|
|
purpose:
|
|
type: string
|
|
enum:
|
|
- assistants
|
|
- batch
|
|
description: The intended purpose of the file
|
|
additionalProperties: false
|
|
required:
|
|
- object
|
|
- id
|
|
- bytes
|
|
- created_at
|
|
- expires_at
|
|
- filename
|
|
- purpose
|
|
title: OpenAIFileObject
|
|
description: >-
|
|
OpenAI File object as defined in the OpenAI Files API.
|
|
ExpiresAfter:
|
|
type: object
|
|
properties:
|
|
anchor:
|
|
type: string
|
|
const: created_at
|
|
seconds:
|
|
type: integer
|
|
additionalProperties: false
|
|
required:
|
|
- anchor
|
|
- seconds
|
|
title: ExpiresAfter
|
|
description: >-
|
|
Control expiration of uploaded files.
|
|
|
|
Params:
|
|
- anchor, must be "created_at"
|
|
- seconds, must be int between 3600 and 2592000 (1 hour to 30 days)
|
|
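    # Illustrative sketch: an expires_after value conforming to the ExpiresAfter
    # schema above, expiring an uploaded file 24 hours (86400 seconds) after creation.
    #
    #   {"anchor": "created_at", "seconds": 86400}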
OpenAIFileDeleteResponse:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: The file identifier that was deleted
|
|
object:
|
|
type: string
|
|
const: file
|
|
default: file
|
|
description: The object type, which is always "file"
|
|
deleted:
|
|
type: boolean
|
|
description: >-
|
|
Whether the file was successfully deleted
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- object
|
|
- deleted
|
|
title: OpenAIFileDeleteResponse
|
|
description: >-
|
|
Response for deleting a file in OpenAI Files API.
|
|
Response:
|
|
type: object
|
|
title: Response
|
|
HealthInfo:
|
|
type: object
|
|
properties:
|
|
status:
|
|
type: string
|
|
enum:
|
|
- OK
|
|
- Error
|
|
- Not Implemented
|
|
description: Current health status of the service
|
|
additionalProperties: false
|
|
required:
|
|
- status
|
|
title: HealthInfo
|
|
description: >-
|
|
Health status information for the service.
|
|
RouteInfo:
|
|
type: object
|
|
properties:
|
|
route:
|
|
type: string
|
|
description: The API endpoint path
|
|
method:
|
|
type: string
|
|
description: HTTP method for the route
|
|
provider_types:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
List of provider types that implement this route
|
|
additionalProperties: false
|
|
required:
|
|
- route
|
|
- method
|
|
- provider_types
|
|
title: RouteInfo
|
|
description: >-
|
|
Information about an API route including its path, method, and implementing
|
|
providers.
|
|
ListRoutesResponse:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/RouteInfo'
|
|
description: >-
|
|
List of available route information objects
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
title: ListRoutesResponse
|
|
description: >-
|
|
Response containing a list of all available API routes.
|
|
OpenAIModel:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
object:
|
|
type: string
|
|
const: model
|
|
default: model
|
|
created:
|
|
type: integer
|
|
owned_by:
|
|
type: string
|
|
custom_metadata:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- object
|
|
- created
|
|
- owned_by
|
|
title: OpenAIModel
|
|
description: A model from OpenAI.
|
|
OpenAIListModelsResponse:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIModel'
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
title: OpenAIListModelsResponse
|
|
ModelType:
|
|
type: string
|
|
enum:
|
|
- llm
|
|
- embedding
|
|
- rerank
|
|
title: ModelType
|
|
description: >-
|
|
Enumeration of supported model types in Llama Stack.
|
|
RegisterModelRequest:
|
|
type: object
|
|
properties:
|
|
model_id:
|
|
type: string
|
|
description: The identifier of the model to register.
|
|
provider_model_id:
|
|
type: string
|
|
description: >-
|
|
The identifier of the model in the provider.
|
|
provider_id:
|
|
type: string
|
|
description: The identifier of the provider.
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: Any additional metadata for this model.
|
|
model_type:
|
|
$ref: '#/components/schemas/ModelType'
|
|
description: The type of model to register.
|
|
additionalProperties: false
|
|
required:
|
|
- model_id
|
|
title: RegisterModelRequest
|
|
Model:
|
|
type: object
|
|
properties:
|
|
identifier:
|
|
type: string
|
|
description: >-
|
|
Unique identifier for this resource in llama stack
|
|
provider_resource_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier for this resource in the provider
|
|
provider_id:
|
|
type: string
|
|
description: >-
|
|
ID of the provider that owns this resource
|
|
type:
|
|
type: string
|
|
enum:
|
|
- model
|
|
- shield
|
|
- vector_store
|
|
- dataset
|
|
- scoring_function
|
|
- benchmark
|
|
- tool
|
|
- tool_group
|
|
- prompt
|
|
const: model
|
|
default: model
|
|
description: >-
|
|
The resource type, always 'model' for model resources
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: Any additional metadata for this model
|
|
model_type:
|
|
$ref: '#/components/schemas/ModelType'
|
|
default: llm
|
|
description: >-
|
|
The type of model (LLM or embedding model)
|
|
additionalProperties: false
|
|
required:
|
|
- identifier
|
|
- provider_id
|
|
- type
|
|
- metadata
|
|
- model_type
|
|
title: Model
|
|
description: >-
|
|
A model resource representing an AI model registered in Llama Stack.
|
|
RunModerationRequest:
|
|
type: object
|
|
properties:
|
|
input:
|
|
oneOf:
|
|
- type: string
|
|
- type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
            Input (or inputs) to classify: a single string or an array of strings.
|
|
model:
|
|
type: string
|
|
description: >-
|
|
(Optional) The content moderation model you would like to use.
|
|
additionalProperties: false
|
|
required:
|
|
- input
|
|
title: RunModerationRequest
|
|
ModerationObject:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: >-
|
|
The unique identifier for the moderation request.
|
|
model:
|
|
type: string
|
|
description: >-
|
|
The model used to generate the moderation results.
|
|
results:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/ModerationObjectResults'
|
|
description: A list of moderation objects
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- model
|
|
- results
|
|
title: ModerationObject
|
|
description: A moderation object.
|
|
ModerationObjectResults:
|
|
type: object
|
|
properties:
|
|
flagged:
|
|
type: boolean
|
|
description: >-
|
|
Whether any of the below categories are flagged.
|
|
categories:
|
|
type: object
|
|
additionalProperties:
|
|
type: boolean
|
|
description: >-
|
|
A list of the categories, and whether they are flagged or not.
|
|
category_applied_input_types:
|
|
type: object
|
|
additionalProperties:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
A list of the categories along with the input type(s) that the score applies
|
|
to.
|
|
category_scores:
|
|
type: object
|
|
additionalProperties:
|
|
type: number
|
|
description: >-
|
|
            A list of the categories along with their scores as predicted by the model.
|
|
user_message:
|
|
type: string
|
|
metadata:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
additionalProperties: false
|
|
required:
|
|
- flagged
|
|
- metadata
|
|
title: ModerationObjectResults
|
|
description: A moderation object.
|
|
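    # Illustrative sketch: an example payload conforming to the ModerationObject and
    # ModerationObjectResults schemas above. The ID, model name, category names, and
    # scores are hypothetical.
    #
    #   {
    #     "id": "modr_123",
    #     "model": "llama-guard-3-8b",
    #     "results": [
    #       {
    #         "flagged": true,
    #         "categories": {"violence": true, "hate": false},
    #         "category_scores": {"violence": 0.91, "hate": 0.02},
    #         "category_applied_input_types": {"violence": ["text"]},
    #         "metadata": {}
    #       }
    #     ]
    #   }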
Prompt:
|
|
type: object
|
|
properties:
|
|
prompt:
|
|
type: string
|
|
description: >-
|
|
The system prompt text with variable placeholders. Variables are only
|
|
supported when using the Responses API.
|
|
version:
|
|
type: integer
|
|
description: >-
|
|
Version (integer starting at 1, incremented on save)
|
|
prompt_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier formatted as 'pmpt_<48-digit-hash>'
|
|
variables:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
List of prompt variable names that can be used in the prompt template
|
|
is_default:
|
|
type: boolean
|
|
default: false
|
|
description: >-
|
|
Boolean indicating whether this version is the default version for this
|
|
prompt
|
|
additionalProperties: false
|
|
required:
|
|
- version
|
|
- prompt_id
|
|
- variables
|
|
- is_default
|
|
title: Prompt
|
|
description: >-
|
|
A prompt resource representing a stored OpenAI Compatible prompt template
|
|
in Llama Stack.
|
|
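    # Illustrative sketch: an example object conforming to the Prompt schema above.
    # The prompt text, variable placeholder syntax, and variable names are
    # hypothetical; the prompt_id keeps the documented placeholder format rather
    # than a concrete hash.
    #
    #   {
    #     "prompt_id": "pmpt_<48-digit-hash>",
    #     "prompt": "Summarize the document in the requested tone.",
    #     "version": 1,
    #     "variables": ["document", "tone"],
    #     "is_default": true
    #   }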
ListPromptsResponse:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/Prompt'
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
title: ListPromptsResponse
|
|
description: Response model to list prompts.
|
|
CreatePromptRequest:
|
|
type: object
|
|
properties:
|
|
prompt:
|
|
type: string
|
|
description: >-
|
|
The prompt text content with variable placeholders.
|
|
variables:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
List of variable names that can be used in the prompt template.
|
|
additionalProperties: false
|
|
required:
|
|
- prompt
|
|
title: CreatePromptRequest
|
|
UpdatePromptRequest:
|
|
type: object
|
|
properties:
|
|
prompt:
|
|
type: string
|
|
description: The updated prompt text content.
|
|
version:
|
|
type: integer
|
|
description: >-
|
|
The current version of the prompt being updated.
|
|
variables:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
Updated list of variable names that can be used in the prompt template.
|
|
set_as_default:
|
|
type: boolean
|
|
description: >-
|
|
Set the new version as the default (default=True).
|
|
additionalProperties: false
|
|
required:
|
|
- prompt
|
|
- version
|
|
- set_as_default
|
|
title: UpdatePromptRequest
|
|
SetDefaultVersionRequest:
|
|
type: object
|
|
properties:
|
|
version:
|
|
type: integer
|
|
description: The version to set as default.
|
|
additionalProperties: false
|
|
required:
|
|
- version
|
|
title: SetDefaultVersionRequest
|
|
ProviderInfo:
|
|
type: object
|
|
properties:
|
|
api:
|
|
type: string
|
|
description: The API name this provider implements
|
|
provider_id:
|
|
type: string
|
|
description: Unique identifier for the provider
|
|
provider_type:
|
|
type: string
|
|
description: The type of provider implementation
|
|
config:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
Configuration parameters for the provider
|
|
health:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: Current health status of the provider
|
|
additionalProperties: false
|
|
required:
|
|
- api
|
|
- provider_id
|
|
- provider_type
|
|
- config
|
|
- health
|
|
title: ProviderInfo
|
|
description: >-
|
|
Information about a registered provider including its configuration and health
|
|
status.
|
|
ListProvidersResponse:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/ProviderInfo'
|
|
description: List of provider information objects
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
title: ListProvidersResponse
|
|
description: >-
|
|
Response containing a list of all available providers.
|
|
ListOpenAIResponseObject:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseObjectWithInput'
|
|
description: >-
|
|
List of response objects with their input context
|
|
has_more:
|
|
type: boolean
|
|
description: >-
|
|
Whether there are more results available beyond this page
|
|
first_id:
|
|
type: string
|
|
description: >-
|
|
Identifier of the first item in this page
|
|
last_id:
|
|
type: string
|
|
description: Identifier of the last item in this page
|
|
object:
|
|
type: string
|
|
const: list
|
|
default: list
|
|
description: Object type identifier, always "list"
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
- has_more
|
|
- first_id
|
|
- last_id
|
|
- object
|
|
title: ListOpenAIResponseObject
|
|
description: >-
|
|
Paginated list of OpenAI response objects with navigation metadata.
OpenAIResponseError:
|
|
type: object
|
|
properties:
|
|
code:
|
|
type: string
|
|
description: >-
|
|
Error code identifying the type of failure
|
|
message:
|
|
type: string
|
|
description: >-
|
|
Human-readable error message describing the failure
|
|
additionalProperties: false
|
|
required:
|
|
- code
|
|
- message
|
|
title: OpenAIResponseError
|
|
description: >-
|
|
Error details for failed OpenAI response requests.
|
|
OpenAIResponseInput:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
OpenAIResponseInputToolFileSearch:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file_search
|
|
default: file_search
|
|
description: >-
|
|
Tool type identifier, always "file_search"
|
|
vector_store_ids:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
List of vector store identifiers to search within
|
|
filters:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
(Optional) Additional filters to apply to the search
|
|
max_num_results:
|
|
type: integer
|
|
default: 10
|
|
description: >-
|
|
(Optional) Maximum number of search results to return (1-50)
|
|
ranking_options:
|
|
type: object
|
|
properties:
|
|
ranker:
|
|
type: string
|
|
description: >-
|
|
(Optional) Name of the ranking algorithm to use
|
|
score_threshold:
|
|
type: number
|
|
default: 0.0
|
|
description: >-
|
|
(Optional) Minimum relevance score threshold for results
|
|
additionalProperties: false
|
|
description: >-
|
|
(Optional) Options for ranking and scoring search results
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- vector_store_ids
|
|
title: OpenAIResponseInputToolFileSearch
|
|
description: >-
|
|
File search tool configuration for OpenAI response inputs.
|
|
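    # Illustrative sketch: an example tool entry conforming to the
    # OpenAIResponseInputToolFileSearch schema above. The vector store ID, filter
    # key, and ranker name are hypothetical.
    #
    #   {
    #     "type": "file_search",
    #     "vector_store_ids": ["vs_123"],
    #     "filters": {"document_type": "manual"},
    #     "max_num_results": 5,
    #     "ranking_options": {"ranker": "default", "score_threshold": 0.5}
    #   }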
OpenAIResponseInputToolFunction:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: function
|
|
default: function
|
|
description: Tool type identifier, always "function"
|
|
name:
|
|
type: string
|
|
description: Name of the function that can be called
|
|
description:
|
|
type: string
|
|
description: >-
|
|
(Optional) Description of what the function does
|
|
parameters:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
(Optional) JSON schema defining the function's parameters
|
|
strict:
|
|
type: boolean
|
|
description: >-
|
|
(Optional) Whether to enforce strict parameter validation
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- name
|
|
title: OpenAIResponseInputToolFunction
|
|
description: >-
|
|
Function tool configuration for OpenAI response inputs.
|
|
OpenAIResponseInputToolWebSearch:
|
|
type: object
|
|
properties:
|
|
type:
|
|
oneOf:
|
|
- type: string
|
|
const: web_search
|
|
- type: string
|
|
const: web_search_preview
|
|
- type: string
|
|
const: web_search_preview_2025_03_11
|
|
default: web_search
|
|
description: Web search tool type variant to use
|
|
search_context_size:
|
|
type: string
|
|
default: medium
|
|
description: >-
|
|
(Optional) Size of search context, must be "low", "medium", or "high"
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
title: OpenAIResponseInputToolWebSearch
|
|
description: >-
|
|
Web search tool configuration for OpenAI response inputs.
|
|
OpenAIResponseObjectWithInput:
|
|
type: object
|
|
properties:
|
|
created_at:
|
|
type: integer
|
|
description: >-
|
|
Unix timestamp when the response was created
|
|
error:
|
|
$ref: '#/components/schemas/OpenAIResponseError'
|
|
description: >-
|
|
(Optional) Error details if the response generation failed
|
|
id:
|
|
type: string
|
|
description: Unique identifier for this response
|
|
model:
|
|
type: string
|
|
description: Model identifier used for generation
|
|
object:
|
|
type: string
|
|
const: response
|
|
default: response
|
|
description: >-
|
|
Object type identifier, always "response"
|
|
output:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseOutput'
|
|
description: >-
|
|
List of generated output items (messages, tool calls, etc.)
|
|
parallel_tool_calls:
|
|
type: boolean
|
|
default: false
|
|
description: >-
|
|
Whether tool calls can be executed in parallel
|
|
previous_response_id:
|
|
type: string
|
|
description: >-
|
|
(Optional) ID of the previous response in a conversation
|
|
prompt:
|
|
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
|
description: >-
|
|
(Optional) Reference to a prompt template and its variables.
|
|
status:
|
|
type: string
|
|
description: >-
|
|
Current status of the response generation
|
|
temperature:
|
|
type: number
|
|
description: >-
|
|
(Optional) Sampling temperature used for generation
|
|
text:
|
|
$ref: '#/components/schemas/OpenAIResponseText'
|
|
description: >-
|
|
Text formatting configuration for the response
|
|
top_p:
|
|
type: number
|
|
description: >-
|
|
(Optional) Nucleus sampling parameter used for generation
|
|
tools:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseTool'
|
|
description: >-
|
|
(Optional) An array of tools the model may call while generating a response.
|
|
truncation:
|
|
type: string
|
|
description: >-
|
|
(Optional) Truncation strategy applied to the response
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIResponseUsage'
|
|
description: >-
|
|
(Optional) Token usage information for the response
|
|
instructions:
|
|
type: string
|
|
description: >-
|
|
(Optional) System message inserted into the model's context
|
|
input:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseInput'
|
|
description: >-
|
|
List of input items that led to this response
|
|
additionalProperties: false
|
|
required:
|
|
- created_at
|
|
- id
|
|
- model
|
|
- object
|
|
- output
|
|
- parallel_tool_calls
|
|
- status
|
|
- text
|
|
- input
|
|
title: OpenAIResponseObjectWithInput
|
|
description: >-
|
|
OpenAI response object extended with input context information.
|
|
OpenAIResponseOutput:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
OpenAIResponsePrompt:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: Unique identifier of the prompt template
|
|
variables:
|
|
type: object
|
|
additionalProperties:
|
|
$ref: '#/components/schemas/OpenAIResponseInputMessageContent'
|
|
description: >-
|
|
Dictionary of variable names to OpenAIResponseInputMessageContent structure
|
|
for template substitution. The substitution values can either be strings,
|
|
or other Response input types like images or files.
|
|
version:
|
|
type: string
|
|
description: >-
|
|
Version number of the prompt to use (defaults to latest if not specified)
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
title: OpenAIResponsePrompt
|
|
description: >-
|
|
OpenAI compatible Prompt object that is used in OpenAI responses.
|
|
OpenAIResponseText:
|
|
type: object
|
|
properties:
|
|
format:
|
|
type: object
|
|
properties:
|
|
type:
|
|
oneOf:
|
|
- type: string
|
|
const: text
|
|
- type: string
|
|
const: json_schema
|
|
- type: string
|
|
const: json_object
|
|
description: >-
|
|
Must be "text", "json_schema", or "json_object" to identify the format
|
|
type
|
|
name:
|
|
type: string
|
|
description: >-
|
|
The name of the response format. Only used for json_schema.
|
|
schema:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
The JSON schema the response should conform to. In a Python SDK, this
|
|
is often a `pydantic` model. Only used for json_schema.
|
|
description:
|
|
type: string
|
|
description: >-
|
|
(Optional) A description of the response format. Only used for json_schema.
|
|
strict:
|
|
type: boolean
|
|
description: >-
|
|
(Optional) Whether to strictly enforce the JSON schema. If true, the
|
|
response must match the schema exactly. Only used for json_schema.
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
description: >-
|
|
(Optional) Text format configuration specifying output format requirements
|
|
additionalProperties: false
|
|
title: OpenAIResponseText
|
|
description: >-
|
|
Text response configuration for OpenAI responses.
|
|
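    # Illustrative sketch: an example text configuration conforming to the
    # OpenAIResponseText schema above, requesting structured JSON output. The format
    # name and JSON schema contents are hypothetical.
    #
    #   {
    #     "format": {
    #       "type": "json_schema",
    #       "name": "weather_report",
    #       "schema": {"type": "object", "properties": {"city": {"type": "string"}}},
    #       "strict": true
    #     }
    #   }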
OpenAIResponseTool:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
- $ref: '#/components/schemas/OpenAIResponseToolMCP'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
function: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
mcp: '#/components/schemas/OpenAIResponseToolMCP'
|
|
OpenAIResponseToolMCP:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: mcp
|
|
default: mcp
|
|
description: Tool type identifier, always "mcp"
|
|
server_label:
|
|
type: string
|
|
description: Label to identify this MCP server
|
|
allowed_tools:
|
|
oneOf:
|
|
- type: array
|
|
items:
|
|
type: string
|
|
- type: object
|
|
properties:
|
|
tool_names:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
(Optional) List of specific tool names that are allowed
|
|
additionalProperties: false
|
|
title: AllowedToolsFilter
|
|
description: >-
|
|
Filter configuration for restricting which MCP tools can be used.
|
|
description: >-
|
|
(Optional) Restriction on which tools can be used from this server
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- server_label
|
|
title: OpenAIResponseToolMCP
|
|
description: >-
|
|
Model Context Protocol (MCP) tool configuration for OpenAI response object.
|
|
OpenAIResponseUsage:
|
|
type: object
|
|
properties:
|
|
input_tokens:
|
|
type: integer
|
|
description: Number of tokens in the input
|
|
output_tokens:
|
|
type: integer
|
|
description: Number of tokens in the output
|
|
total_tokens:
|
|
type: integer
|
|
description: Total tokens used (input + output)
|
|
input_tokens_details:
|
|
type: object
|
|
properties:
|
|
cached_tokens:
|
|
type: integer
|
|
description: Number of tokens retrieved from cache
|
|
additionalProperties: false
|
|
description: Detailed breakdown of input token usage
|
|
output_tokens_details:
|
|
type: object
|
|
properties:
|
|
reasoning_tokens:
|
|
type: integer
|
|
description: >-
|
|
Number of tokens used for reasoning (o1/o3 models)
|
|
additionalProperties: false
|
|
description: Detailed breakdown of output token usage
|
|
additionalProperties: false
|
|
required:
|
|
- input_tokens
|
|
- output_tokens
|
|
- total_tokens
|
|
title: OpenAIResponseUsage
|
|
description: Usage information for OpenAI response.
|
|
    ResponseGuardrailSpec:
      type: object
      properties:
        type:
          type: string
          description: The type/identifier of the guardrail.
      additionalProperties: false
      required:
        - type
      title: ResponseGuardrailSpec
      description: >-
        Specification for a guardrail to apply during response generation.
    OpenAIResponseInputTool:
      oneOf:
        - $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch'
        - $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch'
        - $ref: '#/components/schemas/OpenAIResponseInputToolFunction'
        - $ref: '#/components/schemas/OpenAIResponseInputToolMCP'
      discriminator:
        propertyName: type
        mapping:
          web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch'
          file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
          function: '#/components/schemas/OpenAIResponseInputToolFunction'
          mcp: '#/components/schemas/OpenAIResponseInputToolMCP'
OpenAIResponseInputToolMCP:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: mcp
|
|
default: mcp
|
|
description: Tool type identifier, always "mcp"
|
|
server_label:
|
|
type: string
|
|
description: Label to identify this MCP server
|
|
server_url:
|
|
type: string
|
|
description: URL endpoint of the MCP server
|
|
headers:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: >-
|
|
(Optional) HTTP headers to include when connecting to the server
|
|
require_approval:
|
|
oneOf:
|
|
- type: string
|
|
const: always
|
|
- type: string
|
|
const: never
|
|
- type: object
|
|
properties:
|
|
always:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
(Optional) List of tool names that always require approval
|
|
never:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
(Optional) List of tool names that never require approval
|
|
additionalProperties: false
|
|
title: ApprovalFilter
|
|
description: >-
|
|
Filter configuration for MCP tool approval requirements.
|
|
default: never
|
|
description: >-
|
|
Approval requirement for tool calls ("always", "never", or filter)
|
|
allowed_tools:
|
|
oneOf:
|
|
- type: array
|
|
items:
|
|
type: string
|
|
- type: object
|
|
properties:
|
|
tool_names:
|
|
type: array
|
|
items:
|
|
type: string
|
|
description: >-
|
|
(Optional) List of specific tool names that are allowed
|
|
additionalProperties: false
|
|
title: AllowedToolsFilter
|
|
description: >-
|
|
Filter configuration for restricting which MCP tools can be used.
|
|
description: >-
|
|
(Optional) Restriction on which tools can be used from this server
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- server_label
|
|
- server_url
|
|
- require_approval
|
|
title: OpenAIResponseInputToolMCP
|
|
description: >-
|
|
Model Context Protocol (MCP) tool configuration for OpenAI response inputs.
|
|
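    # Illustrative sketch: attaching an MCP server as an input tool, requiring
    # approval only for a destructive tool. The URL, server label, and tool
    # names are placeholders.
    #   tools:
    #     - type: mcp
    #       server_label: ticket-system
    #       server_url: https://mcp.example.com/sse
    #       require_approval:
    #         always:
    #           - delete_ticket
    #         never:
    #           - list_tickets
    #       allowed_tools:
    #         - list_tickets
    #         - delete_ticket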
    CreateOpenaiResponseRequest:
      type: object
      properties:
        input:
          oneOf:
            - type: string
            - type: array
              items:
                $ref: '#/components/schemas/OpenAIResponseInput'
          description: Input message(s) to create the response.
        model:
          type: string
          description: The underlying LLM used for completions.
        prompt:
          $ref: '#/components/schemas/OpenAIResponsePrompt'
          description: >-
            (Optional) Prompt object with ID, version, and variables.
        instructions:
          type: string
        previous_response_id:
          type: string
          description: >-
            (Optional) If specified, the new response will be a continuation of the
            previous response. This can be used to easily fork off new responses from
            existing responses.
        conversation:
          type: string
          description: >-
            (Optional) The ID of a conversation to add the response to. Must begin
            with 'conv_'. Input and output messages will be automatically added to
            the conversation.
        store:
          type: boolean
        stream:
          type: boolean
        temperature:
          type: number
        text:
          $ref: '#/components/schemas/OpenAIResponseText'
        tools:
          type: array
          items:
            $ref: '#/components/schemas/OpenAIResponseInputTool'
        include:
          type: array
          items:
            type: string
          description: >-
            (Optional) Additional fields to include in the response.
        max_infer_iters:
          type: integer
      additionalProperties: false
      required:
        - input
        - model
      title: CreateOpenaiResponseRequest
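    # Illustrative sketch: a minimal request body for this schema. The model id
    # is a placeholder for whatever model is registered with the stack.
    #   {
    #     "model": "meta-llama/Llama-3.2-3B-Instruct",
    #     "input": "Summarize the purpose of the batches API in one sentence.",
    #     "temperature": 0.2,
    #     "stream": false,
    #     "store": true
    #   }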
    OpenAIResponseObject:
      type: object
      properties:
        created_at:
          type: integer
          description: >-
            Unix timestamp when the response was created
        error:
          $ref: '#/components/schemas/OpenAIResponseError'
          description: >-
            (Optional) Error details if the response generation failed
        id:
          type: string
          description: Unique identifier for this response
        model:
          type: string
          description: Model identifier used for generation
        object:
          type: string
          const: response
          default: response
          description: >-
            Object type identifier, always "response"
        output:
          type: array
          items:
            $ref: '#/components/schemas/OpenAIResponseOutput'
          description: >-
            List of generated output items (messages, tool calls, etc.)
        parallel_tool_calls:
          type: boolean
          default: false
          description: >-
            Whether tool calls can be executed in parallel
        previous_response_id:
          type: string
          description: >-
            (Optional) ID of the previous response in a conversation
        prompt:
          $ref: '#/components/schemas/OpenAIResponsePrompt'
          description: >-
            (Optional) Reference to a prompt template and its variables.
        status:
          type: string
          description: >-
            Current status of the response generation
        temperature:
          type: number
          description: >-
            (Optional) Sampling temperature used for generation
        text:
          $ref: '#/components/schemas/OpenAIResponseText'
          description: >-
            Text formatting configuration for the response
        top_p:
          type: number
          description: >-
            (Optional) Nucleus sampling parameter used for generation
        tools:
          type: array
          items:
            $ref: '#/components/schemas/OpenAIResponseTool'
          description: >-
            (Optional) An array of tools the model may call while generating a response.
        truncation:
          type: string
          description: >-
            (Optional) Truncation strategy applied to the response
        usage:
          $ref: '#/components/schemas/OpenAIResponseUsage'
          description: >-
            (Optional) Token usage information for the response
        instructions:
          type: string
          description: >-
            (Optional) System message inserted into the model's context
      additionalProperties: false
      required:
        - created_at
        - id
        - model
        - object
        - output
        - parallel_tool_calls
        - status
        - text
      title: OpenAIResponseObject
      description: >-
        Complete OpenAI response object containing generation results and metadata.
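    # Illustrative, trimmed sketch of a completed response object. Nested output
    # items are abridged; identifiers, timestamp, and message text are placeholders.
    #   {
    #     "id": "resp_123",
    #     "object": "response",
    #     "created_at": 1733300000,
    #     "model": "meta-llama/Llama-3.2-3B-Instruct",
    #     "status": "completed",
    #     "parallel_tool_calls": false,
    #     "text": { "format": { "type": "text" } },
    #     "output": [
    #       { "type": "message", "role": "assistant",
    #         "content": [ { "type": "output_text", "text": "Hello!" } ] }
    #     ]
    #   }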
OpenAIResponseContentPartOutputText:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: output_text
|
|
default: output_text
|
|
description: >-
|
|
Content part type identifier, always "output_text"
|
|
text:
|
|
type: string
|
|
description: Text emitted for this content part
|
|
annotations:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseAnnotations'
|
|
description: >-
|
|
Structured annotations associated with the text
|
|
logprobs:
|
|
type: array
|
|
items:
|
|
type: object
|
|
additionalProperties:
|
|
oneOf:
|
|
- type: 'null'
|
|
- type: boolean
|
|
- type: number
|
|
- type: string
|
|
- type: array
|
|
- type: object
|
|
description: (Optional) Token log probability details
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- text
|
|
- annotations
|
|
title: OpenAIResponseContentPartOutputText
|
|
description: >-
|
|
Text content within a streamed response part.
|
|
"OpenAIResponseContentPartReasoningSummary":
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: summary_text
|
|
default: summary_text
|
|
description: >-
|
|
Content part type identifier, always "summary_text"
|
|
text:
|
|
type: string
|
|
description: Summary text
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- text
|
|
title: >-
|
|
OpenAIResponseContentPartReasoningSummary
|
|
description: >-
|
|
Reasoning summary part in a streamed response.
|
|
OpenAIResponseContentPartReasoningText:
|
|
type: object
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: reasoning_text
|
|
default: reasoning_text
|
|
description: >-
|
|
Content part type identifier, always "reasoning_text"
|
|
text:
|
|
type: string
|
|
description: Reasoning text supplied by the model
|
|
additionalProperties: false
|
|
required:
|
|
- type
|
|
- text
|
|
title: OpenAIResponseContentPartReasoningText
|
|
description: >-
|
|
Reasoning text emitted as part of a streamed response.
|
|
OpenAIResponseObjectStream:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseInProgress'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputItemAdded'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputItemDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDelta'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseWebSearchCallInProgress'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseWebSearchCallSearching'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseWebSearchCallCompleted'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpListToolsInProgress'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpListToolsFailed'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpListToolsCompleted'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallArgumentsDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallInProgress'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallFailed'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallCompleted'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseContentPartAdded'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseContentPartDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningTextDelta'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningTextDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryPartDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryTextDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseRefusalDelta'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseRefusalDone'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseFileSearchCallInProgress'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseFileSearchCallSearching'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseFileSearchCallCompleted'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseIncomplete'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseFailed'
|
|
- $ref: '#/components/schemas/OpenAIResponseObjectStreamResponseCompleted'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
response.created: '#/components/schemas/OpenAIResponseObjectStreamResponseCreated'
|
|
response.in_progress: '#/components/schemas/OpenAIResponseObjectStreamResponseInProgress'
|
|
response.output_item.added: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputItemAdded'
|
|
response.output_item.done: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputItemDone'
|
|
response.output_text.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDelta'
|
|
response.output_text.done: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextDone'
|
|
response.function_call_arguments.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta'
|
|
response.function_call_arguments.done: '#/components/schemas/OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone'
|
|
response.web_search_call.in_progress: '#/components/schemas/OpenAIResponseObjectStreamResponseWebSearchCallInProgress'
|
|
response.web_search_call.searching: '#/components/schemas/OpenAIResponseObjectStreamResponseWebSearchCallSearching'
|
|
response.web_search_call.completed: '#/components/schemas/OpenAIResponseObjectStreamResponseWebSearchCallCompleted'
|
|
response.mcp_list_tools.in_progress: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpListToolsInProgress'
|
|
response.mcp_list_tools.failed: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpListToolsFailed'
|
|
response.mcp_list_tools.completed: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpListToolsCompleted'
|
|
response.mcp_call.arguments.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta'
|
|
response.mcp_call.arguments.done: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallArgumentsDone'
|
|
response.mcp_call.in_progress: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallInProgress'
|
|
response.mcp_call.failed: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallFailed'
|
|
response.mcp_call.completed: '#/components/schemas/OpenAIResponseObjectStreamResponseMcpCallCompleted'
|
|
response.content_part.added: '#/components/schemas/OpenAIResponseObjectStreamResponseContentPartAdded'
|
|
response.content_part.done: '#/components/schemas/OpenAIResponseObjectStreamResponseContentPartDone'
|
|
response.reasoning_text.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningTextDelta'
|
|
response.reasoning_text.done: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningTextDone'
|
|
response.reasoning_summary_part.added: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded'
|
|
response.reasoning_summary_part.done: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryPartDone'
|
|
response.reasoning_summary_text.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta'
|
|
response.reasoning_summary_text.done: '#/components/schemas/OpenAIResponseObjectStreamResponseReasoningSummaryTextDone'
|
|
response.refusal.delta: '#/components/schemas/OpenAIResponseObjectStreamResponseRefusalDelta'
|
|
response.refusal.done: '#/components/schemas/OpenAIResponseObjectStreamResponseRefusalDone'
|
|
response.output_text.annotation.added: '#/components/schemas/OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded'
|
|
response.file_search_call.in_progress: '#/components/schemas/OpenAIResponseObjectStreamResponseFileSearchCallInProgress'
|
|
response.file_search_call.searching: '#/components/schemas/OpenAIResponseObjectStreamResponseFileSearchCallSearching'
|
|
response.file_search_call.completed: '#/components/schemas/OpenAIResponseObjectStreamResponseFileSearchCallCompleted'
|
|
response.incomplete: '#/components/schemas/OpenAIResponseObjectStreamResponseIncomplete'
|
|
response.failed: '#/components/schemas/OpenAIResponseObjectStreamResponseFailed'
|
|
response.completed: '#/components/schemas/OpenAIResponseObjectStreamResponseCompleted'
|
|
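    # Illustrative sketch of a streamed sequence (shown here in SSE style), keyed
    # by the discriminator `type` values mapped above. Payloads are abridged;
    # bookkeeping fields such as sequence_number, output_index, and content_index
    # are omitted here but required by the event schemas below.
    #   data: {"type": "response.created", "response": {...}}
    #   data: {"type": "response.output_item.added", "item": {...}}
    #   data: {"type": "response.output_text.delta", "item_id": "msg_1", "delta": "Hel"}
    #   data: {"type": "response.output_text.done", "item_id": "msg_1", "text": "Hello."}
    #   data: {"type": "response.output_item.done", "item": {...}}
    #   data: {"type": "response.completed", "response": {...}}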
"OpenAIResponseObjectStreamResponseCompleted":
|
|
type: object
|
|
properties:
|
|
response:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
description: Completed response object
|
|
type:
|
|
type: string
|
|
const: response.completed
|
|
default: response.completed
|
|
description: >-
|
|
Event type identifier, always "response.completed"
|
|
additionalProperties: false
|
|
required:
|
|
- response
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseCompleted
|
|
description: >-
|
|
Streaming event indicating a response has been completed.
|
|
"OpenAIResponseObjectStreamResponseContentPartAdded":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the part within the content array
|
|
response_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the response containing this content
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the output item containing this content part
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the output item in the response
|
|
part:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartOutputText'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartReasoningText'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
output_text: '#/components/schemas/OpenAIResponseContentPartOutputText'
|
|
refusal: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
reasoning_text: '#/components/schemas/OpenAIResponseContentPartReasoningText'
|
|
description: The content part that was added
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.content_part.added
|
|
default: response.content_part.added
|
|
description: >-
|
|
Event type identifier, always "response.content_part.added"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- response_id
|
|
- item_id
|
|
- output_index
|
|
- part
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseContentPartAdded
|
|
description: >-
|
|
Streaming event for when a new content part is added to a response item.
|
|
"OpenAIResponseObjectStreamResponseContentPartDone":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the part within the content array
|
|
response_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the response containing this content
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the output item containing this content part
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the output item in the response
|
|
part:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartOutputText'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartReasoningText'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
output_text: '#/components/schemas/OpenAIResponseContentPartOutputText'
|
|
refusal: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
reasoning_text: '#/components/schemas/OpenAIResponseContentPartReasoningText'
|
|
description: The completed content part
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.content_part.done
|
|
default: response.content_part.done
|
|
description: >-
|
|
Event type identifier, always "response.content_part.done"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- response_id
|
|
- item_id
|
|
- output_index
|
|
- part
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseContentPartDone
|
|
description: >-
|
|
Streaming event for when a content part is completed.
|
|
"OpenAIResponseObjectStreamResponseCreated":
|
|
type: object
|
|
properties:
|
|
response:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
description: The response object that was created
|
|
type:
|
|
type: string
|
|
const: response.created
|
|
default: response.created
|
|
description: >-
|
|
Event type identifier, always "response.created"
|
|
additionalProperties: false
|
|
required:
|
|
- response
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseCreated
|
|
description: >-
|
|
Streaming event indicating a new response has been created.
|
|
OpenAIResponseObjectStreamResponseFailed:
|
|
type: object
|
|
properties:
|
|
response:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
description: Response object describing the failure
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.failed
|
|
default: response.failed
|
|
description: >-
|
|
Event type identifier, always "response.failed"
|
|
additionalProperties: false
|
|
required:
|
|
- response
|
|
- sequence_number
|
|
- type
|
|
title: OpenAIResponseObjectStreamResponseFailed
|
|
description: >-
|
|
Streaming event emitted when a response fails.
|
|
"OpenAIResponseObjectStreamResponseFileSearchCallCompleted":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the completed file search call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.file_search_call.completed
|
|
default: response.file_search_call.completed
|
|
description: >-
|
|
Event type identifier, always "response.file_search_call.completed"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseFileSearchCallCompleted
|
|
description: >-
|
|
Streaming event for completed file search calls.
|
|
"OpenAIResponseObjectStreamResponseFileSearchCallInProgress":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the file search call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.file_search_call.in_progress
|
|
default: response.file_search_call.in_progress
|
|
description: >-
|
|
Event type identifier, always "response.file_search_call.in_progress"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseFileSearchCallInProgress
|
|
description: >-
|
|
Streaming event for file search calls in progress.
|
|
"OpenAIResponseObjectStreamResponseFileSearchCallSearching":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the file search call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.file_search_call.searching
|
|
default: response.file_search_call.searching
|
|
description: >-
|
|
Event type identifier, always "response.file_search_call.searching"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseFileSearchCallSearching
|
|
description: >-
|
|
Streaming event for file search currently searching.
|
|
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta":
|
|
type: object
|
|
properties:
|
|
delta:
|
|
type: string
|
|
description: >-
|
|
Incremental function call arguments being added
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the function call being updated
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.function_call_arguments.delta
|
|
default: response.function_call_arguments.delta
|
|
description: >-
|
|
Event type identifier, always "response.function_call_arguments.delta"
|
|
additionalProperties: false
|
|
required:
|
|
- delta
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta
|
|
description: >-
|
|
Streaming event for incremental function call argument updates.
|
|
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone":
|
|
type: object
|
|
properties:
|
|
arguments:
|
|
type: string
|
|
description: >-
|
|
Final complete arguments JSON string for the function call
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the completed function call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.function_call_arguments.done
|
|
default: response.function_call_arguments.done
|
|
description: >-
|
|
Event type identifier, always "response.function_call_arguments.done"
|
|
additionalProperties: false
|
|
required:
|
|
- arguments
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
|
|
description: >-
|
|
Streaming event for when function call arguments are completed.
|
|
"OpenAIResponseObjectStreamResponseInProgress":
|
|
type: object
|
|
properties:
|
|
response:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
description: Current response state while in progress
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.in_progress
|
|
default: response.in_progress
|
|
description: >-
|
|
Event type identifier, always "response.in_progress"
|
|
additionalProperties: false
|
|
required:
|
|
- response
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseInProgress
|
|
description: >-
|
|
Streaming event indicating the response remains in progress.
|
|
"OpenAIResponseObjectStreamResponseIncomplete":
|
|
type: object
|
|
properties:
|
|
response:
|
|
$ref: '#/components/schemas/OpenAIResponseObject'
|
|
description: >-
|
|
Response object describing the incomplete state
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.incomplete
|
|
default: response.incomplete
|
|
description: >-
|
|
Event type identifier, always "response.incomplete"
|
|
additionalProperties: false
|
|
required:
|
|
- response
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseIncomplete
|
|
description: >-
|
|
Streaming event emitted when a response ends in an incomplete state.
|
|
"OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta":
|
|
type: object
|
|
properties:
|
|
delta:
|
|
type: string
|
|
item_id:
|
|
type: string
|
|
output_index:
|
|
type: integer
|
|
sequence_number:
|
|
type: integer
|
|
type:
|
|
type: string
|
|
const: response.mcp_call.arguments.delta
|
|
default: response.mcp_call.arguments.delta
|
|
additionalProperties: false
|
|
required:
|
|
- delta
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta
|
|
"OpenAIResponseObjectStreamResponseMcpCallArgumentsDone":
|
|
type: object
|
|
properties:
|
|
arguments:
|
|
type: string
|
|
item_id:
|
|
type: string
|
|
output_index:
|
|
type: integer
|
|
sequence_number:
|
|
type: integer
|
|
type:
|
|
type: string
|
|
const: response.mcp_call.arguments.done
|
|
default: response.mcp_call.arguments.done
|
|
additionalProperties: false
|
|
required:
|
|
- arguments
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
|
|
"OpenAIResponseObjectStreamResponseMcpCallCompleted":
|
|
type: object
|
|
properties:
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.mcp_call.completed
|
|
default: response.mcp_call.completed
|
|
description: >-
|
|
Event type identifier, always "response.mcp_call.completed"
|
|
additionalProperties: false
|
|
required:
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpCallCompleted
|
|
description: Streaming event for completed MCP calls.
|
|
"OpenAIResponseObjectStreamResponseMcpCallFailed":
|
|
type: object
|
|
properties:
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.mcp_call.failed
|
|
default: response.mcp_call.failed
|
|
description: >-
|
|
Event type identifier, always "response.mcp_call.failed"
|
|
additionalProperties: false
|
|
required:
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpCallFailed
|
|
description: Streaming event for failed MCP calls.
|
|
"OpenAIResponseObjectStreamResponseMcpCallInProgress":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the MCP call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.mcp_call.in_progress
|
|
default: response.mcp_call.in_progress
|
|
description: >-
|
|
Event type identifier, always "response.mcp_call.in_progress"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpCallInProgress
|
|
description: >-
|
|
Streaming event for MCP calls in progress.
|
|
"OpenAIResponseObjectStreamResponseMcpListToolsCompleted":
|
|
type: object
|
|
properties:
|
|
sequence_number:
|
|
type: integer
|
|
type:
|
|
type: string
|
|
const: response.mcp_list_tools.completed
|
|
default: response.mcp_list_tools.completed
|
|
additionalProperties: false
|
|
required:
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpListToolsCompleted
|
|
"OpenAIResponseObjectStreamResponseMcpListToolsFailed":
|
|
type: object
|
|
properties:
|
|
sequence_number:
|
|
type: integer
|
|
type:
|
|
type: string
|
|
const: response.mcp_list_tools.failed
|
|
default: response.mcp_list_tools.failed
|
|
additionalProperties: false
|
|
required:
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpListToolsFailed
|
|
"OpenAIResponseObjectStreamResponseMcpListToolsInProgress":
|
|
type: object
|
|
properties:
|
|
sequence_number:
|
|
type: integer
|
|
type:
|
|
type: string
|
|
const: response.mcp_list_tools.in_progress
|
|
default: response.mcp_list_tools.in_progress
|
|
additionalProperties: false
|
|
required:
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseMcpListToolsInProgress
|
|
"OpenAIResponseObjectStreamResponseOutputItemAdded":
|
|
type: object
|
|
properties:
|
|
response_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the response containing this output
|
|
item:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
description: >-
|
|
The output item that was added (message, tool call, etc.)
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of this item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.output_item.added
|
|
default: response.output_item.added
|
|
description: >-
|
|
Event type identifier, always "response.output_item.added"
|
|
additionalProperties: false
|
|
required:
|
|
- response_id
|
|
- item
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseOutputItemAdded
|
|
description: >-
|
|
Streaming event for when a new output item is added to the response.
|
|
"OpenAIResponseObjectStreamResponseOutputItemDone":
|
|
type: object
|
|
properties:
|
|
response_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the response containing this output
|
|
item:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
description: >-
|
|
The completed output item (message, tool call, etc.)
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of this item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.output_item.done
|
|
default: response.output_item.done
|
|
description: >-
|
|
Event type identifier, always "response.output_item.done"
|
|
additionalProperties: false
|
|
required:
|
|
- response_id
|
|
- item
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseOutputItemDone
|
|
description: >-
|
|
Streaming event for when an output item is completed.
|
|
"OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the item to which the annotation is being added
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the output item in the response's output array
|
|
content_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the content part within the output item
|
|
annotation_index:
|
|
type: integer
|
|
description: >-
|
|
Index of the annotation within the content part
|
|
annotation:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
|
|
url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
|
|
container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
|
|
file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
|
|
description: The annotation object being added
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.output_text.annotation.added
|
|
default: response.output_text.annotation.added
|
|
description: >-
|
|
Event type identifier, always "response.output_text.annotation.added"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- content_index
|
|
- annotation_index
|
|
- annotation
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded
|
|
description: >-
|
|
Streaming event for when an annotation is added to output text.
|
|
"OpenAIResponseObjectStreamResponseOutputTextDelta":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: Index position within the text content
|
|
delta:
|
|
type: string
|
|
description: Incremental text content being added
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the output item being updated
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.output_text.delta
|
|
default: response.output_text.delta
|
|
description: >-
|
|
Event type identifier, always "response.output_text.delta"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- delta
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseOutputTextDelta
|
|
description: >-
|
|
Streaming event for incremental text content updates.
|
|
"OpenAIResponseObjectStreamResponseOutputTextDone":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: Index position within the text content
|
|
text:
|
|
type: string
|
|
description: >-
|
|
Final complete text content of the output item
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the completed output item
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.output_text.done
|
|
default: response.output_text.done
|
|
description: >-
|
|
Event type identifier, always "response.output_text.done"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- text
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseOutputTextDone
|
|
description: >-
|
|
Streaming event for when text output is completed.
|
|
"OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the output item
|
|
output_index:
|
|
type: integer
|
|
description: Index position of the output item
|
|
part:
|
|
$ref: '#/components/schemas/OpenAIResponseContentPartReasoningSummary'
|
|
description: The summary part that was added
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
summary_index:
|
|
type: integer
|
|
description: >-
|
|
Index of the summary part within the reasoning summary
|
|
type:
|
|
type: string
|
|
const: response.reasoning_summary_part.added
|
|
default: response.reasoning_summary_part.added
|
|
description: >-
|
|
Event type identifier, always "response.reasoning_summary_part.added"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- part
|
|
- sequence_number
|
|
- summary_index
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded
|
|
description: >-
|
|
Streaming event for when a new reasoning summary part is added.
|
|
"OpenAIResponseObjectStreamResponseReasoningSummaryPartDone":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the output item
|
|
output_index:
|
|
type: integer
|
|
description: Index position of the output item
|
|
part:
|
|
$ref: '#/components/schemas/OpenAIResponseContentPartReasoningSummary'
|
|
description: The completed summary part
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
summary_index:
|
|
type: integer
|
|
description: >-
|
|
Index of the summary part within the reasoning summary
|
|
type:
|
|
type: string
|
|
const: response.reasoning_summary_part.done
|
|
default: response.reasoning_summary_part.done
|
|
description: >-
|
|
Event type identifier, always "response.reasoning_summary_part.done"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- part
|
|
- sequence_number
|
|
- summary_index
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseReasoningSummaryPartDone
|
|
description: >-
|
|
Streaming event for when a reasoning summary part is completed.
|
|
"OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta":
|
|
type: object
|
|
properties:
|
|
delta:
|
|
type: string
|
|
description: Incremental summary text being added
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the output item
|
|
output_index:
|
|
type: integer
|
|
description: Index position of the output item
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
summary_index:
|
|
type: integer
|
|
description: >-
|
|
Index of the summary part within the reasoning summary
|
|
type:
|
|
type: string
|
|
const: response.reasoning_summary_text.delta
|
|
default: response.reasoning_summary_text.delta
|
|
description: >-
|
|
Event type identifier, always "response.reasoning_summary_text.delta"
|
|
additionalProperties: false
|
|
required:
|
|
- delta
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- summary_index
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta
|
|
description: >-
|
|
Streaming event for incremental reasoning summary text updates.
|
|
"OpenAIResponseObjectStreamResponseReasoningSummaryTextDone":
|
|
type: object
|
|
properties:
|
|
text:
|
|
type: string
|
|
description: Final complete summary text
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the output item
|
|
output_index:
|
|
type: integer
|
|
description: Index position of the output item
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
summary_index:
|
|
type: integer
|
|
description: >-
|
|
Index of the summary part within the reasoning summary
|
|
type:
|
|
type: string
|
|
const: response.reasoning_summary_text.done
|
|
default: response.reasoning_summary_text.done
|
|
description: >-
|
|
Event type identifier, always "response.reasoning_summary_text.done"
|
|
additionalProperties: false
|
|
required:
|
|
- text
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- summary_index
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseReasoningSummaryTextDone
|
|
description: >-
|
|
Streaming event for when reasoning summary text is completed.
|
|
"OpenAIResponseObjectStreamResponseReasoningTextDelta":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the reasoning content part
|
|
delta:
|
|
type: string
|
|
description: Incremental reasoning text being added
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the output item being updated
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.reasoning_text.delta
|
|
default: response.reasoning_text.delta
|
|
description: >-
|
|
Event type identifier, always "response.reasoning_text.delta"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- delta
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseReasoningTextDelta
|
|
description: >-
|
|
Streaming event for incremental reasoning text updates.
|
|
"OpenAIResponseObjectStreamResponseReasoningTextDone":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the reasoning content part
|
|
text:
|
|
type: string
|
|
description: Final complete reasoning text
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the completed output item
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.reasoning_text.done
|
|
default: response.reasoning_text.done
|
|
description: >-
|
|
Event type identifier, always "response.reasoning_text.done"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- text
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseReasoningTextDone
|
|
description: >-
|
|
Streaming event for when reasoning text is completed.
|
|
"OpenAIResponseObjectStreamResponseRefusalDelta":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: Index position of the content part
|
|
delta:
|
|
type: string
|
|
description: Incremental refusal text being added
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the output item
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.refusal.delta
|
|
default: response.refusal.delta
|
|
description: >-
|
|
Event type identifier, always "response.refusal.delta"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- delta
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseRefusalDelta
|
|
description: >-
|
|
Streaming event for incremental refusal text updates.
|
|
"OpenAIResponseObjectStreamResponseRefusalDone":
|
|
type: object
|
|
properties:
|
|
content_index:
|
|
type: integer
|
|
description: Index position of the content part
|
|
refusal:
|
|
type: string
|
|
description: Final complete refusal text
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the output item
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.refusal.done
|
|
default: response.refusal.done
|
|
description: >-
|
|
Event type identifier, always "response.refusal.done"
|
|
additionalProperties: false
|
|
required:
|
|
- content_index
|
|
- refusal
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseRefusalDone
|
|
description: >-
|
|
Streaming event for when refusal text is completed.
|
|
"OpenAIResponseObjectStreamResponseWebSearchCallCompleted":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the completed web search call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.web_search_call.completed
|
|
default: response.web_search_call.completed
|
|
description: >-
|
|
Event type identifier, always "response.web_search_call.completed"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseWebSearchCallCompleted
|
|
description: >-
|
|
Streaming event for completed web search calls.
|
|
"OpenAIResponseObjectStreamResponseWebSearchCallInProgress":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
description: Unique identifier of the web search call
|
|
output_index:
|
|
type: integer
|
|
description: >-
|
|
Index position of the item in the output list
|
|
sequence_number:
|
|
type: integer
|
|
description: >-
|
|
Sequential number for ordering streaming events
|
|
type:
|
|
type: string
|
|
const: response.web_search_call.in_progress
|
|
default: response.web_search_call.in_progress
|
|
description: >-
|
|
Event type identifier, always "response.web_search_call.in_progress"
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseWebSearchCallInProgress
|
|
description: >-
|
|
Streaming event for web search calls in progress.
|
|
"OpenAIResponseObjectStreamResponseWebSearchCallSearching":
|
|
type: object
|
|
properties:
|
|
item_id:
|
|
type: string
|
|
output_index:
|
|
type: integer
|
|
sequence_number:
|
|
type: integer
|
|
type:
|
|
type: string
|
|
const: response.web_search_call.searching
|
|
default: response.web_search_call.searching
|
|
additionalProperties: false
|
|
required:
|
|
- item_id
|
|
- output_index
|
|
- sequence_number
|
|
- type
|
|
title: >-
|
|
OpenAIResponseObjectStreamResponseWebSearchCallSearching
|
|
OpenAIDeleteResponseObject:
|
|
type: object
|
|
properties:
|
|
id:
|
|
type: string
|
|
description: >-
|
|
Unique identifier of the deleted response
|
|
object:
|
|
type: string
|
|
const: response
|
|
default: response
|
|
description: >-
|
|
Object type identifier, always "response"
|
|
deleted:
|
|
type: boolean
|
|
default: true
|
|
description: Deletion confirmation flag, always True
|
|
additionalProperties: false
|
|
required:
|
|
- id
|
|
- object
|
|
- deleted
|
|
title: OpenAIDeleteResponseObject
|
|
description: >-
|
|
Response object confirming deletion of an OpenAI response.
|
|
ListOpenAIResponseInputItem:
|
|
type: object
|
|
properties:
|
|
data:
|
|
type: array
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseInput'
|
|
description: List of input items
|
|
object:
|
|
type: string
|
|
const: list
|
|
default: list
|
|
description: Object type identifier, always "list"
|
|
additionalProperties: false
|
|
required:
|
|
- data
|
|
- object
|
|
title: ListOpenAIResponseInputItem
|
|
description: >-
|
|
List container for OpenAI response input items.
|
|
    RunShieldRequest:
      type: object
      properties:
        shield_id:
          type: string
          description: The identifier of the shield to run.
        messages:
          type: array
          items:
            $ref: '#/components/schemas/OpenAIMessageParam'
          description: The messages to run the shield on.
        params:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: The parameters of the shield.
      additionalProperties: false
      required:
        - shield_id
        - messages
        - params
      title: RunShieldRequest
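    # Illustrative sketch: running a registered shield over one user message.
    # The shield id is a placeholder for whatever shield is registered.
    #   {
    #     "shield_id": "llama-guard",
    #     "messages": [
    #       { "role": "user", "content": "Tell me how to bypass a login page." }
    #     ],
    #     "params": {}
    #   }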
    RunShieldResponse:
      type: object
      properties:
        violation:
          $ref: '#/components/schemas/SafetyViolation'
          description: >-
            (Optional) Safety violation detected by the shield, if any
      additionalProperties: false
      title: RunShieldResponse
      description: Response from running a safety shield.
    SafetyViolation:
      type: object
      properties:
        violation_level:
          $ref: '#/components/schemas/ViolationLevel'
          description: Severity level of the violation
        user_message:
          type: string
          description: >-
            (Optional) Message to convey to the user about the violation
        metadata:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            Additional metadata including specific violation codes for debugging and
            telemetry
      additionalProperties: false
      required:
        - violation_level
        - metadata
      title: SafetyViolation
      description: >-
        Details of a safety violation detected by content moderation.
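    # Illustrative sketch: a violation a shield might attach to its response.
    # The category code in metadata is a placeholder.
    #   violation:
    #     violation_level: error
    #     user_message: I can't help with that request.
    #     metadata:
    #       violation_type: S2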
    ViolationLevel:
      type: string
      enum:
        - info
        - warn
        - error
      title: ViolationLevel
      description: Severity level of a safety violation.
AgentTurnInputType:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: agent_turn_input
|
|
title: Type
|
|
default: agent_turn_input
|
|
type: object
|
|
title: AgentTurnInputType
|
|
description: 'Parameter type for agent turn input.
|
|
|
|
|
|
:param type: Discriminator type. Always "agent_turn_input"'
|
|
AggregationFunctionType:
|
|
type: string
|
|
enum:
|
|
- average
|
|
- weighted_average
|
|
- median
|
|
- categorical_count
|
|
- accuracy
|
|
title: AggregationFunctionType
|
|
description: 'Types of aggregation functions for scoring results.
|
|
|
|
:cvar average: Calculate the arithmetic mean of scores
|
|
|
|
:cvar weighted_average: Calculate a weighted average of scores
|
|
|
|
:cvar median: Calculate the median value of scores
|
|
|
|
:cvar categorical_count: Count occurrences of categorical values
|
|
|
|
:cvar accuracy: Calculate accuracy as the proportion of correct answers'
|
|
AllowedToolsFilter:
|
|
properties:
|
|
tool_names:
|
|
title: Tool Names
|
|
items:
|
|
type: string
|
|
type: array
|
|
type: object
|
|
title: AllowedToolsFilter
|
|
description: 'Filter configuration for restricting which MCP tools can be used.
|
|
|
|
|
|
:param tool_names: (Optional) List of specific tool names that are allowed'
|
|
ApprovalFilter:
|
|
properties:
|
|
always:
|
|
title: Always
|
|
items:
|
|
type: string
|
|
type: array
|
|
never:
|
|
title: Never
|
|
items:
|
|
type: string
|
|
type: array
|
|
type: object
|
|
title: ApprovalFilter
|
|
description: 'Filter configuration for MCP tool approval requirements.
|
|
|
|
|
|
:param always: (Optional) List of tool names that always require approval
|
|
|
|
:param never: (Optional) List of tool names that never require approval'
|
|
ArrayType:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: array
|
|
title: Type
|
|
default: array
|
|
type: object
|
|
title: ArrayType
|
|
description: 'Parameter type for array values.
|
|
|
|
|
|
:param type: Discriminator type. Always "array"'
|
|
BasicScoringFnParams:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: basic
|
|
title: Type
|
|
default: basic
|
|
aggregation_functions:
|
|
items:
|
|
$ref: '#/components/schemas/AggregationFunctionType'
|
|
type: array
|
|
title: Aggregation Functions
|
|
description: Aggregation functions to apply to the scores of each row
|
|
type: object
|
|
title: BasicScoringFnParams
|
|
description: 'Parameters for basic scoring function configuration.
|
|
|
|
:param type: The type of scoring function parameters, always basic
|
|
|
|
:param aggregation_functions: Aggregation functions to apply to the scores
|
|
of each row'
|
|
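    # Illustrative sketch: basic scoring parameters that average per-row scores
    # and also report categorical counts. The surrounding `params` key is an
    # assumption about where this object is used, not taken from this file.
    #   params:
    #     type: basic
    #     aggregation_functions:
    #       - average
    #       - categorical_count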
Batch:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
completion_window:
|
|
type: string
|
|
title: Completion Window
|
|
created_at:
|
|
type: integer
|
|
title: Created At
|
|
endpoint:
|
|
type: string
|
|
title: Endpoint
|
|
input_file_id:
|
|
type: string
|
|
title: Input File Id
|
|
object:
|
|
type: string
|
|
const: batch
|
|
title: Object
|
|
status:
|
|
type: string
|
|
enum:
|
|
- validating
|
|
- failed
|
|
- in_progress
|
|
- finalizing
|
|
- completed
|
|
- expired
|
|
- cancelling
|
|
- cancelled
|
|
title: Status
|
|
cancelled_at:
|
|
title: Cancelled At
|
|
type: integer
|
|
cancelling_at:
|
|
title: Cancelling At
|
|
type: integer
|
|
completed_at:
|
|
title: Completed At
|
|
type: integer
|
|
error_file_id:
|
|
title: Error File Id
|
|
type: string
|
|
errors:
|
|
$ref: '#/components/schemas/Errors'
|
|
expired_at:
|
|
title: Expired At
|
|
type: integer
|
|
expires_at:
|
|
title: Expires At
|
|
type: integer
|
|
failed_at:
|
|
title: Failed At
|
|
type: integer
|
|
finalizing_at:
|
|
title: Finalizing At
|
|
type: integer
|
|
in_progress_at:
|
|
title: In Progress At
|
|
type: integer
|
|
metadata:
|
|
title: Metadata
|
|
additionalProperties:
|
|
type: string
|
|
type: object
|
|
model:
|
|
title: Model
|
|
type: string
|
|
output_file_id:
|
|
title: Output File Id
|
|
type: string
|
|
request_counts:
|
|
$ref: '#/components/schemas/BatchRequestCounts'
|
|
usage:
|
|
$ref: '#/components/schemas/BatchUsage'
|
|
additionalProperties: true
|
|
type: object
|
|
required:
|
|
- id
|
|
- completion_window
|
|
- created_at
|
|
- endpoint
|
|
- input_file_id
|
|
- object
|
|
- status
|
|
title: Batch
|
|
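    # Illustrative sketch (hypothetical IDs and timestamp): a minimal Batch object
    # for a newly created batch, covering only the required fields above.
    #   {
    #     "id": "batch_abc123",
    #     "object": "batch",
    #     "endpoint": "/v1/chat/completions",
    #     "input_file_id": "file-abc123",
    #     "completion_window": "24h",
    #     "status": "validating",
    #     "created_at": 1711471533
    #   }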
    BatchError:
      properties:
        code:
          title: Code
          type: string
        line:
          title: Line
          type: integer
        message:
          title: Message
          type: string
        param:
          title: Param
          type: string
      additionalProperties: true
      type: object
      title: BatchError
    BatchRequestCounts:
      properties:
        completed:
          type: integer
          title: Completed
        failed:
          type: integer
          title: Failed
        total:
          type: integer
          title: Total
      additionalProperties: true
      type: object
      required:
        - completed
        - failed
        - total
      title: BatchRequestCounts
    BatchUsage:
      properties:
        input_tokens:
          type: integer
          title: Input Tokens
        input_tokens_details:
          $ref: '#/components/schemas/InputTokensDetails'
        output_tokens:
          type: integer
          title: Output Tokens
        output_tokens_details:
          $ref: '#/components/schemas/OutputTokensDetails'
        total_tokens:
          type: integer
          title: Total Tokens
      additionalProperties: true
      type: object
      required:
        - input_tokens
        - input_tokens_details
        - output_tokens
        - output_tokens_details
        - total_tokens
      title: BatchUsage
    BooleanType:
      properties:
        type:
          type: string
          const: boolean
          title: Type
          default: boolean
      type: object
      title: BooleanType
      description: 'Parameter type for boolean values.


        :param type: Discriminator type. Always "boolean"'
    ChatCompletionInputType:
      properties:
        type:
          type: string
          const: chat_completion_input
          title: Type
          default: chat_completion_input
      type: object
      title: ChatCompletionInputType
      description: 'Parameter type for chat completion input.


        :param type: Discriminator type. Always "chat_completion_input"'
    Chunk-Output:
      properties:
        content:
          anyOf:
            - type: string
            - oneOf:
                - $ref: '#/components/schemas/ImageContentItem-Output'
                - $ref: '#/components/schemas/TextContentItem'
              discriminator:
                propertyName: type
                mapping:
                  image: '#/components/schemas/ImageContentItem-Output'
                  text: '#/components/schemas/TextContentItem'
            - items:
                oneOf:
                  - $ref: '#/components/schemas/ImageContentItem-Output'
                  - $ref: '#/components/schemas/TextContentItem'
                discriminator:
                  propertyName: type
                  mapping:
                    image: '#/components/schemas/ImageContentItem-Output'
                    text: '#/components/schemas/TextContentItem'
              type: array
          title: Content
        chunk_id:
          type: string
          title: Chunk Id
        metadata:
          additionalProperties: true
          type: object
          title: Metadata
        embedding:
          title: Embedding
          items:
            type: number
          type: array
        chunk_metadata:
          $ref: '#/components/schemas/ChunkMetadata'
      type: object
      required:
        - content
        - chunk_id
      title: Chunk
      description: "A chunk of content that can be inserted into a vector database.\n\
        :param content: The content of the chunk, which can be interleaved text, images,\
        \ or other types.\n:param chunk_id: Unique identifier for the chunk. Must\
        \ be provided explicitly.\n:param metadata: Metadata associated with the chunk\
        \ that will be used in the model context during inference.\n:param embedding:\
        \ Optional embedding for the chunk. If not provided, it will be computed later.\n\
        :param chunk_metadata: Metadata for the chunk that will NOT be used in the\
        \ context during inference.\n    The `chunk_metadata` is required for backend\
        \ functionality."
    ChunkMetadata:
      properties:
        chunk_id:
          title: Chunk Id
          type: string
        document_id:
          title: Document Id
          type: string
        source:
          title: Source
          type: string
        created_timestamp:
          title: Created Timestamp
          type: integer
        updated_timestamp:
          title: Updated Timestamp
          type: integer
        chunk_window:
          title: Chunk Window
          type: string
        chunk_tokenizer:
          title: Chunk Tokenizer
          type: string
        chunk_embedding_model:
          title: Chunk Embedding Model
          type: string
        chunk_embedding_dimension:
          title: Chunk Embedding Dimension
          type: integer
        content_token_count:
          title: Content Token Count
          type: integer
        metadata_token_count:
          title: Metadata Token Count
          type: integer
      type: object
      title: ChunkMetadata
      description: "`ChunkMetadata` is backend metadata for a `Chunk` that is used\
        \ to store additional information about the chunk that\n    will not be used\
        \ in the context during inference, but is required for backend functionality.\
        \ The `ChunkMetadata`\n    is set during chunk creation in `MemoryToolRuntimeImpl().insert()` and\
        \ is not expected to change after.\n    Use `Chunk.metadata` for metadata\
        \ that will be used in the context during inference.\n:param chunk_id: The\
        \ ID of the chunk. If not set, it will be generated based on the document\
        \ ID and content.\n:param document_id: The ID of the document this chunk belongs\
        \ to.\n:param source: The source of the content, such as a URL, file path,\
        \ or other identifier.\n:param created_timestamp: An optional timestamp indicating\
        \ when the chunk was created.\n:param updated_timestamp: An optional timestamp\
        \ indicating when the chunk was last updated.\n:param chunk_window: The window\
        \ of the chunk, which can be used to group related chunks together.\n:param\
        \ chunk_tokenizer: The tokenizer used to create the chunk. Default is Tiktoken.\n\
        :param chunk_embedding_model: The embedding model used to create the chunk's\
        \ embedding.\n:param chunk_embedding_dimension: The dimension of the embedding\
        \ vector for the chunk.\n:param content_token_count: The number of tokens\
        \ in the content of the chunk.\n:param metadata_token_count: The number of\
        \ tokens in the metadata of the chunk."
    CompletionInputType:
      properties:
        type:
          type: string
          const: completion_input
          title: Type
          default: completion_input
      type: object
      title: CompletionInputType
      description: 'Parameter type for completion input.


        :param type: Discriminator type. Always "completion_input"'
    Conversation:
      properties:
        id:
          type: string
          title: Id
          description: The unique ID of the conversation.
        object:
          type: string
          const: conversation
          title: Object
          description: The object type, which is always conversation.
          default: conversation
        created_at:
          type: integer
          title: Created At
          description: The time at which the conversation was created, measured in
            seconds since the Unix epoch.
        metadata:
          title: Metadata
          description: Set of 16 key-value pairs that can be attached to an object.
            This can be useful for storing additional information about the object
            in a structured format, and querying for objects via API or the dashboard.
          additionalProperties:
            type: string
          type: object
        items:
          title: Items
          description: Initial items to include in the conversation context. You may
            add up to 20 items at a time.
          items:
            additionalProperties: true
            type: object
          type: array
      type: object
      required:
        - id
        - created_at
      title: Conversation
      description: OpenAI-compatible conversation object.
    ScoringFnParamsType:
      type: string
      enum:
        - llm_as_judge
        - regex_parser
        - basic
      title: ScoringFnParamsType
      description: >-
        Types of scoring function parameter configurations.
    StringType:
      type: object
      properties:
        type:
          type: string
          const: string
          default: string
          description: Discriminator type. Always "string"
      additionalProperties: false
      required:
        - type
      title: StringType
      description: Parameter type for string values.
    UnionType:
      type: object
      properties:
        type:
          type: string
          const: union
          default: union
          description: Discriminator type. Always "union"
      additionalProperties: false
      required:
        - type
      title: UnionType
      description: Parameter type for union values.
    ListScoringFunctionsResponse:
      type: object
      properties:
        data:
          type: array
          items:
            $ref: '#/components/schemas/ScoringFn'
      additionalProperties: false
      required:
        - data
      title: ListScoringFunctionsResponse
    ParamType:
      oneOf:
        - $ref: '#/components/schemas/StringType'
        - $ref: '#/components/schemas/NumberType'
        - $ref: '#/components/schemas/BooleanType'
        - $ref: '#/components/schemas/ArrayType'
        - $ref: '#/components/schemas/ObjectType'
        - $ref: '#/components/schemas/JsonType'
        - $ref: '#/components/schemas/UnionType'
        - $ref: '#/components/schemas/ChatCompletionInputType'
        - $ref: '#/components/schemas/CompletionInputType'
        - $ref: '#/components/schemas/AgentTurnInputType'
      discriminator:
        propertyName: type
        mapping:
          string: '#/components/schemas/StringType'
          number: '#/components/schemas/NumberType'
          boolean: '#/components/schemas/BooleanType'
          array: '#/components/schemas/ArrayType'
          object: '#/components/schemas/ObjectType'
          json: '#/components/schemas/JsonType'
          union: '#/components/schemas/UnionType'
          chat_completion_input: '#/components/schemas/ChatCompletionInputType'
          completion_input: '#/components/schemas/CompletionInputType'
          agent_turn_input: '#/components/schemas/AgentTurnInputType'
    RegisterScoringFunctionRequest:
      type: object
      properties:
        scoring_fn_id:
          type: string
          description: >-
            The ID of the scoring function to register.
        description:
          type: string
          description: The description of the scoring function.
        return_type:
          $ref: '#/components/schemas/ParamType'
          description: The return type of the scoring function.
        provider_scoring_fn_id:
          type: string
          description: >-
            The ID of the provider scoring function to use for the scoring function.
        provider_id:
          type: string
          description: >-
            The ID of the provider to use for the scoring function.
        params:
          $ref: '#/components/schemas/ScoringFnParams'
          description: >-
            The parameters for the scoring function for benchmark eval, these can
            be overridden for app eval.
      additionalProperties: false
      required:
        - scoring_fn_id
        - description
        - return_type
      title: RegisterScoringFunctionRequest
    ScoreRequest:
      type: object
      properties:
        input_rows:
          type: array
          items:
            type: object
            additionalProperties:
              oneOf:
                - type: 'null'
                - type: boolean
                - type: number
                - type: string
                - type: array
                - type: object
          description: The rows to score.
        scoring_functions:
          type: object
          additionalProperties:
            oneOf:
              - $ref: '#/components/schemas/ScoringFnParams'
              - type: 'null'
          description: >-
            The scoring functions to use for the scoring.
      additionalProperties: false
      required:
        - input_rows
        - scoring_functions
      title: ScoreRequest
    ScoreResponse:
      type: object
      properties:
        results:
          type: object
          additionalProperties:
            $ref: '#/components/schemas/ScoringResult'
          description: >-
            A map of scoring function name to ScoringResult.
      additionalProperties: false
      required:
        - results
      title: ScoreResponse
      description: The response from scoring.
    ScoringResult:
      type: object
      properties:
        score_rows:
          type: array
          items:
            type: object
            additionalProperties:
              oneOf:
                - type: 'null'
                - type: boolean
                - type: number
                - type: string
                - type: array
                - type: object
          description: >-
            The scoring result for each row. Each row is a map of column name to value.
        aggregated_results:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: Map of metric name to aggregated value
      additionalProperties: false
      required:
        - score_rows
        - aggregated_results
      title: ScoringResult
      description: A scoring result for a single row.
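    # Illustrative sketch (the scoring function id and values are assumptions):
    # a ScoreRequest and the ScoringResult shape it maps to in ScoreResponse.results.
    #   request:  {"input_rows": [{"question": "2+2?", "generated_answer": "4"}],
    #              "scoring_functions": {"basic::equality": null}}
    #   response: {"results": {"basic::equality": {
    #                "score_rows": [{"score": 1.0}],
    #                "aggregated_results": {"accuracy": 1.0}}}}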
    ScoreBatchRequest:
      type: object
      properties:
        dataset_id:
          type: string
          description: The ID of the dataset to score.
        scoring_functions:
          type: object
          additionalProperties:
            oneOf:
              - $ref: '#/components/schemas/ScoringFnParams'
              - type: 'null'
          description: >-
            The scoring functions to use for the scoring.
        save_results_dataset:
          type: boolean
          description: >-
            Whether to save the results to a dataset.
      additionalProperties: false
      required:
        - dataset_id
        - scoring_functions
        - save_results_dataset
      title: ScoreBatchRequest
    ScoreBatchResponse:
      type: object
      properties:
        dataset_id:
          type: string
          description: >-
            (Optional) The identifier of the dataset that was scored
        results:
          type: object
          additionalProperties:
            $ref: '#/components/schemas/ScoringResult'
          description: >-
            A map of scoring function name to ScoringResult
      additionalProperties: false
      required:
        - results
      title: ScoreBatchResponse
      description: >-
        Response from batch scoring operations on datasets.
    Shield:
      type: object
      properties:
        identifier:
          type: string
        provider_resource_id:
          type: string
        provider_id:
          type: string
        type:
          type: string
          enum:
            - model
            - shield
            - vector_store
            - dataset
            - scoring_function
            - benchmark
            - tool
            - tool_group
            - prompt
          const: shield
          default: shield
          description: The resource type, always shield
        params:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) Configuration parameters for the shield
      additionalProperties: false
      required:
        - identifier
        - provider_id
        - type
      title: Shield
      description: >-
        A safety shield resource that can be used to check content.
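    # Illustrative sketch (hypothetical identifiers): a registered Shield resource
    # as it might appear in ListShieldsResponse.data.
    #   {
    #     "identifier": "content-safety",
    #     "provider_id": "llama-guard",
    #     "provider_resource_id": "llama-guard-shield",
    #     "type": "shield",
    #     "params": {}
    #   }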
    ListShieldsResponse:
      type: object
      properties:
        data:
          type: array
          items:
            $ref: '#/components/schemas/Shield'
      additionalProperties: false
      required:
        - data
      title: ListShieldsResponse
    RegisterShieldRequest:
      type: object
      properties:
        shield_id:
          type: string
          description: >-
            The identifier of the shield to register.
        provider_shield_id:
          type: string
          description: >-
            The identifier of the shield in the provider.
        provider_id:
          type: string
          description: The identifier of the provider.
        params:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: The parameters of the shield.
      additionalProperties: false
      required:
        - shield_id
      title: RegisterShieldRequest
    InvokeToolRequest:
      type: object
      properties:
        tool_name:
          type: string
          description: The name of the tool to invoke.
        kwargs:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            A dictionary of arguments to pass to the tool.
      additionalProperties: false
      required:
        - tool_name
        - kwargs
      title: InvokeToolRequest
    ImageContentItem:
      type: object
      properties:
        type:
          type: string
          const: image
          default: image
          description: >-
            Discriminator type of the content item. Always "image"
        image:
          type: object
          properties:
            url:
              $ref: '#/components/schemas/URL'
              description: >-
                A URL of the image or data URL in the format of data:image/{type};base64,{data}.
                Note that URL could have length limits.
            data:
              type: string
              contentEncoding: base64
              description: base64 encoded image data as string
          additionalProperties: false
          description: >-
            Image as a base64 encoded string or a URL
      additionalProperties: false
      required:
        - type
        - image
      title: ImageContentItem
      description: An image content item
    InterleavedContent:
      oneOf:
        - type: string
        - $ref: '#/components/schemas/InterleavedContentItem'
        - type: array
          items:
            $ref: '#/components/schemas/InterleavedContentItem'
    InterleavedContentItem:
      oneOf:
        - $ref: '#/components/schemas/ImageContentItem'
        - $ref: '#/components/schemas/TextContentItem'
      discriminator:
        propertyName: type
        mapping:
          image: '#/components/schemas/ImageContentItem'
          text: '#/components/schemas/TextContentItem'
    TextContentItem:
      type: object
      properties:
        type:
          type: string
          const: text
          default: text
          description: >-
            Discriminator type of the content item. Always "text"
        text:
          type: string
          description: Text content
      additionalProperties: false
      required:
        - type
        - text
      title: TextContentItem
      description: A text content item
    ToolInvocationResult:
      type: object
      properties:
        content:
          $ref: '#/components/schemas/InterleavedContent'
          description: >-
            (Optional) The output content from the tool execution
        error_message:
          type: string
          description: >-
            (Optional) Error message if the tool execution failed
        error_code:
          type: integer
          description: >-
            (Optional) Numeric error code if the tool execution failed
        metadata:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) Additional metadata about the tool execution
      additionalProperties: false
      title: ToolInvocationResult
      description: Result of a tool invocation.
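    # Illustrative sketch (hypothetical tool name and arguments): an InvokeToolRequest
    # and a matching ToolInvocationResult with plain-text content.
    #   request:  {"tool_name": "web_search", "kwargs": {"query": "llama stack"}}
    #   response: {"content": "Top results: ...", "metadata": {"latency_ms": 120}}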
    URL:
      type: object
      properties:
        uri:
          type: string
          description: The URL string pointing to the resource
      additionalProperties: false
      required:
        - uri
      title: URL
      description: A URL reference to external content.
    ToolDef:
      type: object
      properties:
        toolgroup_id:
          type: string
          description: >-
            (Optional) ID of the tool group this tool belongs to
        name:
          type: string
          description: Name of the tool
        description:
          type: string
          description: >-
            (Optional) Human-readable description of what the tool does
        input_schema:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) JSON Schema for tool inputs (MCP inputSchema)
        output_schema:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) JSON Schema for tool outputs (MCP outputSchema)
        metadata:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            (Optional) Additional metadata about the tool
      additionalProperties: false
      required:
        - name
      title: ToolDef
      description: >-
        Tool definition used in runtime contexts.
    ListToolDefsResponse:
      type: object
      properties:
        data:
          type: array
          items:
            $ref: '#/components/schemas/ToolDef'
          description: List of tool definitions
      additionalProperties: false
      required:
        - data
      title: ListToolDefsResponse
      description: >-
        Response containing a list of tool definitions.
    RAGDocument:
      type: object
      properties:
        document_id:
          type: string
          description: The unique identifier for the document.
        content:
          oneOf:
            - type: string
            - $ref: '#/components/schemas/InterleavedContentItem'
            - type: array
              items:
                $ref: '#/components/schemas/InterleavedContentItem'
            - $ref: '#/components/schemas/URL'
          description: The content of the document.
        mime_type:
          type: string
          description: The MIME type of the document.
        metadata:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: Additional metadata for the document.
      additionalProperties: false
      required:
        - document_id
        - content
        - metadata
      title: RAGDocument
      description: >-
        A document to be used for document ingestion in the RAG Tool.
    InsertRequest:
      type: object
      properties:
        documents:
          type: array
          items:
            $ref: '#/components/schemas/RAGDocument'
          description: >-
            List of documents to index in the RAG system
        vector_store_id:
          type: string
          description: >-
            ID of the vector database to store the document embeddings
        chunk_size_in_tokens:
          type: integer
          description: >-
            (Optional) Size in tokens for document chunking during indexing
      additionalProperties: false
      required:
        - documents
        - vector_store_id
        - chunk_size_in_tokens
      title: InsertRequest
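    # Illustrative sketch (hypothetical IDs and text): an InsertRequest that ingests
    # one RAGDocument into a vector store with 512-token chunks.
    #   {
    #     "documents": [{"document_id": "doc-1",
    #                    "content": "Llama Stack is ...",
    #                    "mime_type": "text/plain",
    #                    "metadata": {"source": "handbook"}}],
    #     "vector_store_id": "vs_123",
    #     "chunk_size_in_tokens": 512
    #   }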
    DefaultRAGQueryGeneratorConfig:
      properties:
        type:
          type: string
          const: default
          title: Type
          default: default
        separator:
          type: string
          title: Separator
          default: ' '
      type: object
      title: DefaultRAGQueryGeneratorConfig
      description: 'Configuration for the default RAG query generator.


        :param type: Type of query generator, always ''default''

        :param separator: String separator used to join query terms'
    Errors:
      properties:
        data:
          title: Data
          items:
            $ref: '#/components/schemas/BatchError'
          type: array
        object:
          title: Object
          type: string
      additionalProperties: true
      type: object
      title: Errors
    HealthInfo:
      properties:
        status:
          $ref: '#/components/schemas/HealthStatus'
      type: object
      required:
        - status
      title: HealthInfo
      description: 'Health status information for the service.


        :param status: Current health status of the service'
    HealthStatus:
      type: string
      enum:
        - OK
        - Error
        - Not Implemented
      title: HealthStatus
    ImageContentItem-Output:
      properties:
        type:
          type: string
          const: image
          title: Type
          default: image
        image:
          $ref: '#/components/schemas/_URLOrData'
      type: object
      required:
        - image
      title: ImageContentItem
      description: 'An image content item


        :param type: Discriminator type of the content item. Always "image"

        :param image: Image as a base64 encoded string or a URL'
    InputTokensDetails:
      properties:
        cached_tokens:
          type: integer
          title: Cached Tokens
      additionalProperties: true
      type: object
      required:
        - cached_tokens
      title: InputTokensDetails
    JsonType:
      properties:
        type:
          type: string
          const: json
          title: Type
          default: json
      type: object
      title: JsonType
      description: 'Parameter type for JSON values.


        :param type: Discriminator type. Always "json"'
    LLMAsJudgeScoringFnParams:
      properties:
        type:
          type: string
          const: llm_as_judge
          title: Type
          default: llm_as_judge
        judge_model:
          type: string
          title: Judge Model
        prompt_template:
          title: Prompt Template
          type: string
        judge_score_regexes:
          items:
            type: string
          type: array
          title: Judge Score Regexes
          description: Regexes to extract the answer from generated response
        aggregation_functions:
          items:
            $ref: '#/components/schemas/AggregationFunctionType'
          type: array
          title: Aggregation Functions
          description: Aggregation functions to apply to the scores of each row
      type: object
      required:
        - judge_model
      title: LLMAsJudgeScoringFnParams
      description: 'Parameters for LLM-as-judge scoring function configuration.


        :param type: The type of scoring function parameters, always llm_as_judge

        :param judge_model: Identifier of the LLM model to use as a judge for scoring

        :param prompt_template: (Optional) Custom prompt template for the judge model

        :param judge_score_regexes: Regexes to extract the answer from generated response

        :param aggregation_functions: Aggregation functions to apply to the scores
        of each row'
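    # Illustrative sketch (model id and regex are assumptions): LLM-as-judge scoring
    # parameters that extract a single-digit score from the judge's output.
    #   {
    #     "type": "llm_as_judge",
    #     "judge_model": "meta-llama/Llama-3.3-70B-Instruct",
    #     "prompt_template": "Rate the answer from 1 to 5: {generated_answer}",
    #     "judge_score_regexes": ["Score: (\\d)"],
    #     "aggregation_functions": ["average"]
    #   }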
    LLMRAGQueryGeneratorConfig:
      properties:
        type:
          type: string
          const: llm
          title: Type
          default: llm
        model:
          type: string
          title: Model
        template:
          type: string
          title: Template
      type: object
      required:
        - model
        - template
      title: LLMRAGQueryGeneratorConfig
      description: 'Configuration for the LLM-based RAG query generator.


        :param type: Type of query generator, always ''llm''

        :param model: Name of the language model to use for query generation

        :param template: Template string for formatting the query generation prompt'
    ListModelsResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/Model'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListModelsResponse
    ListPromptsResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/Prompt'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListPromptsResponse
      description: Response model to list prompts.
    ListProvidersResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/ProviderInfo'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListProvidersResponse
      description: 'Response containing a list of all available providers.


        :param data: List of provider information objects'
    ListRoutesResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/RouteInfo'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListRoutesResponse
      description: 'Response containing a list of all available API routes.


        :param data: List of available route information objects'
    ListScoringFunctionsResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/ScoringFn-Output'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListScoringFunctionsResponse
    ListShieldsResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/Shield'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListShieldsResponse
    ListToolGroupsResponse:
      properties:
        data:
          items:
            $ref: '#/components/schemas/ToolGroup'
          type: array
          title: Data
      type: object
      required:
        - data
      title: ListToolGroupsResponse
      description: 'Response containing a list of tool groups.


        :param data: List of tool groups'
    MCPListToolsTool:
      properties:
        input_schema:
          additionalProperties: true
          type: object
          title: Input Schema
        name:
          type: string
          title: Name
        description:
          title: Description
          type: string
      type: object
      required:
        - input_schema
        - name
      title: MCPListToolsTool
      description: 'Tool definition returned by MCP list tools operation.


        :param input_schema: JSON schema defining the tool''s input parameters

        :param name: Name of the tool

        :param description: (Optional) Description of what the tool does'
    Model:
      properties:
        identifier:
          type: string
          title: Identifier
          description: Unique identifier for this resource in llama stack
        provider_resource_id:
          title: Provider Resource Id
          description: Unique identifier for this resource in the provider
          type: string
        provider_id:
          type: string
          title: Provider Id
          description: ID of the provider that owns this resource
        type:
          type: string
          const: model
          title: Type
          default: model
        metadata:
          additionalProperties: true
          type: object
          title: Metadata
          description: Any additional metadata for this model
        model_type:
          $ref: '#/components/schemas/ModelType'
          default: llm
      type: object
      required:
        - identifier
        - provider_id
      title: Model
      description: 'A model resource representing an AI model registered in Llama
        Stack.


        :param type: The resource type, always ''model'' for model resources

        :param model_type: The type of model (LLM or embedding model)

        :param metadata: Any additional metadata for this model

        :param identifier: Unique identifier for this resource in llama stack

        :param provider_resource_id: Unique identifier for this resource in the provider

        :param provider_id: ID of the provider that owns this resource'
    ModelType:
      type: string
      enum:
        - llm
        - embedding
        - rerank
      title: ModelType
      description: 'Enumeration of supported model types in Llama Stack.


        :cvar llm: Large language model for text generation and completion

        :cvar embedding: Embedding model for converting text to vector representations

        :cvar rerank: Reranking model for reordering documents based on their relevance
        to a query'
    ModerationObject:
      properties:
        id:
          type: string
          title: Id
        model:
          type: string
          title: Model
        results:
          items:
            $ref: '#/components/schemas/ModerationObjectResults'
          type: array
          title: Results
      type: object
      required:
        - id
        - model
        - results
      title: ModerationObject
      description: 'A moderation object.


        :param id: The unique identifier for the moderation request.

        :param model: The model used to generate the moderation results.

        :param results: A list of moderation objects'
    ModerationObjectResults:
      properties:
        flagged:
          type: boolean
          title: Flagged
        categories:
          title: Categories
          additionalProperties:
            type: boolean
          type: object
        category_applied_input_types:
          title: Category Applied Input Types
          additionalProperties:
            items:
              type: string
            type: array
          type: object
        category_scores:
          title: Category Scores
          additionalProperties:
            type: number
          type: object
        user_message:
          title: User Message
          type: string
        metadata:
          additionalProperties: true
          type: object
          title: Metadata
      type: object
      required:
        - flagged
      title: ModerationObjectResults
      description: 'A moderation object.


        :param flagged: Whether any of the below categories are flagged.

        :param categories: A list of the categories, and whether they are flagged
        or not.

        :param category_applied_input_types: A list of the categories along with the
        input type(s) that the score applies to.

        :param category_scores: A list of the categories along with their scores as
        predicted by the model.'
    NumberType:
      properties:
        type:
          type: string
          const: number
          title: Type
          default: number
      type: object
      title: NumberType
      description: 'Parameter type for numeric values.


        :param type: Discriminator type. Always "number"'
    ObjectType:
      properties:
        type:
          type: string
          const: object
          title: Type
          default: object
      type: object
      title: ObjectType
      description: 'Parameter type for object values.


        :param type: Discriminator type. Always "object"'
    OpenAIAssistantMessageParam-Input:
      properties:
        role:
          type: string
          const: assistant
          title: Role
          default: assistant
        content:
          anyOf:
            - type: string
            - items:
                $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
              type: array
          title: Content
        name:
          title: Name
          type: string
        tool_calls:
          title: Tool Calls
          items:
            $ref: '#/components/schemas/OpenAIChatCompletionToolCall'
          type: array
      type: object
      title: OpenAIAssistantMessageParam
      description: 'A message containing the model''s (assistant) response in an OpenAI-compatible
        chat completion request.


        :param role: Must be "assistant" to identify this as the model''s response

        :param content: The content of the model''s response

        :param name: (Optional) The name of the assistant message participant.

        :param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall
        object.'
    OpenAIAssistantMessageParam-Output:
      properties:
        role:
          type: string
          const: assistant
          title: Role
          default: assistant
        content:
          anyOf:
            - type: string
            - items:
                $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
              type: array
          title: Content
        name:
          title: Name
          type: string
        tool_calls:
          title: Tool Calls
          items:
            $ref: '#/components/schemas/OpenAIChatCompletionToolCall'
          type: array
      type: object
      title: OpenAIAssistantMessageParam
      description: 'A message containing the model''s (assistant) response in an OpenAI-compatible
        chat completion request.


        :param role: Must be "assistant" to identify this as the model''s response

        :param content: The content of the model''s response

        :param name: (Optional) The name of the assistant message participant.

        :param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall
        object.'
    OpenAIChatCompletion:
      properties:
        id:
          type: string
          title: Id
        choices:
          items:
            $ref: '#/components/schemas/OpenAIChoice-Output'
          type: array
          title: Choices
        object:
          type: string
          const: chat.completion
          title: Object
          default: chat.completion
        created:
          type: integer
          title: Created
        model:
          type: string
          title: Model
        usage:
          $ref: '#/components/schemas/OpenAIChatCompletionUsage'
      type: object
      required:
        - id
        - choices
        - created
        - model
      title: OpenAIChatCompletion
      description: 'Response from an OpenAI-compatible chat completion request.


        :param id: The ID of the chat completion

        :param choices: List of choices

        :param object: The object type, which will be "chat.completion"

        :param created: The Unix timestamp in seconds when the chat completion was
        created

        :param model: The model that was used to generate the chat completion

        :param usage: Token usage information for the completion'
    OpenAIChatCompletionContentPartImageParam:
      properties:
        type:
          type: string
          const: image_url
          title: Type
          default: image_url
        image_url:
          $ref: '#/components/schemas/OpenAIImageURL'
      type: object
      required:
        - image_url
      title: OpenAIChatCompletionContentPartImageParam
      description: 'Image content part for OpenAI-compatible chat completion messages.


        :param type: Must be "image_url" to identify this as image content

        :param image_url: Image URL specification and processing details'
    OpenAIChatCompletionContentPartTextParam:
      properties:
        type:
          type: string
          const: text
          title: Type
          default: text
        text:
          type: string
          title: Text
      type: object
      required:
        - text
      title: OpenAIChatCompletionContentPartTextParam
      description: 'Text content part for OpenAI-compatible chat completion messages.


        :param type: Must be "text" to identify this as text content

        :param text: The text content of the message'
    OpenAIChatCompletionRequestWithExtraBody:
      properties:
        model:
          type: string
          title: Model
        messages:
          items:
            oneOf:
              - $ref: '#/components/schemas/OpenAIUserMessageParam-Input'
              - $ref: '#/components/schemas/OpenAISystemMessageParam'
              - $ref: '#/components/schemas/OpenAIAssistantMessageParam-Input'
              - $ref: '#/components/schemas/OpenAIToolMessageParam'
              - $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
            discriminator:
              propertyName: role
              mapping:
                assistant: '#/components/schemas/OpenAIAssistantMessageParam-Input'
                developer: '#/components/schemas/OpenAIDeveloperMessageParam'
                system: '#/components/schemas/OpenAISystemMessageParam'
                tool: '#/components/schemas/OpenAIToolMessageParam'
                user: '#/components/schemas/OpenAIUserMessageParam-Input'
          type: array
          minItems: 1
          title: Messages
        frequency_penalty:
          title: Frequency Penalty
          type: number
        function_call:
          anyOf:
            - type: string
            - additionalProperties: true
              type: object
          title: Function Call
        functions:
          title: Functions
          items:
            additionalProperties: true
            type: object
          type: array
        logit_bias:
          title: Logit Bias
          additionalProperties:
            type: number
          type: object
        logprobs:
          title: Logprobs
          type: boolean
        max_completion_tokens:
          title: Max Completion Tokens
          type: integer
        max_tokens:
          title: Max Tokens
          type: integer
        n:
          title: N
          type: integer
        parallel_tool_calls:
          title: Parallel Tool Calls
          type: boolean
        presence_penalty:
          title: Presence Penalty
          type: number
        response_format:
          title: Response Format
          oneOf:
            - $ref: '#/components/schemas/OpenAIResponseFormatText'
            - $ref: '#/components/schemas/OpenAIResponseFormatJSONSchema'
            - $ref: '#/components/schemas/OpenAIResponseFormatJSONObject'
          discriminator:
            propertyName: type
            mapping:
              json_object: '#/components/schemas/OpenAIResponseFormatJSONObject'
              json_schema: '#/components/schemas/OpenAIResponseFormatJSONSchema'
              text: '#/components/schemas/OpenAIResponseFormatText'
        seed:
          title: Seed
          type: integer
        stop:
          anyOf:
            - type: string
            - items:
                type: string
              type: array
          title: Stop
        stream:
          title: Stream
          type: boolean
        stream_options:
          title: Stream Options
          additionalProperties: true
          type: object
        temperature:
          title: Temperature
          type: number
        tool_choice:
          anyOf:
            - type: string
            - additionalProperties: true
              type: object
          title: Tool Choice
        tools:
          title: Tools
          items:
            additionalProperties: true
            type: object
          type: array
        top_logprobs:
          title: Top Logprobs
          type: integer
        top_p:
          title: Top P
          type: number
        user:
          title: User
          type: string
      additionalProperties: true
      type: object
      required:
        - model
        - messages
      title: OpenAIChatCompletionRequestWithExtraBody
      description: 'Request parameters for OpenAI-compatible chat completion endpoint.


        :param model: The identifier of the model to use. The model must be registered
        with Llama Stack and available via the /models endpoint.

        :param messages: List of messages in the conversation.

        :param frequency_penalty: (Optional) The penalty for repeated tokens.

        :param function_call: (Optional) The function call to use.

        :param functions: (Optional) List of functions to use.

        :param logit_bias: (Optional) The logit bias to use.

        :param logprobs: (Optional) The log probabilities to use.

        :param max_completion_tokens: (Optional) The maximum number of tokens to generate.

        :param max_tokens: (Optional) The maximum number of tokens to generate.

        :param n: (Optional) The number of completions to generate.

        :param parallel_tool_calls: (Optional) Whether to parallelize tool calls.

        :param presence_penalty: (Optional) The penalty for repeated tokens.

        :param response_format: (Optional) The response format to use.

        :param seed: (Optional) The seed to use.

        :param stop: (Optional) The stop tokens to use.

        :param stream: (Optional) Whether to stream the response.

        :param stream_options: (Optional) The stream options to use.

        :param temperature: (Optional) The temperature to use.

        :param tool_choice: (Optional) The tool choice to use.

        :param tools: (Optional) The tools to use.

        :param top_logprobs: (Optional) The top log probabilities to use.

        :param top_p: (Optional) The top p to use.

        :param user: (Optional) The user to use.'
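    # Illustrative sketch (hypothetical model id): a minimal
    # OpenAIChatCompletionRequestWithExtraBody payload for the chat completions endpoint.
    #   {
    #     "model": "meta-llama/Llama-3.3-70B-Instruct",
    #     "messages": [{"role": "user", "content": "Hello!"}],
    #     "temperature": 0.7,
    #     "stream": false
    #   }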
    OpenAIChatCompletionToolCall:
      properties:
        index:
          title: Index
          type: integer
        id:
          title: Id
          type: string
        type:
          type: string
          const: function
          title: Type
          default: function
        function:
          $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction'
      type: object
      title: OpenAIChatCompletionToolCall
      description: 'Tool call specification for OpenAI-compatible chat completion
        responses.


        :param index: (Optional) Index of the tool call in the list

        :param id: (Optional) Unique identifier for the tool call

        :param type: Must be "function" to identify this as a function call

        :param function: (Optional) Function call details'
    OpenAIChatCompletionToolCallFunction:
      properties:
        name:
          title: Name
          type: string
        arguments:
          title: Arguments
          type: string
      type: object
      title: OpenAIChatCompletionToolCallFunction
      description: 'Function call details for OpenAI-compatible tool calls.


        :param name: (Optional) Name of the function to call

        :param arguments: (Optional) Arguments to pass to the function as a JSON string'
    OpenAIChatCompletionUsage:
      properties:
        prompt_tokens:
          type: integer
          title: Prompt Tokens
        completion_tokens:
          type: integer
          title: Completion Tokens
        total_tokens:
          type: integer
          title: Total Tokens
        prompt_tokens_details:
          $ref: '#/components/schemas/OpenAIChatCompletionUsagePromptTokensDetails'
        completion_tokens_details:
          $ref: '#/components/schemas/OpenAIChatCompletionUsageCompletionTokensDetails'
      type: object
      required:
        - prompt_tokens
        - completion_tokens
        - total_tokens
      title: OpenAIChatCompletionUsage
      description: 'Usage information for OpenAI chat completion.


        :param prompt_tokens: Number of tokens in the prompt

        :param completion_tokens: Number of tokens in the completion

        :param total_tokens: Total tokens used (prompt + completion)

        :param prompt_tokens_details: Detailed breakdown of input token usage

        :param completion_tokens_details: Detailed breakdown of output token usage'
    OpenAIChatCompletionUsageCompletionTokensDetails:
      properties:
        reasoning_tokens:
          title: Reasoning Tokens
          type: integer
      type: object
      title: OpenAIChatCompletionUsageCompletionTokensDetails
      description: 'Token details for output tokens in OpenAI chat completion usage.


        :param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)'
    OpenAIChatCompletionUsagePromptTokensDetails:
      properties:
        cached_tokens:
          title: Cached Tokens
          type: integer
      type: object
      title: OpenAIChatCompletionUsagePromptTokensDetails
      description: 'Token details for prompt tokens in OpenAI chat completion usage.


        :param cached_tokens: Number of tokens retrieved from cache'
    OpenAIChoice-Output:
      properties:
        message:
          oneOf:
            - $ref: '#/components/schemas/OpenAIUserMessageParam-Output'
            - $ref: '#/components/schemas/OpenAISystemMessageParam'
            - $ref: '#/components/schemas/OpenAIAssistantMessageParam-Output'
            - $ref: '#/components/schemas/OpenAIToolMessageParam'
            - $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
          title: Message
          discriminator:
            propertyName: role
            mapping:
              assistant: '#/components/schemas/OpenAIAssistantMessageParam-Output'
              developer: '#/components/schemas/OpenAIDeveloperMessageParam'
              system: '#/components/schemas/OpenAISystemMessageParam'
              tool: '#/components/schemas/OpenAIToolMessageParam'
              user: '#/components/schemas/OpenAIUserMessageParam-Output'
        finish_reason:
          type: string
          title: Finish Reason
        index:
          type: integer
          title: Index
        logprobs:
          $ref: '#/components/schemas/OpenAIChoiceLogprobs-Output'
      type: object
      required:
        - message
        - finish_reason
        - index
      title: OpenAIChoice
      description: 'A choice from an OpenAI-compatible chat completion response.


        :param message: The message from the model

        :param finish_reason: The reason the model stopped generating

        :param index: The index of the choice

        :param logprobs: (Optional) The log probabilities for the tokens in the message'
    OpenAIChoiceLogprobs-Output:
      properties:
        content:
          title: Content
          items:
            $ref: '#/components/schemas/OpenAITokenLogProb'
          type: array
        refusal:
          title: Refusal
          items:
            $ref: '#/components/schemas/OpenAITokenLogProb'
          type: array
      type: object
      title: OpenAIChoiceLogprobs
      description: 'The log probabilities for the tokens in the message from an OpenAI-compatible
        chat completion response.


        :param content: (Optional) The log probabilities for the tokens in the message

        :param refusal: (Optional) The log probabilities for the tokens in the message'
    OpenAICompletion:
      properties:
        id:
          type: string
          title: Id
        choices:
          items:
            $ref: '#/components/schemas/OpenAICompletionChoice-Output'
          type: array
          title: Choices
        created:
          type: integer
          title: Created
        model:
          type: string
          title: Model
        object:
          type: string
          const: text_completion
          title: Object
          default: text_completion
      type: object
      required:
        - id
        - choices
        - created
        - model
      title: OpenAICompletion
      description: 'Response from an OpenAI-compatible completion request.


        :param id: The ID of the completion

        :param choices: List of choices

        :param created: The Unix timestamp in seconds when the completion was created

        :param model: The model that was used to generate the completion

        :param object: The object type, which will be "text_completion"'
OpenAICompletionChoice-Output:
|
|
properties:
|
|
finish_reason:
|
|
type: string
|
|
title: Finish Reason
|
|
text:
|
|
type: string
|
|
title: Text
|
|
index:
|
|
type: integer
|
|
title: Index
|
|
logprobs:
|
|
$ref: '#/components/schemas/OpenAIChoiceLogprobs-Output'
|
|
type: object
|
|
required:
|
|
- finish_reason
|
|
- text
|
|
- index
|
|
title: OpenAICompletionChoice
|
|
description: 'A choice from an OpenAI-compatible completion response.
|
|
|
|
|
|
:finish_reason: The reason the model stopped generating
|
|
|
|
:text: The text of the choice
|
|
|
|
:index: The index of the choice
|
|
|
|
:logprobs: (Optional) The log probabilities for the tokens in the choice'
|
|
OpenAICompletionRequestWithExtraBody:
|
|
properties:
|
|
model:
|
|
type: string
|
|
title: Model
|
|
prompt:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
type: string
|
|
type: array
|
|
- items:
|
|
type: integer
|
|
type: array
|
|
- items:
|
|
items:
|
|
type: integer
|
|
type: array
|
|
type: array
|
|
title: Prompt
|
|
best_of:
|
|
title: Best Of
|
|
type: integer
|
|
echo:
|
|
title: Echo
|
|
type: boolean
|
|
frequency_penalty:
|
|
title: Frequency Penalty
|
|
type: number
|
|
logit_bias:
|
|
title: Logit Bias
|
|
additionalProperties:
|
|
type: number
|
|
type: object
|
|
logprobs:
|
|
title: Logprobs
|
|
type: boolean
|
|
max_tokens:
|
|
title: Max Tokens
|
|
type: integer
|
|
n:
|
|
title: N
|
|
type: integer
|
|
presence_penalty:
|
|
title: Presence Penalty
|
|
type: number
|
|
seed:
|
|
title: Seed
|
|
type: integer
|
|
stop:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
type: string
|
|
type: array
|
|
title: Stop
|
|
stream:
|
|
title: Stream
|
|
type: boolean
|
|
stream_options:
|
|
title: Stream Options
|
|
additionalProperties: true
|
|
type: object
|
|
temperature:
|
|
title: Temperature
|
|
type: number
|
|
top_p:
|
|
title: Top P
|
|
type: number
|
|
user:
|
|
title: User
|
|
type: string
|
|
suffix:
|
|
title: Suffix
|
|
type: string
|
|
additionalProperties: true
|
|
type: object
|
|
required:
|
|
- model
|
|
- prompt
|
|
title: OpenAICompletionRequestWithExtraBody
|
|
description: 'Request parameters for OpenAI-compatible completion endpoint.
|
|
|
|
|
|
:param model: The identifier of the model to use. The model must be registered
|
|
with Llama Stack and available via the /models endpoint.
|
|
|
|
:param prompt: The prompt to generate a completion for.
|
|
|
|
:param best_of: (Optional) The number of completions to generate.
|
|
|
|
:param echo: (Optional) Whether to echo the prompt.
|
|
|
|
:param frequency_penalty: (Optional) The penalty for repeated tokens.
|
|
|
|
:param logit_bias: (Optional) The logit bias to use.
|
|
|
|
:param logprobs: (Optional) The log probabilities to use.
|
|
|
|
:param max_tokens: (Optional) The maximum number of tokens to generate.
|
|
|
|
:param n: (Optional) The number of completions to generate.
|
|
|
|
:param presence_penalty: (Optional) The penalty for repeated tokens.
|
|
|
|
:param seed: (Optional) The seed to use.
|
|
|
|
:param stop: (Optional) The stop tokens to use.
|
|
|
|
:param stream: (Optional) Whether to stream the response.
|
|
|
|
:param stream_options: (Optional) The stream options to use.
|
|
|
|
:param temperature: (Optional) The temperature to use.
|
|
|
|
:param top_p: (Optional) The top p to use.
|
|
|
|
:param user: (Optional) The user to use.
|
|
|
|
:param suffix: (Optional) The suffix that should be appended to the completion.'
|
|
OpenAICreateVectorStoreFileBatchRequestWithExtraBody:
|
|
properties:
|
|
file_ids:
|
|
items:
|
|
type: string
|
|
type: array
|
|
title: File Ids
|
|
attributes:
|
|
title: Attributes
|
|
additionalProperties: true
|
|
type: object
|
|
chunking_strategy:
|
|
title: Chunking Strategy
|
|
oneOf:
|
|
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
|
|
- $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
auto: '#/components/schemas/VectorStoreChunkingStrategyAuto'
|
|
static: '#/components/schemas/VectorStoreChunkingStrategyStatic'
|
|
additionalProperties: true
|
|
type: object
|
|
required:
|
|
- file_ids
|
|
title: OpenAICreateVectorStoreFileBatchRequestWithExtraBody
|
|
description: 'Request to create a vector store file batch with extra_body support.
|
|
|
|
|
|
:param file_ids: A list of File IDs that the vector store should use
|
|
|
|
:param attributes: (Optional) Key-value attributes to store with the files
|
|
|
|
:param chunking_strategy: (Optional) The chunking strategy used to chunk the
|
|
file(s). Defaults to auto'
|
|
OpenAICreateVectorStoreRequestWithExtraBody:
|
|
properties:
|
|
name:
|
|
title: Name
|
|
type: string
|
|
file_ids:
|
|
title: File Ids
|
|
items:
|
|
type: string
|
|
type: array
|
|
expires_after:
|
|
title: Expires After
|
|
additionalProperties: true
|
|
type: object
|
|
chunking_strategy:
|
|
title: Chunking Strategy
|
|
additionalProperties: true
|
|
type: object
|
|
metadata:
|
|
title: Metadata
|
|
additionalProperties: true
|
|
type: object
|
|
additionalProperties: true
|
|
type: object
|
|
title: OpenAICreateVectorStoreRequestWithExtraBody
|
|
description: 'Request to create a vector store with extra_body support.
|
|
|
|
|
|
:param name: (Optional) A name for the vector store
|
|
|
|
:param file_ids: List of file IDs to include in the vector store
|
|
|
|
:param expires_after: (Optional) Expiration policy for the vector store
|
|
|
|
:param chunking_strategy: (Optional) Strategy for splitting files into chunks
|
|
|
|
:param metadata: Set of key-value pairs that can be attached to the vector
|
|
store'
|
|
OpenAIDeveloperMessageParam:
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: developer
|
|
title: Role
|
|
default: developer
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
type: array
|
|
title: Content
|
|
name:
|
|
title: Name
|
|
type: string
|
|
type: object
|
|
required:
|
|
- content
|
|
title: OpenAIDeveloperMessageParam
|
|
description: 'A message from the developer in an OpenAI-compatible chat completion
|
|
request.
|
|
|
|
|
|
:param role: Must be "developer" to identify this as a developer message
|
|
|
|
:param content: The content of the developer message
|
|
|
|
:param name: (Optional) The name of the developer message participant.'
|
|
OpenAIEmbeddingData:
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: embedding
|
|
title: Object
|
|
default: embedding
|
|
embedding:
|
|
anyOf:
|
|
- items:
|
|
type: number
|
|
type: array
|
|
- type: string
|
|
title: Embedding
|
|
index:
|
|
type: integer
|
|
title: Index
|
|
type: object
|
|
required:
|
|
- embedding
|
|
- index
|
|
title: OpenAIEmbeddingData
|
|
description: 'A single embedding data object from an OpenAI-compatible embeddings
|
|
response.
|
|
|
|
|
|
:param object: The object type, which will be "embedding"
|
|
|
|
:param embedding: The embedding vector as a list of floats (when encoding_format="float")
|
|
or as a base64-encoded string (when encoding_format="base64")
|
|
|
|
:param index: The index of the embedding in the input list'
|
|
OpenAIEmbeddingUsage:
|
|
properties:
|
|
prompt_tokens:
|
|
type: integer
|
|
title: Prompt Tokens
|
|
total_tokens:
|
|
type: integer
|
|
title: Total Tokens
|
|
type: object
|
|
required:
|
|
- prompt_tokens
|
|
- total_tokens
|
|
title: OpenAIEmbeddingUsage
|
|
description: 'Usage information for an OpenAI-compatible embeddings response.
|
|
|
|
|
|
:param prompt_tokens: The number of tokens in the input
|
|
|
|
:param total_tokens: The total number of tokens used'
|
|
OpenAIEmbeddingsRequestWithExtraBody:
|
|
properties:
|
|
model:
|
|
type: string
|
|
title: Model
|
|
input:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
type: string
|
|
type: array
|
|
title: Input
|
|
encoding_format:
|
|
title: Encoding Format
|
|
default: float
|
|
type: string
|
|
dimensions:
|
|
title: Dimensions
|
|
type: integer
|
|
user:
|
|
title: User
|
|
type: string
|
|
additionalProperties: true
|
|
type: object
|
|
required:
|
|
- model
|
|
- input
|
|
title: OpenAIEmbeddingsRequestWithExtraBody
|
|
description: 'Request parameters for OpenAI-compatible embeddings endpoint.
|
|
|
|
|
|
:param model: The identifier of the model to use. The model must be an embedding
|
|
model registered with Llama Stack and available via the /models endpoint.
|
|
|
|
:param input: Input text to embed, encoded as a string or array of strings.
|
|
To embed multiple inputs in a single request, pass an array of strings.
|
|
|
|
:param encoding_format: (Optional) The format to return the embeddings in.
|
|
Can be either "float" or "base64". Defaults to "float".
|
|
|
|
:param dimensions: (Optional) The number of dimensions the resulting output
|
|
embeddings should have. Only supported in text-embedding-3 and later models.
|
|
|
|
:param user: (Optional) A unique identifier representing your end-user, which
|
|
can help OpenAI to monitor and detect abuse.'
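      # Illustrative request body; the model identifier is a placeholder and must
      # refer to an embedding model registered with the stack:
      #   {"model": "example-embedding-model", "input": ["Hello world"],
      #    "encoding_format": "float"}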
|
|
OpenAIEmbeddingsResponse:
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: list
|
|
title: Object
|
|
default: list
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIEmbeddingData'
|
|
type: array
|
|
title: Data
|
|
model:
|
|
type: string
|
|
title: Model
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIEmbeddingUsage'
|
|
type: object
|
|
required:
|
|
- data
|
|
- model
|
|
- usage
|
|
title: OpenAIEmbeddingsResponse
|
|
description: 'Response from an OpenAI-compatible embeddings request.
|
|
|
|
|
|
:param object: The object type, which will be "list"
|
|
|
|
:param data: List of embedding data objects
|
|
|
|
:param model: The model that was used to generate the embeddings
|
|
|
|
:param usage: Usage information'
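      # Illustrative response; all values are placeholders:
      #   {"object": "list",
      #    "data": [{"object": "embedding", "embedding": [0.0023, -0.0091], "index": 0}],
      #    "model": "example-embedding-model",
      #    "usage": {"prompt_tokens": 2, "total_tokens": 2}}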
|
|
OpenAIFile:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file
|
|
title: Type
|
|
default: file
|
|
file:
|
|
$ref: '#/components/schemas/OpenAIFileFile'
|
|
type: object
|
|
required:
|
|
- file
|
|
title: OpenAIFile
|
|
OpenAIFileFile:
|
|
properties:
|
|
file_data:
|
|
title: File Data
|
|
type: string
|
|
file_id:
|
|
title: File Id
|
|
type: string
|
|
filename:
|
|
title: Filename
|
|
type: string
|
|
type: object
|
|
title: OpenAIFileFile
|
|
OpenAIFileObject:
|
|
properties:
|
|
object:
|
|
type: string
|
|
const: file
|
|
title: Object
|
|
default: file
|
|
id:
|
|
type: string
|
|
title: Id
|
|
bytes:
|
|
type: integer
|
|
title: Bytes
|
|
created_at:
|
|
type: integer
|
|
title: Created At
|
|
expires_at:
|
|
type: integer
|
|
title: Expires At
|
|
filename:
|
|
type: string
|
|
title: Filename
|
|
purpose:
|
|
$ref: '#/components/schemas/OpenAIFilePurpose'
|
|
type: object
|
|
required:
|
|
- id
|
|
- bytes
|
|
- created_at
|
|
- expires_at
|
|
- filename
|
|
- purpose
|
|
title: OpenAIFileObject
|
|
description: 'OpenAI File object as defined in the OpenAI Files API.
|
|
|
|
|
|
:param object: The object type, which is always "file"
|
|
|
|
:param id: The file identifier, which can be referenced in the API endpoints
|
|
|
|
:param bytes: The size of the file, in bytes
|
|
|
|
:param created_at: The Unix timestamp (in seconds) for when the file was created
|
|
|
|
:param expires_at: The Unix timestamp (in seconds) for when the file expires
|
|
|
|
:param filename: The name of the file
|
|
|
|
:param purpose: The intended purpose of the file'
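      # Illustrative example; the id, size, and timestamps are placeholders:
      #   {"object": "file", "id": "file-abc123", "bytes": 1024,
      #    "created_at": 1715000000, "expires_at": 1715086400,
      #    "filename": "notes.txt", "purpose": "assistants"}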
|
|
    OpenAIFilePurpose:
      type: string
      enum:
        - assistants
        - batch
      title: OpenAIFilePurpose
      description: Valid purpose values for OpenAI Files API.
|
|
OpenAIImageURL:
|
|
properties:
|
|
url:
|
|
type: string
|
|
title: Url
|
|
detail:
|
|
title: Detail
|
|
type: string
|
|
type: object
|
|
required:
|
|
- url
|
|
title: OpenAIImageURL
|
|
description: 'Image URL specification for OpenAI-compatible chat completion
|
|
messages.
|
|
|
|
|
|
:param url: URL of the image to include in the message
|
|
|
|
:param detail: (Optional) Level of detail for image processing. Can be "low",
|
|
"high", or "auto"'
|
|
OpenAIJSONSchema:
|
|
properties:
|
|
name:
|
|
type: string
|
|
title: Name
|
|
description:
|
|
title: Description
|
|
type: string
|
|
strict:
|
|
title: Strict
|
|
type: boolean
|
|
schema:
|
|
title: Schema
|
|
additionalProperties: true
|
|
type: object
|
|
type: object
|
|
title: OpenAIJSONSchema
|
|
description: 'JSON schema specification for OpenAI-compatible structured response
|
|
format.
|
|
|
|
|
|
:param name: Name of the schema
|
|
|
|
:param description: (Optional) Description of the schema
|
|
|
|
:param strict: (Optional) Whether to enforce strict adherence to the schema
|
|
|
|
:param schema: (Optional) The JSON schema definition'
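      # Illustrative example; the "weather_report" schema is a made-up placeholder:
      #   {"name": "weather_report", "strict": true,
      #    "schema": {"type": "object",
      #               "properties": {"city": {"type": "string"}},
      #               "required": ["city"]}}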
|
|
OpenAIResponseAnnotationCitation:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: url_citation
|
|
title: Type
|
|
default: url_citation
|
|
end_index:
|
|
type: integer
|
|
title: End Index
|
|
start_index:
|
|
type: integer
|
|
title: Start Index
|
|
title:
|
|
type: string
|
|
title: Title
|
|
url:
|
|
type: string
|
|
title: Url
|
|
type: object
|
|
required:
|
|
- end_index
|
|
- start_index
|
|
- title
|
|
- url
|
|
title: OpenAIResponseAnnotationCitation
|
|
description: 'URL citation annotation for referencing external web resources.
|
|
|
|
|
|
:param type: Annotation type identifier, always "url_citation"
|
|
|
|
:param end_index: End position of the citation span in the content
|
|
|
|
:param start_index: Start position of the citation span in the content
|
|
|
|
:param title: Title of the referenced web resource
|
|
|
|
:param url: URL of the referenced web resource'
|
|
OpenAIResponseAnnotationContainerFileCitation:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: container_file_citation
|
|
title: Type
|
|
default: container_file_citation
|
|
container_id:
|
|
type: string
|
|
title: Container Id
|
|
end_index:
|
|
type: integer
|
|
title: End Index
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
filename:
|
|
type: string
|
|
title: Filename
|
|
start_index:
|
|
type: integer
|
|
title: Start Index
|
|
type: object
|
|
required:
|
|
- container_id
|
|
- end_index
|
|
- file_id
|
|
- filename
|
|
- start_index
|
|
title: OpenAIResponseAnnotationContainerFileCitation
|
|
OpenAIResponseAnnotationFileCitation:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file_citation
|
|
title: Type
|
|
default: file_citation
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
filename:
|
|
type: string
|
|
title: Filename
|
|
index:
|
|
type: integer
|
|
title: Index
|
|
type: object
|
|
required:
|
|
- file_id
|
|
- filename
|
|
- index
|
|
title: OpenAIResponseAnnotationFileCitation
|
|
description: 'File citation annotation for referencing specific files in response
|
|
content.
|
|
|
|
|
|
:param type: Annotation type identifier, always "file_citation"
|
|
|
|
:param file_id: Unique identifier of the referenced file
|
|
|
|
:param filename: Name of the referenced file
|
|
|
|
:param index: Position index of the citation within the content'
|
|
OpenAIResponseAnnotationFilePath:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file_path
|
|
title: Type
|
|
default: file_path
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
index:
|
|
type: integer
|
|
title: Index
|
|
type: object
|
|
required:
|
|
- file_id
|
|
- index
|
|
title: OpenAIResponseAnnotationFilePath
|
|
OpenAIResponseContentPartRefusal:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: refusal
|
|
title: Type
|
|
default: refusal
|
|
refusal:
|
|
type: string
|
|
title: Refusal
|
|
type: object
|
|
required:
|
|
- refusal
|
|
title: OpenAIResponseContentPartRefusal
|
|
description: 'Refusal content within a streamed response part.
|
|
|
|
|
|
:param type: Content part type identifier, always "refusal"
|
|
|
|
:param refusal: Refusal text supplied by the model'
|
|
OpenAIResponseError:
|
|
properties:
|
|
code:
|
|
type: string
|
|
title: Code
|
|
message:
|
|
type: string
|
|
title: Message
|
|
type: object
|
|
required:
|
|
- code
|
|
- message
|
|
title: OpenAIResponseError
|
|
description: 'Error details for failed OpenAI response requests.
|
|
|
|
|
|
:param code: Error code identifying the type of failure
|
|
|
|
:param message: Human-readable error message describing the failure'
|
|
OpenAIResponseFormatJSONObject:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: json_object
|
|
title: Type
|
|
default: json_object
|
|
type: object
|
|
title: OpenAIResponseFormatJSONObject
|
|
description: 'JSON object response format for OpenAI-compatible chat completion
|
|
requests.
|
|
|
|
|
|
:param type: Must be "json_object" to indicate generic JSON object response
|
|
format'
|
|
OpenAIResponseFormatJSONSchema:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: json_schema
|
|
title: Type
|
|
default: json_schema
|
|
json_schema:
|
|
$ref: '#/components/schemas/OpenAIJSONSchema'
|
|
type: object
|
|
required:
|
|
- json_schema
|
|
title: OpenAIResponseFormatJSONSchema
|
|
description: 'JSON schema response format for OpenAI-compatible chat completion
|
|
requests.
|
|
|
|
|
|
:param type: Must be "json_schema" to indicate structured JSON response format
|
|
|
|
:param json_schema: The JSON schema specification for the response'
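      # Illustrative example, wrapping the hypothetical "weather_report" schema
      # used in the OpenAIJSONSchema example above:
      #   {"type": "json_schema",
      #    "json_schema": {"name": "weather_report",
      #                    "schema": {"type": "object",
      #                               "properties": {"city": {"type": "string"}}}}}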
|
|
OpenAIResponseFormatText:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: text
|
|
title: Type
|
|
default: text
|
|
type: object
|
|
title: OpenAIResponseFormatText
|
|
description: 'Text response format for OpenAI-compatible chat completion requests.
|
|
|
|
|
|
:param type: Must be "text" to indicate plain text response format'
|
|
OpenAIResponseInputFunctionToolCallOutput:
|
|
properties:
|
|
call_id:
|
|
type: string
|
|
title: Call Id
|
|
output:
|
|
type: string
|
|
title: Output
|
|
type:
|
|
type: string
|
|
const: function_call_output
|
|
title: Type
|
|
default: function_call_output
|
|
id:
|
|
title: Id
|
|
type: string
|
|
status:
|
|
title: Status
|
|
type: string
|
|
type: object
|
|
required:
|
|
- call_id
|
|
- output
|
|
title: OpenAIResponseInputFunctionToolCallOutput
|
|
description: This represents the output of a function call that gets passed
|
|
back to the model.
|
|
OpenAIResponseInputMessageContentFile:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: input_file
|
|
title: Type
|
|
default: input_file
|
|
file_data:
|
|
title: File Data
|
|
type: string
|
|
file_id:
|
|
title: File Id
|
|
type: string
|
|
file_url:
|
|
title: File Url
|
|
type: string
|
|
filename:
|
|
title: Filename
|
|
type: string
|
|
type: object
|
|
title: OpenAIResponseInputMessageContentFile
|
|
      description: 'File content for input messages in OpenAI response format.


        :param type: The type of the input item. Always `input_file`.

        :param file_data: (Optional) The data of the file to be sent to the model.

        :param file_id: (Optional) The ID of the file to be sent to the model.

        :param file_url: (Optional) The URL of the file to be sent to the model.

        :param filename: (Optional) The name of the file to be sent to the model.'
|
|
OpenAIResponseInputMessageContentImage:
|
|
properties:
|
|
detail:
|
|
anyOf:
|
|
- type: string
|
|
const: low
|
|
- type: string
|
|
const: high
|
|
- type: string
|
|
const: auto
|
|
title: Detail
|
|
default: auto
|
|
type:
|
|
type: string
|
|
const: input_image
|
|
title: Type
|
|
default: input_image
|
|
file_id:
|
|
title: File Id
|
|
type: string
|
|
image_url:
|
|
title: Image Url
|
|
type: string
|
|
type: object
|
|
title: OpenAIResponseInputMessageContentImage
|
|
description: 'Image content for input messages in OpenAI response format.
|
|
|
|
|
|
:param detail: Level of detail for image processing, can be "low", "high",
|
|
or "auto"
|
|
|
|
:param type: Content type identifier, always "input_image"
|
|
|
|
:param file_id: (Optional) The ID of the file to be sent to the model.
|
|
|
|
:param image_url: (Optional) URL of the image content'
|
|
OpenAIResponseInputMessageContentText:
|
|
properties:
|
|
text:
|
|
type: string
|
|
title: Text
|
|
type:
|
|
type: string
|
|
const: input_text
|
|
title: Type
|
|
default: input_text
|
|
type: object
|
|
required:
|
|
- text
|
|
title: OpenAIResponseInputMessageContentText
|
|
description: 'Text content for input messages in OpenAI response format.
|
|
|
|
|
|
:param text: The text content of the input message
|
|
|
|
:param type: Content type identifier, always "input_text"'
|
|
OpenAIResponseInputToolFileSearch:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: file_search
|
|
title: Type
|
|
default: file_search
|
|
vector_store_ids:
|
|
items:
|
|
type: string
|
|
type: array
|
|
title: Vector Store Ids
|
|
filters:
|
|
title: Filters
|
|
additionalProperties: true
|
|
type: object
|
|
max_num_results:
|
|
title: Max Num Results
|
|
default: 10
|
|
type: integer
|
|
maximum: 50.0
|
|
minimum: 1.0
|
|
ranking_options:
|
|
$ref: '#/components/schemas/SearchRankingOptions'
|
|
type: object
|
|
required:
|
|
- vector_store_ids
|
|
title: OpenAIResponseInputToolFileSearch
|
|
description: 'File search tool configuration for OpenAI response inputs.
|
|
|
|
|
|
:param type: Tool type identifier, always "file_search"
|
|
|
|
:param vector_store_ids: List of vector store identifiers to search within
|
|
|
|
:param filters: (Optional) Additional filters to apply to the search
|
|
|
|
:param max_num_results: (Optional) Maximum number of search results to return
|
|
(1-50)
|
|
|
|
:param ranking_options: (Optional) Options for ranking and scoring search
|
|
results'
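      # Illustrative tool configuration; the vector store id is a placeholder:
      #   {"type": "file_search", "vector_store_ids": ["vs_abc123"],
      #    "max_num_results": 5}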
|
|
OpenAIResponseInputToolFunction:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: function
|
|
title: Type
|
|
default: function
|
|
name:
|
|
type: string
|
|
title: Name
|
|
description:
|
|
title: Description
|
|
type: string
|
|
parameters:
|
|
title: Parameters
|
|
additionalProperties: true
|
|
type: object
|
|
strict:
|
|
title: Strict
|
|
type: boolean
|
|
type: object
|
|
required:
|
|
- name
|
|
- parameters
|
|
title: OpenAIResponseInputToolFunction
|
|
description: 'Function tool configuration for OpenAI response inputs.
|
|
|
|
|
|
:param type: Tool type identifier, always "function"
|
|
|
|
:param name: Name of the function that can be called
|
|
|
|
:param description: (Optional) Description of what the function does
|
|
|
|
:param parameters: (Optional) JSON schema defining the function''s parameters
|
|
|
|
:param strict: (Optional) Whether to enforce strict parameter validation'
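      # Illustrative tool configuration; the function name and parameter schema
      # are made-up placeholders:
      #   {"type": "function", "name": "get_weather",
      #    "description": "Look up current weather for a city",
      #    "parameters": {"type": "object",
      #                   "properties": {"city": {"type": "string"}},
      #                   "required": ["city"]}}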
|
|
OpenAIResponseInputToolMCP:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: mcp
|
|
title: Type
|
|
default: mcp
|
|
server_label:
|
|
type: string
|
|
title: Server Label
|
|
server_url:
|
|
type: string
|
|
title: Server Url
|
|
headers:
|
|
title: Headers
|
|
additionalProperties: true
|
|
type: object
|
|
require_approval:
|
|
anyOf:
|
|
- type: string
|
|
const: always
|
|
- type: string
|
|
const: never
|
|
- $ref: '#/components/schemas/ApprovalFilter'
|
|
title: Require Approval
|
|
default: never
|
|
allowed_tools:
|
|
anyOf:
|
|
- items:
|
|
type: string
|
|
type: array
|
|
- $ref: '#/components/schemas/AllowedToolsFilter'
|
|
title: Allowed Tools
|
|
type: object
|
|
required:
|
|
- server_label
|
|
- server_url
|
|
title: OpenAIResponseInputToolMCP
|
|
description: 'Model Context Protocol (MCP) tool configuration for OpenAI response
|
|
inputs.
|
|
|
|
|
|
:param type: Tool type identifier, always "mcp"
|
|
|
|
:param server_label: Label to identify this MCP server
|
|
|
|
:param server_url: URL endpoint of the MCP server
|
|
|
|
:param headers: (Optional) HTTP headers to include when connecting to the
|
|
server
|
|
|
|
:param require_approval: Approval requirement for tool calls ("always", "never",
|
|
or filter)
|
|
|
|
:param allowed_tools: (Optional) Restriction on which tools can be used from
|
|
this server'
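      # Illustrative tool configuration; the server label, URL, and tool name are
      # placeholders:
      #   {"type": "mcp", "server_label": "docs",
      #    "server_url": "https://example.com/mcp", "require_approval": "never",
      #    "allowed_tools": ["search_docs"]}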
|
|
OpenAIResponseInputToolWebSearch:
|
|
properties:
|
|
type:
|
|
anyOf:
|
|
- type: string
|
|
const: web_search
|
|
- type: string
|
|
const: web_search_preview
|
|
- type: string
|
|
const: web_search_preview_2025_03_11
|
|
title: Type
|
|
default: web_search
|
|
search_context_size:
|
|
title: Search Context Size
|
|
default: medium
|
|
type: string
|
|
          pattern: ^(low|medium|high)$
|
|
type: object
|
|
title: OpenAIResponseInputToolWebSearch
|
|
description: 'Web search tool configuration for OpenAI response inputs.
|
|
|
|
|
|
:param type: Web search tool type variant to use
|
|
|
|
:param search_context_size: (Optional) Size of search context, must be "low",
|
|
"medium", or "high"'
|
|
OpenAIResponseMCPApprovalRequest:
|
|
properties:
|
|
arguments:
|
|
type: string
|
|
title: Arguments
|
|
id:
|
|
type: string
|
|
title: Id
|
|
name:
|
|
type: string
|
|
title: Name
|
|
server_label:
|
|
type: string
|
|
title: Server Label
|
|
type:
|
|
type: string
|
|
const: mcp_approval_request
|
|
title: Type
|
|
default: mcp_approval_request
|
|
type: object
|
|
required:
|
|
- arguments
|
|
- id
|
|
- name
|
|
- server_label
|
|
title: OpenAIResponseMCPApprovalRequest
|
|
description: A request for human approval of a tool invocation.
|
|
OpenAIResponseMCPApprovalResponse:
|
|
properties:
|
|
approval_request_id:
|
|
type: string
|
|
title: Approval Request Id
|
|
approve:
|
|
type: boolean
|
|
title: Approve
|
|
type:
|
|
type: string
|
|
const: mcp_approval_response
|
|
title: Type
|
|
default: mcp_approval_response
|
|
id:
|
|
title: Id
|
|
type: string
|
|
reason:
|
|
title: Reason
|
|
type: string
|
|
type: object
|
|
required:
|
|
- approval_request_id
|
|
- approve
|
|
title: OpenAIResponseMCPApprovalResponse
|
|
description: A response to an MCP approval request.
|
|
OpenAIResponseMessage-Input:
|
|
properties:
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
type: array
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
output_text: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
refusal: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
type: array
|
|
title: Content
|
|
role:
|
|
anyOf:
|
|
- type: string
|
|
const: system
|
|
- type: string
|
|
const: developer
|
|
- type: string
|
|
const: user
|
|
- type: string
|
|
const: assistant
|
|
title: Role
|
|
type:
|
|
type: string
|
|
const: message
|
|
title: Type
|
|
default: message
|
|
id:
|
|
title: Id
|
|
type: string
|
|
status:
|
|
title: Status
|
|
type: string
|
|
type: object
|
|
required:
|
|
- content
|
|
- role
|
|
title: OpenAIResponseMessage
|
|
description: 'Corresponds to the various Message types in the Responses API.
|
|
|
|
They are all under one type because the Responses API gives them all
|
|
|
|
the same "type" value, and there is no way to tell them apart in certain
|
|
|
|
scenarios.'
|
|
OpenAIResponseMessage-Output:
|
|
properties:
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
type: array
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
output_text: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
refusal: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
type: array
|
|
title: Content
|
|
role:
|
|
anyOf:
|
|
- type: string
|
|
const: system
|
|
- type: string
|
|
const: developer
|
|
- type: string
|
|
const: user
|
|
- type: string
|
|
const: assistant
|
|
title: Role
|
|
type:
|
|
type: string
|
|
const: message
|
|
title: Type
|
|
default: message
|
|
id:
|
|
title: Id
|
|
type: string
|
|
status:
|
|
title: Status
|
|
type: string
|
|
type: object
|
|
required:
|
|
- content
|
|
- role
|
|
title: OpenAIResponseMessage
|
|
description: 'Corresponds to the various Message types in the Responses API.
|
|
|
|
They are all under one type because the Responses API gives them all
|
|
|
|
the same "type" value, and there is no way to tell them apart in certain
|
|
|
|
scenarios.'
|
|
OpenAIResponseObject:
|
|
properties:
|
|
created_at:
|
|
type: integer
|
|
title: Created At
|
|
error:
|
|
$ref: '#/components/schemas/OpenAIResponseError'
|
|
id:
|
|
type: string
|
|
title: Id
|
|
model:
|
|
type: string
|
|
title: Model
|
|
object:
|
|
type: string
|
|
const: response
|
|
title: Object
|
|
default: response
|
|
output:
|
|
items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage-Output'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
message: '#/components/schemas/OpenAIResponseMessage-Output'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
type: array
|
|
title: Output
|
|
parallel_tool_calls:
|
|
type: boolean
|
|
title: Parallel Tool Calls
|
|
default: false
|
|
previous_response_id:
|
|
title: Previous Response Id
|
|
type: string
|
|
prompt:
|
|
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
|
status:
|
|
type: string
|
|
title: Status
|
|
temperature:
|
|
title: Temperature
|
|
type: number
|
|
text:
|
|
$ref: '#/components/schemas/OpenAIResponseText'
|
|
default:
|
|
format:
|
|
type: text
|
|
top_p:
|
|
title: Top P
|
|
type: number
|
|
tools:
|
|
title: Tools
|
|
items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
- $ref: '#/components/schemas/OpenAIResponseToolMCP'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
function: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
mcp: '#/components/schemas/OpenAIResponseToolMCP'
|
|
web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
web_search_preview: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
web_search_preview_2025_03_11: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
type: array
|
|
truncation:
|
|
title: Truncation
|
|
type: string
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIResponseUsage'
|
|
instructions:
|
|
title: Instructions
|
|
type: string
|
|
type: object
|
|
required:
|
|
- created_at
|
|
- id
|
|
- model
|
|
- output
|
|
- status
|
|
title: OpenAIResponseObject
|
|
description: 'Complete OpenAI response object containing generation results
|
|
and metadata.
|
|
|
|
|
|
:param created_at: Unix timestamp when the response was created
|
|
|
|
:param error: (Optional) Error details if the response generation failed
|
|
|
|
:param id: Unique identifier for this response
|
|
|
|
:param model: Model identifier used for generation
|
|
|
|
:param object: Object type identifier, always "response"
|
|
|
|
:param output: List of generated output items (messages, tool calls, etc.)
|
|
|
|
:param parallel_tool_calls: Whether tool calls can be executed in parallel
|
|
|
|
:param previous_response_id: (Optional) ID of the previous response in a conversation
|
|
|
|
:param prompt: (Optional) Reference to a prompt template and its variables.
|
|
|
|
:param status: Current status of the response generation
|
|
|
|
:param temperature: (Optional) Sampling temperature used for generation
|
|
|
|
:param text: Text formatting configuration for the response
|
|
|
|
:param top_p: (Optional) Nucleus sampling parameter used for generation
|
|
|
|
:param tools: (Optional) An array of tools the model may call while generating
|
|
a response.
|
|
|
|
:param truncation: (Optional) Truncation strategy applied to the response
|
|
|
|
:param usage: (Optional) Token usage information for the response
|
|
|
|
:param instructions: (Optional) System message inserted into the model''s
|
|
context'
|
|
OpenAIResponseOutputMessageContentOutputText:
|
|
properties:
|
|
text:
|
|
type: string
|
|
title: Text
|
|
type:
|
|
type: string
|
|
const: output_text
|
|
title: Type
|
|
default: output_text
|
|
annotations:
|
|
items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
|
|
- $ref: '#/components/schemas/OpenAIResponseAnnotationFilePath'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation'
|
|
file_citation: '#/components/schemas/OpenAIResponseAnnotationFileCitation'
|
|
file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath'
|
|
url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation'
|
|
type: array
|
|
title: Annotations
|
|
type: object
|
|
required:
|
|
- text
|
|
title: OpenAIResponseOutputMessageContentOutputText
|
|
OpenAIResponseOutputMessageFileSearchToolCall:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
queries:
|
|
items:
|
|
type: string
|
|
type: array
|
|
title: Queries
|
|
status:
|
|
type: string
|
|
title: Status
|
|
type:
|
|
type: string
|
|
const: file_search_call
|
|
title: Type
|
|
default: file_search_call
|
|
results:
|
|
title: Results
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCallResults'
|
|
type: array
|
|
type: object
|
|
required:
|
|
- id
|
|
- queries
|
|
- status
|
|
title: OpenAIResponseOutputMessageFileSearchToolCall
|
|
description: 'File search tool call output message for OpenAI responses.
|
|
|
|
|
|
:param id: Unique identifier for this tool call
|
|
|
|
:param queries: List of search queries executed
|
|
|
|
:param status: Current status of the file search operation
|
|
|
|
:param type: Tool call type identifier, always "file_search_call"
|
|
|
|
:param results: (Optional) Search results returned by the file search operation'
|
|
OpenAIResponseOutputMessageFileSearchToolCallResults:
|
|
properties:
|
|
attributes:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Attributes
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
filename:
|
|
type: string
|
|
title: Filename
|
|
score:
|
|
type: number
|
|
title: Score
|
|
text:
|
|
type: string
|
|
title: Text
|
|
type: object
|
|
required:
|
|
- attributes
|
|
- file_id
|
|
- filename
|
|
- score
|
|
- text
|
|
title: OpenAIResponseOutputMessageFileSearchToolCallResults
|
|
description: 'Search results returned by the file search operation.
|
|
|
|
|
|
:param attributes: (Optional) Key-value attributes associated with the file
|
|
|
|
:param file_id: Unique identifier of the file containing the result
|
|
|
|
:param filename: Name of the file containing the result
|
|
|
|
:param score: Relevance score for this search result (between 0 and 1)
|
|
|
|
:param text: Text content of the search result'
|
|
OpenAIResponseOutputMessageFunctionToolCall:
|
|
properties:
|
|
call_id:
|
|
type: string
|
|
title: Call Id
|
|
name:
|
|
type: string
|
|
title: Name
|
|
arguments:
|
|
type: string
|
|
title: Arguments
|
|
type:
|
|
type: string
|
|
const: function_call
|
|
title: Type
|
|
default: function_call
|
|
id:
|
|
title: Id
|
|
type: string
|
|
status:
|
|
title: Status
|
|
type: string
|
|
type: object
|
|
required:
|
|
- call_id
|
|
- name
|
|
- arguments
|
|
title: OpenAIResponseOutputMessageFunctionToolCall
|
|
description: 'Function tool call output message for OpenAI responses.
|
|
|
|
|
|
:param call_id: Unique identifier for the function call
|
|
|
|
:param name: Name of the function being called
|
|
|
|
:param arguments: JSON string containing the function arguments
|
|
|
|
:param type: Tool call type identifier, always "function_call"
|
|
|
|
:param id: (Optional) Additional identifier for the tool call
|
|
|
|
:param status: (Optional) Current status of the function call execution'
|
|
OpenAIResponseOutputMessageMCPCall:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
type:
|
|
type: string
|
|
const: mcp_call
|
|
title: Type
|
|
default: mcp_call
|
|
arguments:
|
|
type: string
|
|
title: Arguments
|
|
name:
|
|
type: string
|
|
title: Name
|
|
server_label:
|
|
type: string
|
|
title: Server Label
|
|
error:
|
|
title: Error
|
|
type: string
|
|
output:
|
|
title: Output
|
|
type: string
|
|
type: object
|
|
required:
|
|
- id
|
|
- arguments
|
|
- name
|
|
- server_label
|
|
title: OpenAIResponseOutputMessageMCPCall
|
|
description: 'Model Context Protocol (MCP) call output message for OpenAI responses.
|
|
|
|
|
|
:param id: Unique identifier for this MCP call
|
|
|
|
:param type: Tool call type identifier, always "mcp_call"
|
|
|
|
:param arguments: JSON string containing the MCP call arguments
|
|
|
|
:param name: Name of the MCP method being called
|
|
|
|
:param server_label: Label identifying the MCP server handling the call
|
|
|
|
:param error: (Optional) Error message if the MCP call failed
|
|
|
|
:param output: (Optional) Output result from the successful MCP call'
|
|
OpenAIResponseOutputMessageMCPListTools:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
type:
|
|
type: string
|
|
const: mcp_list_tools
|
|
title: Type
|
|
default: mcp_list_tools
|
|
server_label:
|
|
type: string
|
|
title: Server Label
|
|
tools:
|
|
items:
|
|
$ref: '#/components/schemas/MCPListToolsTool'
|
|
type: array
|
|
title: Tools
|
|
type: object
|
|
required:
|
|
- id
|
|
- server_label
|
|
- tools
|
|
title: OpenAIResponseOutputMessageMCPListTools
|
|
description: 'MCP list tools output message containing available tools from
|
|
an MCP server.
|
|
|
|
|
|
:param id: Unique identifier for this MCP list tools operation
|
|
|
|
:param type: Tool call type identifier, always "mcp_list_tools"
|
|
|
|
:param server_label: Label identifying the MCP server providing the tools
|
|
|
|
:param tools: List of available tools provided by the MCP server'
|
|
OpenAIResponseOutputMessageWebSearchToolCall:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
status:
|
|
type: string
|
|
title: Status
|
|
type:
|
|
type: string
|
|
const: web_search_call
|
|
title: Type
|
|
default: web_search_call
|
|
type: object
|
|
required:
|
|
- id
|
|
- status
|
|
title: OpenAIResponseOutputMessageWebSearchToolCall
|
|
description: 'Web search tool call output message for OpenAI responses.
|
|
|
|
|
|
:param id: Unique identifier for this tool call
|
|
|
|
:param status: Current status of the web search operation
|
|
|
|
:param type: Tool call type identifier, always "web_search_call"'
|
|
OpenAIResponsePrompt:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
variables:
|
|
title: Variables
|
|
additionalProperties:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
type: object
|
|
version:
|
|
title: Version
|
|
type: string
|
|
type: object
|
|
required:
|
|
- id
|
|
title: OpenAIResponsePrompt
|
|
      description: 'OpenAI-compatible Prompt object that is used in OpenAI responses.


        :param id: Unique identifier of the prompt template

        :param variables: Dictionary of variable names to OpenAIResponseInputMessageContent
        structure for template substitution. The substitution values can be strings
        or other Response input types such as images or files.

        :param version: Version number of the prompt to use (defaults to latest if
        not specified)'
|
|
OpenAIResponseText:
|
|
properties:
|
|
format:
|
|
$ref: '#/components/schemas/OpenAIResponseTextFormat'
|
|
type: object
|
|
title: OpenAIResponseText
|
|
description: 'Text response configuration for OpenAI responses.
|
|
|
|
|
|
:param format: (Optional) Text format configuration specifying output format
|
|
requirements'
|
|
OpenAIResponseTextFormat:
|
|
properties:
|
|
type:
|
|
anyOf:
|
|
- type: string
|
|
const: text
|
|
- type: string
|
|
const: json_schema
|
|
- type: string
|
|
const: json_object
|
|
title: Type
|
|
name:
|
|
title: Name
|
|
type: string
|
|
schema:
|
|
title: Schema
|
|
additionalProperties: true
|
|
type: object
|
|
description:
|
|
title: Description
|
|
type: string
|
|
strict:
|
|
title: Strict
|
|
type: boolean
|
|
type: object
|
|
title: OpenAIResponseTextFormat
|
|
description: 'Configuration for Responses API text format.
|
|
|
|
|
|
:param type: Must be "text", "json_schema", or "json_object" to identify the
|
|
format type
|
|
|
|
:param name: The name of the response format. Only used for json_schema.
|
|
|
|
:param schema: The JSON schema the response should conform to. In a Python
|
|
SDK, this is often a `pydantic` model. Only used for json_schema.
|
|
|
|
:param description: (Optional) A description of the response format. Only
|
|
used for json_schema.
|
|
|
|
:param strict: (Optional) Whether to strictly enforce the JSON schema. If
|
|
true, the response must match the schema exactly. Only used for json_schema.'
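      # Illustrative example for structured JSON output; the "city_info" schema is
      # a made-up placeholder:
      #   {"type": "json_schema", "name": "city_info", "strict": true,
      #    "schema": {"type": "object",
      #               "properties": {"city": {"type": "string"}},
      #               "required": ["city"]}}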
|
|
OpenAIResponseToolMCP:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: mcp
|
|
title: Type
|
|
default: mcp
|
|
server_label:
|
|
type: string
|
|
title: Server Label
|
|
allowed_tools:
|
|
anyOf:
|
|
- items:
|
|
type: string
|
|
type: array
|
|
- $ref: '#/components/schemas/AllowedToolsFilter'
|
|
title: Allowed Tools
|
|
type: object
|
|
required:
|
|
- server_label
|
|
title: OpenAIResponseToolMCP
|
|
description: 'Model Context Protocol (MCP) tool configuration for OpenAI response
|
|
object.
|
|
|
|
|
|
:param type: Tool type identifier, always "mcp"
|
|
|
|
:param server_label: Label to identify this MCP server
|
|
|
|
:param allowed_tools: (Optional) Restriction on which tools can be used from
|
|
this server'
|
|
OpenAIResponseUsage:
|
|
properties:
|
|
input_tokens:
|
|
type: integer
|
|
title: Input Tokens
|
|
output_tokens:
|
|
type: integer
|
|
title: Output Tokens
|
|
total_tokens:
|
|
type: integer
|
|
title: Total Tokens
|
|
input_tokens_details:
|
|
$ref: '#/components/schemas/OpenAIResponseUsageInputTokensDetails'
|
|
output_tokens_details:
|
|
$ref: '#/components/schemas/OpenAIResponseUsageOutputTokensDetails'
|
|
type: object
|
|
required:
|
|
- input_tokens
|
|
- output_tokens
|
|
- total_tokens
|
|
title: OpenAIResponseUsage
|
|
description: 'Usage information for OpenAI response.
|
|
|
|
|
|
:param input_tokens: Number of tokens in the input
|
|
|
|
:param output_tokens: Number of tokens in the output
|
|
|
|
:param total_tokens: Total tokens used (input + output)
|
|
|
|
:param input_tokens_details: Detailed breakdown of input token usage
|
|
|
|
:param output_tokens_details: Detailed breakdown of output token usage'
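      # Illustrative example; token counts are placeholders:
      #   {"input_tokens": 120, "output_tokens": 48, "total_tokens": 168,
      #    "input_tokens_details": {"cached_tokens": 0},
      #    "output_tokens_details": {"reasoning_tokens": 0}}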
|
|
OpenAIResponseUsageInputTokensDetails:
|
|
properties:
|
|
cached_tokens:
|
|
title: Cached Tokens
|
|
type: integer
|
|
type: object
|
|
title: OpenAIResponseUsageInputTokensDetails
|
|
description: 'Token details for input tokens in OpenAI response usage.
|
|
|
|
|
|
:param cached_tokens: Number of tokens retrieved from cache'
|
|
OpenAIResponseUsageOutputTokensDetails:
|
|
properties:
|
|
reasoning_tokens:
|
|
title: Reasoning Tokens
|
|
type: integer
|
|
type: object
|
|
title: OpenAIResponseUsageOutputTokensDetails
|
|
description: 'Token details for output tokens in OpenAI response usage.
|
|
|
|
|
|
:param reasoning_tokens: Number of tokens used for reasoning (o1/o3 models)'
|
|
OpenAISystemMessageParam:
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: system
|
|
title: Role
|
|
default: system
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
type: array
|
|
title: Content
|
|
name:
|
|
title: Name
|
|
type: string
|
|
type: object
|
|
required:
|
|
- content
|
|
title: OpenAISystemMessageParam
|
|
description: 'A system message providing instructions or context to the model.
|
|
|
|
|
|
:param role: Must be "system" to identify this as a system message
|
|
|
|
:param content: The content of the "system prompt". If multiple system messages
|
|
are provided, they are concatenated. The underlying Llama Stack code may also
|
|
add other system messages (for example, for formatting tool definitions).
|
|
|
|
:param name: (Optional) The name of the system message participant.'
|
|
OpenAITokenLogProb:
|
|
properties:
|
|
token:
|
|
type: string
|
|
title: Token
|
|
bytes:
|
|
title: Bytes
|
|
items:
|
|
type: integer
|
|
type: array
|
|
logprob:
|
|
type: number
|
|
title: Logprob
|
|
top_logprobs:
|
|
items:
|
|
$ref: '#/components/schemas/OpenAITopLogProb'
|
|
type: array
|
|
title: Top Logprobs
|
|
type: object
|
|
required:
|
|
- token
|
|
- logprob
|
|
- top_logprobs
|
|
title: OpenAITokenLogProb
|
|
      description: 'The log probability for a token from an OpenAI-compatible chat
        completion response.


        :param token: The token

        :param bytes: (Optional) The bytes for the token

        :param logprob: The log probability of the token

        :param top_logprobs: The top log probabilities for the token'
|
|
OpenAIToolMessageParam:
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: tool
|
|
title: Role
|
|
default: tool
|
|
tool_call_id:
|
|
type: string
|
|
title: Tool Call Id
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
type: array
|
|
title: Content
|
|
type: object
|
|
required:
|
|
- tool_call_id
|
|
- content
|
|
title: OpenAIToolMessageParam
|
|
description: 'A message representing the result of a tool invocation in an OpenAI-compatible
|
|
chat completion request.
|
|
|
|
|
|
:param role: Must be "tool" to identify this as a tool response
|
|
|
|
:param tool_call_id: Unique identifier for the tool call this response is
|
|
for
|
|
|
|
:param content: The response content from the tool'
|
|
OpenAITopLogProb:
|
|
properties:
|
|
token:
|
|
type: string
|
|
title: Token
|
|
bytes:
|
|
title: Bytes
|
|
items:
|
|
type: integer
|
|
type: array
|
|
logprob:
|
|
type: number
|
|
title: Logprob
|
|
type: object
|
|
required:
|
|
- token
|
|
- logprob
|
|
title: OpenAITopLogProb
|
|
      description: 'The top log probability for a token from an OpenAI-compatible
        chat completion response.


        :param token: The token

        :param bytes: (Optional) The bytes for the token

        :param logprob: The log probability of the token'
|
|
OpenAIUserMessageParam-Input:
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: user
|
|
title: Role
|
|
default: user
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
- $ref: '#/components/schemas/OpenAIFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file: '#/components/schemas/OpenAIFile'
|
|
image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
type: array
|
|
title: Content
|
|
name:
|
|
title: Name
|
|
type: string
|
|
type: object
|
|
required:
|
|
- content
|
|
title: OpenAIUserMessageParam
|
|
description: 'A message from the user in an OpenAI-compatible chat completion
|
|
request.
|
|
|
|
|
|
:param role: Must be "user" to identify this as a user message
|
|
|
|
:param content: The content of the message, which can include text and other
|
|
media
|
|
|
|
:param name: (Optional) The name of the user message participant.'
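      # Illustrative example mixing text and image parts; the image URL is a
      # placeholder and the part layout assumes the OpenAI-compatible content-part
      # schemas referenced above:
      #   {"role": "user",
      #    "content": [{"type": "text", "text": "What is in this picture?"},
      #                {"type": "image_url",
      #                 "image_url": {"url": "https://example.com/cat.png"}}]}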
|
|
OpenAIUserMessageParam-Output:
|
|
properties:
|
|
role:
|
|
type: string
|
|
const: user
|
|
title: Role
|
|
default: user
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
- $ref: '#/components/schemas/OpenAIFile'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file: '#/components/schemas/OpenAIFile'
|
|
image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
type: array
|
|
title: Content
|
|
name:
|
|
title: Name
|
|
type: string
|
|
type: object
|
|
required:
|
|
- content
|
|
title: OpenAIUserMessageParam
|
|
description: 'A message from the user in an OpenAI-compatible chat completion
|
|
request.
|
|
|
|
|
|
:param role: Must be "user" to identify this as a user message
|
|
|
|
:param content: The content of the message, which can include text and other
|
|
media
|
|
|
|
:param name: (Optional) The name of the user message participant.'
|
|
    Order:
      type: string
      enum:
        - asc
        - desc
      title: Order
      description: 'Sort order for paginated responses.

        :cvar asc: Ascending order

        :cvar desc: Descending order'
|
|
OutputTokensDetails:
|
|
properties:
|
|
reasoning_tokens:
|
|
type: integer
|
|
title: Reasoning Tokens
|
|
additionalProperties: true
|
|
type: object
|
|
required:
|
|
- reasoning_tokens
|
|
title: OutputTokensDetails
|
|
Prompt:
|
|
properties:
|
|
prompt:
|
|
title: Prompt
|
|
description: The system prompt with variable placeholders
|
|
type: string
|
|
version:
|
|
type: integer
|
|
minimum: 1.0
|
|
title: Version
|
|
description: Version (integer starting at 1, incremented on save)
|
|
prompt_id:
|
|
type: string
|
|
title: Prompt Id
|
|
description: Unique identifier in format 'pmpt_<48-digit-hash>'
|
|
variables:
|
|
items:
|
|
type: string
|
|
type: array
|
|
title: Variables
|
|
description: List of variable names that can be used in the prompt template
|
|
is_default:
|
|
type: boolean
|
|
title: Is Default
|
|
description: Boolean indicating whether this version is the default version
|
|
default: false
|
|
type: object
|
|
required:
|
|
- version
|
|
- prompt_id
|
|
title: Prompt
|
|
description: 'A prompt resource representing a stored OpenAI Compatible prompt
|
|
template in Llama Stack.
|
|
|
|
|
|
:param prompt: The system prompt text with variable placeholders. Variables
|
|
are only supported when using the Responses API.
|
|
|
|
:param version: Version (integer starting at 1, incremented on save)
|
|
|
|
:param prompt_id: Unique identifier formatted as ''pmpt_<48-digit-hash>''
|
|
|
|
:param variables: List of prompt variable names that can be used in the prompt
|
|
template
|
|
|
|
:param is_default: Boolean indicating whether this version is the default
|
|
version for this prompt'
|
|
ProviderInfo:
|
|
properties:
|
|
api:
|
|
type: string
|
|
title: Api
|
|
provider_id:
|
|
type: string
|
|
title: Provider Id
|
|
provider_type:
|
|
type: string
|
|
title: Provider Type
|
|
config:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Config
|
|
health:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Health
|
|
type: object
|
|
required:
|
|
- api
|
|
- provider_id
|
|
- provider_type
|
|
- config
|
|
- health
|
|
title: ProviderInfo
|
|
description: 'Information about a registered provider including its configuration
|
|
and health status.
|
|
|
|
|
|
:param api: The API name this provider implements
|
|
|
|
:param provider_id: Unique identifier for the provider
|
|
|
|
:param provider_type: The type of provider implementation
|
|
|
|
:param config: Configuration parameters for the provider
|
|
|
|
:param health: Current health status of the provider'
|
|
QueryChunksResponse:
|
|
properties:
|
|
chunks:
|
|
items:
|
|
$ref: '#/components/schemas/Chunk-Output'
|
|
type: array
|
|
title: Chunks
|
|
scores:
|
|
items:
|
|
type: number
|
|
type: array
|
|
title: Scores
|
|
type: object
|
|
required:
|
|
- chunks
|
|
- scores
|
|
title: QueryChunksResponse
|
|
description: 'Response from querying chunks in a vector database.
|
|
|
|
|
|
:param chunks: List of content chunks returned from the query
|
|
|
|
:param scores: Relevance scores corresponding to each returned chunk'
|
|
RAGQueryConfig:
|
|
properties:
|
|
query_generator_config:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/DefaultRAGQueryGeneratorConfig'
|
|
- $ref: '#/components/schemas/LLMRAGQueryGeneratorConfig'
|
|
title: Query Generator Config
|
|
default:
|
|
type: default
|
|
separator: ' '
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
default: '#/components/schemas/DefaultRAGQueryGeneratorConfig'
|
|
llm: '#/components/schemas/LLMRAGQueryGeneratorConfig'
|
|
max_tokens_in_context:
|
|
type: integer
|
|
title: Max Tokens In Context
|
|
default: 4096
|
|
max_chunks:
|
|
type: integer
|
|
title: Max Chunks
|
|
default: 5
|
|
chunk_template:
|
|
type: string
|
|
title: Chunk Template
|
|
default: 'Result {index}
|
|
|
|
Content: {chunk.content}
|
|
|
|
Metadata: {metadata}
|
|
|
|
'
|
|
mode:
|
|
default: vector
|
|
$ref: '#/components/schemas/RAGSearchMode'
|
|
ranker:
|
|
title: Ranker
|
|
oneOf:
|
|
- $ref: '#/components/schemas/RRFRanker'
|
|
- $ref: '#/components/schemas/WeightedRanker'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
rrf: '#/components/schemas/RRFRanker'
|
|
weighted: '#/components/schemas/WeightedRanker'
|
|
type: object
|
|
title: RAGQueryConfig
|
|
description: "Configuration for the RAG query generation.\n\n:param query_generator_config:\
|
|
\ Configuration for the query generator.\n:param max_tokens_in_context: Maximum\
|
|
\ number of tokens in the context.\n:param max_chunks: Maximum number of chunks\
|
|
\ to retrieve.\n:param chunk_template: Template for formatting each retrieved\
|
|
\ chunk in the context.\n Available placeholders: {index} (1-based chunk\
|
|
\ ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata\
|
|
\ dict).\n Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata:\
|
|
\ {metadata}\\n\"\n:param mode: Search mode for retrieval\u2014either \"vector\"\
|
|
, \"keyword\", or \"hybrid\". Default \"vector\".\n:param ranker: Configuration\
|
|
\ for the ranker to use in hybrid search. Defaults to RRF ranker."
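      # Illustrative configuration for hybrid retrieval; values are placeholders:
      #   {"max_tokens_in_context": 4096, "max_chunks": 5, "mode": "hybrid",
      #    "ranker": {"type": "rrf", "impact_factor": 60.0}}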
|
|
RAGQueryResult:
|
|
properties:
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- oneOf:
|
|
- $ref: '#/components/schemas/ImageContentItem-Output'
|
|
- $ref: '#/components/schemas/TextContentItem'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
image: '#/components/schemas/ImageContentItem-Output'
|
|
text: '#/components/schemas/TextContentItem'
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/ImageContentItem-Output'
|
|
- $ref: '#/components/schemas/TextContentItem'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
image: '#/components/schemas/ImageContentItem-Output'
|
|
text: '#/components/schemas/TextContentItem'
|
|
type: array
|
|
title: Content
|
|
metadata:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Metadata
|
|
type: object
|
|
title: RAGQueryResult
|
|
description: 'Result of a RAG query containing retrieved content and metadata.
|
|
|
|
|
|
:param content: (Optional) The retrieved content from the query
|
|
|
|
:param metadata: Additional metadata about the query result'
|
|
RAGSearchMode:
|
|
type: string
|
|
enum:
|
|
- vector
|
|
- keyword
|
|
- hybrid
|
|
title: RAGSearchMode
|
|
description: 'Search modes for RAG query retrieval:
|
|
|
|
- VECTOR: Uses vector similarity search for semantic matching
|
|
|
|
- KEYWORD: Uses keyword-based search for exact matching
|
|
|
|
- HYBRID: Combines both vector and keyword search for better results'
|
|
RRFRanker:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: rrf
|
|
title: Type
|
|
default: rrf
|
|
impact_factor:
|
|
type: number
|
|
title: Impact Factor
|
|
default: 60.0
|
|
minimum: 0.0
|
|
type: object
|
|
title: RRFRanker
|
|
description: "Reciprocal Rank Fusion (RRF) ranker configuration.\n\n:param type:\
|
|
\ The type of ranker, always \"rrf\"\n:param impact_factor: The impact factor\
|
|
\ for RRF scoring. Higher values give more weight to higher-ranked results.\n\
|
|
\ Must be greater than 0"
|
|
RegexParserScoringFnParams:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: regex_parser
|
|
title: Type
|
|
default: regex_parser
|
|
parsing_regexes:
|
|
items:
|
|
type: string
|
|
type: array
|
|
title: Parsing Regexes
|
|
description: Regex to extract the answer from generated response
|
|
aggregation_functions:
|
|
items:
|
|
$ref: '#/components/schemas/AggregationFunctionType'
|
|
type: array
|
|
title: Aggregation Functions
|
|
description: Aggregation functions to apply to the scores of each row
|
|
type: object
|
|
title: RegexParserScoringFnParams
|
|
description: 'Parameters for regex parser scoring function configuration.
|
|
|
|
:param type: The type of scoring function parameters, always regex_parser
|
|
|
|
:param parsing_regexes: Regex to extract the answer from generated response
|
|
|
|
:param aggregation_functions: Aggregation functions to apply to the scores
|
|
of each row'
|
|
RouteInfo:
|
|
properties:
|
|
route:
|
|
type: string
|
|
title: Route
|
|
method:
|
|
type: string
|
|
title: Method
|
|
provider_types:
|
|
items:
|
|
type: string
|
|
type: array
|
|
title: Provider Types
|
|
type: object
|
|
required:
|
|
- route
|
|
- method
|
|
- provider_types
|
|
title: RouteInfo
|
|
description: 'Information about an API route including its path, method, and
|
|
implementing providers.
|
|
|
|
|
|
:param route: The API endpoint path
|
|
|
|
:param method: HTTP method for the route
|
|
|
|
:param provider_types: List of provider types that implement this route'
|
|
RunShieldResponse:
|
|
properties:
|
|
violation:
|
|
$ref: '#/components/schemas/SafetyViolation'
|
|
type: object
|
|
title: RunShieldResponse
|
|
description: 'Response from running a safety shield.
|
|
|
|
|
|
:param violation: (Optional) Safety violation detected by the shield, if any'
|
|
SafetyViolation:
|
|
properties:
|
|
violation_level:
|
|
$ref: '#/components/schemas/ViolationLevel'
|
|
user_message:
|
|
title: User Message
|
|
type: string
|
|
metadata:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Metadata
|
|
type: object
|
|
required:
|
|
- violation_level
|
|
title: SafetyViolation
|
|
description: 'Details of a safety violation detected by content moderation.
|
|
|
|
|
|
:param violation_level: Severity level of the violation
|
|
|
|
:param user_message: (Optional) Message to convey to the user about the violation
|
|
|
|
:param metadata: Additional metadata including specific violation codes for
|
|
debugging and telemetry'
|
|
ScoreBatchResponse:
|
|
properties:
|
|
dataset_id:
|
|
title: Dataset Id
|
|
type: string
|
|
results:
|
|
additionalProperties:
|
|
$ref: '#/components/schemas/ScoringResult'
|
|
type: object
|
|
title: Results
|
|
type: object
|
|
required:
|
|
- results
|
|
title: ScoreBatchResponse
|
|
description: 'Response from batch scoring operations on datasets.
|
|
|
|
|
|
:param dataset_id: (Optional) The identifier of the dataset that was scored
|
|
|
|
:param results: A map of scoring function name to ScoringResult'
|
|
ScoreResponse:
|
|
properties:
|
|
results:
|
|
additionalProperties:
|
|
$ref: '#/components/schemas/ScoringResult'
|
|
type: object
|
|
title: Results
|
|
type: object
|
|
required:
|
|
- results
|
|
title: ScoreResponse
|
|
description: 'The response from scoring.
|
|
|
|
|
|
:param results: A map of scoring function name to ScoringResult.'
|
|
ScoringFn-Output:
|
|
properties:
|
|
identifier:
|
|
type: string
|
|
title: Identifier
|
|
description: Unique identifier for this resource in llama stack
|
|
provider_resource_id:
|
|
title: Provider Resource Id
|
|
description: Unique identifier for this resource in the provider
|
|
type: string
|
|
provider_id:
|
|
type: string
|
|
title: Provider Id
|
|
description: ID of the provider that owns this resource
|
|
type:
|
|
type: string
|
|
const: scoring_function
|
|
title: Type
|
|
default: scoring_function
|
|
description:
|
|
title: Description
|
|
type: string
|
|
metadata:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Metadata
|
|
description: Any additional metadata for this definition
|
|
return_type:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/StringType'
|
|
- $ref: '#/components/schemas/NumberType'
|
|
- $ref: '#/components/schemas/BooleanType'
|
|
- $ref: '#/components/schemas/ArrayType'
|
|
- $ref: '#/components/schemas/ObjectType'
|
|
- $ref: '#/components/schemas/JsonType'
|
|
- $ref: '#/components/schemas/UnionType'
|
|
- $ref: '#/components/schemas/ChatCompletionInputType'
|
|
- $ref: '#/components/schemas/CompletionInputType'
|
|
- $ref: '#/components/schemas/AgentTurnInputType'
|
|
title: Return Type
|
|
description: The return type of the deterministic function
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
agent_turn_input: '#/components/schemas/AgentTurnInputType'
|
|
array: '#/components/schemas/ArrayType'
|
|
boolean: '#/components/schemas/BooleanType'
|
|
chat_completion_input: '#/components/schemas/ChatCompletionInputType'
|
|
completion_input: '#/components/schemas/CompletionInputType'
|
|
json: '#/components/schemas/JsonType'
|
|
number: '#/components/schemas/NumberType'
|
|
object: '#/components/schemas/ObjectType'
|
|
string: '#/components/schemas/StringType'
|
|
union: '#/components/schemas/UnionType'
|
|
params:
|
|
title: Params
|
|
description: The parameters for the scoring function for benchmark eval,
|
|
these can be overridden for app eval
|
|
oneOf:
|
|
- $ref: '#/components/schemas/LLMAsJudgeScoringFnParams'
|
|
- $ref: '#/components/schemas/RegexParserScoringFnParams'
|
|
- $ref: '#/components/schemas/BasicScoringFnParams'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
basic: '#/components/schemas/BasicScoringFnParams'
|
|
llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams'
|
|
regex_parser: '#/components/schemas/RegexParserScoringFnParams'
|
|
type: object
|
|
required:
|
|
- identifier
|
|
- provider_id
|
|
- return_type
|
|
title: ScoringFn
|
|
description: 'A scoring function resource for evaluating model outputs.
|
|
|
|
:param type: The resource type, always scoring_function'
|
|
ScoringResult:
|
|
properties:
|
|
score_rows:
|
|
items:
|
|
additionalProperties: true
|
|
type: object
|
|
type: array
|
|
title: Score Rows
|
|
aggregated_results:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Aggregated Results
|
|
type: object
|
|
required:
|
|
- score_rows
|
|
- aggregated_results
|
|
title: ScoringResult
|
|
description: 'A scoring result for a single row.
|
|
|
|
|
|
:param score_rows: The scoring result for each row. Each row is a map of column
|
|
name to value.
|
|
|
|
:param aggregated_results: Map of metric name to aggregated value'
|
|
SearchRankingOptions:
|
|
properties:
|
|
ranker:
|
|
title: Ranker
|
|
type: string
|
|
score_threshold:
|
|
title: Score Threshold
|
|
default: 0.0
|
|
type: number
|
|
type: object
|
|
title: SearchRankingOptions
|
|
description: 'Options for ranking and filtering search results.
|
|
|
|
|
|
:param ranker: (Optional) Name of the ranking algorithm to use
|
|
|
|
:param score_threshold: (Optional) Minimum relevance score threshold for results'
|
|
Shield:
|
|
properties:
|
|
identifier:
|
|
type: string
|
|
title: Identifier
|
|
description: Unique identifier for this resource in llama stack
|
|
provider_resource_id:
|
|
title: Provider Resource Id
|
|
description: Unique identifier for this resource in the provider
|
|
type: string
|
|
provider_id:
|
|
type: string
|
|
title: Provider Id
|
|
description: ID of the provider that owns this resource
|
|
type:
|
|
type: string
|
|
const: shield
|
|
title: Type
|
|
default: shield
|
|
params:
|
|
title: Params
|
|
additionalProperties: true
|
|
type: object
|
|
type: object
|
|
required:
|
|
- identifier
|
|
- provider_id
|
|
title: Shield
|
|
description: 'A safety shield resource that can be used to check content.
|
|
|
|
|
|
:param params: (Optional) Configuration parameters for the shield
|
|
|
|
:param type: The resource type, always shield'
|
|
StringType:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: string
|
|
title: Type
|
|
default: string
|
|
type: object
|
|
title: StringType
|
|
description: 'Parameter type for string values.
|
|
|
|
|
|
:param type: Discriminator type. Always "string"'
|
|
TextContentItem:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: text
|
|
title: Type
|
|
default: text
|
|
text:
|
|
type: string
|
|
title: Text
|
|
type: object
|
|
required:
|
|
- text
|
|
title: TextContentItem
|
|
description: 'A text content item
|
|
|
|
|
|
:param type: Discriminator type of the content item. Always "text"
|
|
|
|
:param text: Text content'
|
|
ToolDef:
|
|
properties:
|
|
toolgroup_id:
|
|
title: Toolgroup Id
|
|
type: string
|
|
name:
|
|
type: string
|
|
title: Name
|
|
description:
|
|
title: Description
|
|
type: string
|
|
input_schema:
|
|
title: Input Schema
|
|
additionalProperties: true
|
|
type: object
|
|
output_schema:
|
|
title: Output Schema
|
|
additionalProperties: true
|
|
type: object
|
|
metadata:
|
|
title: Metadata
|
|
additionalProperties: true
|
|
type: object
|
|
type: object
|
|
required:
|
|
- name
|
|
title: ToolDef
|
|
description: 'Tool definition used in runtime contexts.
|
|
|
|
|
|
:param name: Name of the tool
|
|
|
|
:param description: (Optional) Human-readable description of what the tool
|
|
does
|
|
|
|
:param input_schema: (Optional) JSON Schema for tool inputs (MCP inputSchema)
|
|
|
|
:param output_schema: (Optional) JSON Schema for tool outputs (MCP outputSchema)
|
|
|
|
:param metadata: (Optional) Additional metadata about the tool
|
|
|
|
:param toolgroup_id: (Optional) ID of the tool group this tool belongs to'
|
|
ToolGroup:
|
|
properties:
|
|
identifier:
|
|
type: string
|
|
title: Identifier
|
|
description: Unique identifier for this resource in llama stack
|
|
provider_resource_id:
|
|
title: Provider Resource Id
|
|
description: Unique identifier for this resource in the provider
|
|
type: string
|
|
provider_id:
|
|
type: string
|
|
title: Provider Id
|
|
description: ID of the provider that owns this resource
|
|
type:
|
|
type: string
|
|
const: tool_group
|
|
title: Type
|
|
default: tool_group
|
|
mcp_endpoint:
|
|
$ref: '#/components/schemas/URL'
|
|
args:
|
|
title: Args
|
|
additionalProperties: true
|
|
type: object
|
|
type: object
|
|
required:
|
|
- identifier
|
|
- provider_id
|
|
title: ToolGroup
|
|
description: 'A group of related tools managed together.
|
|
|
|
|
|
:param type: Type of resource, always ''tool_group''
|
|
|
|
:param mcp_endpoint: (Optional) Model Context Protocol endpoint for remote
|
|
tools
|
|
|
|
:param args: (Optional) Additional arguments for the tool group'
|
|
ToolInvocationResult:
|
|
properties:
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- oneOf:
|
|
- $ref: '#/components/schemas/ImageContentItem-Output'
|
|
- $ref: '#/components/schemas/TextContentItem'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
image: '#/components/schemas/ImageContentItem-Output'
|
|
text: '#/components/schemas/TextContentItem'
|
|
- items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/ImageContentItem-Output'
|
|
- $ref: '#/components/schemas/TextContentItem'
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
image: '#/components/schemas/ImageContentItem-Output'
|
|
text: '#/components/schemas/TextContentItem'
|
|
type: array
|
|
title: Content
|
|
error_message:
|
|
title: Error Message
|
|
type: string
|
|
error_code:
|
|
title: Error Code
|
|
type: integer
|
|
metadata:
|
|
title: Metadata
|
|
additionalProperties: true
|
|
type: object
|
|
type: object
|
|
title: ToolInvocationResult
|
|
description: 'Result of a tool invocation.
|
|
|
|
|
|
:param content: (Optional) The output content from the tool execution
|
|
|
|
:param error_message: (Optional) Error message if the tool execution failed
|
|
|
|
:param error_code: (Optional) Numeric error code if the tool execution failed
|
|
|
|
:param metadata: (Optional) Additional metadata about the tool execution'
|
|
URL:
|
|
properties:
|
|
uri:
|
|
type: string
|
|
title: Uri
|
|
type: object
|
|
required:
|
|
- uri
|
|
title: URL
|
|
description: 'A URL reference to external content.
|
|
|
|
|
|
:param uri: The URL string pointing to the resource'
|
|
UnionType:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: union
|
|
title: Type
|
|
default: union
|
|
type: object
|
|
title: UnionType
|
|
description: 'Parameter type for union values.
|
|
|
|
|
|
:param type: Discriminator type. Always "union"'
|
|
VectorStoreChunkingStrategyAuto:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: auto
|
|
title: Type
|
|
default: auto
|
|
type: object
|
|
title: VectorStoreChunkingStrategyAuto
|
|
description: 'Automatic chunking strategy for vector store files.
|
|
|
|
|
|
:param type: Strategy type, always "auto" for automatic chunking'
|
|
VectorStoreChunkingStrategyStatic:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: static
|
|
title: Type
|
|
default: static
|
|
static:
|
|
$ref: '#/components/schemas/VectorStoreChunkingStrategyStaticConfig'
|
|
type: object
|
|
required:
|
|
- static
|
|
title: VectorStoreChunkingStrategyStatic
|
|
description: 'Static chunking strategy with configurable parameters.
|
|
|
|
|
|
:param type: Strategy type, always "static" for static chunking
|
|
|
|
:param static: Configuration parameters for the static chunking strategy'
|
|
VectorStoreChunkingStrategyStaticConfig:
|
|
properties:
|
|
chunk_overlap_tokens:
|
|
type: integer
|
|
title: Chunk Overlap Tokens
|
|
default: 400
|
|
max_chunk_size_tokens:
|
|
type: integer
|
|
maximum: 4096.0
|
|
minimum: 100.0
|
|
title: Max Chunk Size Tokens
|
|
default: 800
|
|
type: object
|
|
title: VectorStoreChunkingStrategyStaticConfig
|
|
description: 'Configuration for static chunking strategy.
|
|
|
|
|
|
:param chunk_overlap_tokens: Number of tokens to overlap between adjacent
|
|
chunks
|
|
|
|
:param max_chunk_size_tokens: Maximum number of tokens per chunk, must be
|
|
between 100 and 4096'
|
|
VectorStoreContent:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: text
|
|
title: Type
|
|
text:
|
|
type: string
|
|
title: Text
|
|
type: object
|
|
required:
|
|
- type
|
|
- text
|
|
title: VectorStoreContent
|
|
description: 'Content item from a vector store file or search result.
|
|
|
|
|
|
:param type: Content type, currently only "text" is supported
|
|
|
|
:param text: The actual text content'
|
|
VectorStoreFileBatchObject:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
object:
|
|
type: string
|
|
title: Object
|
|
default: vector_store.file_batch
|
|
created_at:
|
|
type: integer
|
|
title: Created At
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
status:
|
|
anyOf:
|
|
- type: string
|
|
const: completed
|
|
- type: string
|
|
const: in_progress
|
|
- type: string
|
|
const: cancelled
|
|
- type: string
|
|
const: failed
|
|
title: Status
|
|
file_counts:
|
|
$ref: '#/components/schemas/VectorStoreFileCounts'
|
|
type: object
|
|
required:
|
|
- id
|
|
- created_at
|
|
- vector_store_id
|
|
- status
|
|
- file_counts
|
|
title: VectorStoreFileBatchObject
|
|
description: 'OpenAI Vector Store File Batch object.
|
|
|
|
|
|
:param id: Unique identifier for the file batch
|
|
|
|
:param object: Object type identifier, always "vector_store.file_batch"
|
|
|
|
:param created_at: Timestamp when the file batch was created
|
|
|
|
:param vector_store_id: ID of the vector store containing the file batch
|
|
|
|
:param status: Current processing status of the file batch
|
|
|
|
:param file_counts: File processing status counts for the batch'
|
|
VectorStoreFileCounts:
|
|
properties:
|
|
completed:
|
|
type: integer
|
|
title: Completed
|
|
cancelled:
|
|
type: integer
|
|
title: Cancelled
|
|
failed:
|
|
type: integer
|
|
title: Failed
|
|
in_progress:
|
|
type: integer
|
|
title: In Progress
|
|
total:
|
|
type: integer
|
|
title: Total
|
|
type: object
|
|
required:
|
|
- completed
|
|
- cancelled
|
|
- failed
|
|
- in_progress
|
|
- total
|
|
title: VectorStoreFileCounts
|
|
description: 'File processing status counts for a vector store.
|
|
|
|
|
|
:param completed: Number of files that have been successfully processed
|
|
|
|
:param cancelled: Number of files that had their processing cancelled
|
|
|
|
:param failed: Number of files that failed to process
|
|
|
|
:param in_progress: Number of files currently being processed
|
|
|
|
:param total: Total number of files in the vector store'
|
|
VectorStoreFileLastError:
|
|
properties:
|
|
code:
|
|
anyOf:
|
|
- type: string
|
|
const: server_error
|
|
- type: string
|
|
const: rate_limit_exceeded
|
|
title: Code
|
|
message:
|
|
type: string
|
|
title: Message
|
|
type: object
|
|
required:
|
|
- code
|
|
- message
|
|
title: VectorStoreFileLastError
|
|
description: 'Error information for failed vector store file processing.
|
|
|
|
|
|
:param code: Error code indicating the type of failure
|
|
|
|
:param message: Human-readable error message describing the failure'
|
|
VectorStoreFileObject:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
object:
|
|
type: string
|
|
title: Object
|
|
default: vector_store.file
|
|
attributes:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Attributes
|
|
chunking_strategy:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
|
|
- $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic'
|
|
title: Chunking Strategy
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
auto: '#/components/schemas/VectorStoreChunkingStrategyAuto'
|
|
static: '#/components/schemas/VectorStoreChunkingStrategyStatic'
|
|
created_at:
|
|
type: integer
|
|
title: Created At
|
|
last_error:
|
|
$ref: '#/components/schemas/VectorStoreFileLastError'
|
|
status:
|
|
anyOf:
|
|
- type: string
|
|
const: completed
|
|
- type: string
|
|
const: in_progress
|
|
- type: string
|
|
const: cancelled
|
|
- type: string
|
|
const: failed
|
|
title: Status
|
|
usage_bytes:
|
|
type: integer
|
|
title: Usage Bytes
|
|
default: 0
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
type: object
|
|
required:
|
|
- id
|
|
- chunking_strategy
|
|
- created_at
|
|
- status
|
|
- vector_store_id
|
|
title: VectorStoreFileObject
|
|
description: 'OpenAI Vector Store File object.
|
|
|
|
|
|
:param id: Unique identifier for the file
|
|
|
|
:param object: Object type identifier, always "vector_store.file"
|
|
|
|
:param attributes: Key-value attributes associated with the file
|
|
|
|
:param chunking_strategy: Strategy used for splitting the file into chunks
|
|
|
|
:param created_at: Timestamp when the file was added to the vector store
|
|
|
|
:param last_error: (Optional) Error information if file processing failed
|
|
|
|
:param status: Current processing status of the file
|
|
|
|
:param usage_bytes: Storage space used by this file in bytes
|
|
|
|
:param vector_store_id: ID of the vector store containing this file'
|
|
VectorStoreObject:
|
|
properties:
|
|
id:
|
|
type: string
|
|
title: Id
|
|
object:
|
|
type: string
|
|
title: Object
|
|
default: vector_store
|
|
created_at:
|
|
type: integer
|
|
title: Created At
|
|
name:
|
|
title: Name
|
|
type: string
|
|
usage_bytes:
|
|
type: integer
|
|
title: Usage Bytes
|
|
default: 0
|
|
file_counts:
|
|
$ref: '#/components/schemas/VectorStoreFileCounts'
|
|
status:
|
|
type: string
|
|
title: Status
|
|
default: completed
|
|
expires_after:
|
|
title: Expires After
|
|
additionalProperties: true
|
|
type: object
|
|
expires_at:
|
|
title: Expires At
|
|
type: integer
|
|
last_active_at:
|
|
title: Last Active At
|
|
type: integer
|
|
metadata:
|
|
additionalProperties: true
|
|
type: object
|
|
title: Metadata
|
|
type: object
|
|
required:
|
|
- id
|
|
- created_at
|
|
- file_counts
|
|
title: VectorStoreObject
|
|
description: 'OpenAI Vector Store object.
|
|
|
|
|
|
:param id: Unique identifier for the vector store
|
|
|
|
:param object: Object type identifier, always "vector_store"
|
|
|
|
:param created_at: Timestamp when the vector store was created
|
|
|
|
:param name: (Optional) Name of the vector store
|
|
|
|
:param usage_bytes: Storage space used by the vector store in bytes
|
|
|
|
:param file_counts: File processing status counts for the vector store
|
|
|
|
:param status: Current status of the vector store
|
|
|
|
:param expires_after: (Optional) Expiration policy for the vector store
|
|
|
|
:param expires_at: (Optional) Timestamp when the vector store will expire
|
|
|
|
:param last_active_at: (Optional) Timestamp of last activity on the vector
|
|
store
|
|
|
|
:param metadata: Set of key-value pairs that can be attached to the vector
|
|
store'
|
|
VectorStoreSearchResponse:
|
|
properties:
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
filename:
|
|
type: string
|
|
title: Filename
|
|
score:
|
|
type: number
|
|
title: Score
|
|
attributes:
|
|
title: Attributes
|
|
additionalProperties:
|
|
anyOf:
|
|
- type: string
|
|
- type: number
|
|
- type: boolean
|
|
type: object
|
|
content:
|
|
items:
|
|
$ref: '#/components/schemas/VectorStoreContent'
|
|
type: array
|
|
title: Content
|
|
type: object
|
|
required:
|
|
- file_id
|
|
- filename
|
|
- score
|
|
- content
|
|
title: VectorStoreSearchResponse
|
|
description: 'Response from searching a vector store.
|
|
|
|
|
|
:param file_id: Unique identifier of the file containing the result
|
|
|
|
:param filename: Name of the file containing the result
|
|
|
|
:param score: Relevance score for this search result
|
|
|
|
:param attributes: (Optional) Key-value attributes associated with the file
|
|
|
|
:param content: List of content items matching the search query'
|
|
VectorStoreSearchResponsePage:
|
|
properties:
|
|
object:
|
|
type: string
|
|
title: Object
|
|
default: vector_store.search_results.page
|
|
search_query:
|
|
type: string
|
|
title: Search Query
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/VectorStoreSearchResponse'
|
|
type: array
|
|
title: Data
|
|
has_more:
|
|
type: boolean
|
|
title: Has More
|
|
default: false
|
|
next_page:
|
|
title: Next Page
|
|
type: string
|
|
type: object
|
|
required:
|
|
- search_query
|
|
- data
|
|
title: VectorStoreSearchResponsePage
|
|
description: 'Paginated response from searching a vector store.
|
|
|
|
|
|
:param object: Object type identifier for the search results page
|
|
|
|
:param search_query: The original search query that was executed
|
|
|
|
:param data: List of search result objects
|
|
|
|
:param has_more: Whether there are more results available beyond this page
|
|
|
|
:param next_page: (Optional) Token for retrieving the next page of results'
|
|
VersionInfo:
|
|
properties:
|
|
version:
|
|
type: string
|
|
title: Version
|
|
type: object
|
|
required:
|
|
- version
|
|
title: VersionInfo
|
|
description: 'Version information for the service.
|
|
|
|
|
|
:param version: Version number of the service'
|
|
ViolationLevel:
|
|
type: string
|
|
enum:
|
|
- info
|
|
- warn
|
|
- error
|
|
title: ViolationLevel
|
|
description: 'Severity level of a safety violation.
|
|
|
|
|
|
:cvar INFO: Informational level violation that does not require action
|
|
|
|
:cvar WARN: Warning level violation that suggests caution but allows continuation
|
|
|
|
:cvar ERROR: Error level violation that requires blocking or intervention'
|
|
WeightedRanker:
|
|
properties:
|
|
type:
|
|
type: string
|
|
const: weighted
|
|
title: Type
|
|
default: weighted
|
|
alpha:
|
|
type: number
|
|
maximum: 1.0
|
|
minimum: 0.0
|
|
title: Alpha
|
|
description: Weight factor between 0 and 1. 0 means only keyword scores,
|
|
1 means only vector scores.
|
|
default: 0.5
|
|
type: object
|
|
title: WeightedRanker
|
|
description: "Weighted ranker configuration that combines vector and keyword\
|
|
\ scores.\n\n:param type: The type of ranker, always \"weighted\"\n:param\
|
|
\ alpha: Weight factor between 0 and 1.\n 0 means only use keyword\
|
|
\ scores,\n 1 means only use vector scores,\n values\
|
|
\ in between blend both scores."
|
|
_URLOrData:
|
|
properties:
|
|
url:
|
|
$ref: '#/components/schemas/URL'
|
|
data:
|
|
contentEncoding: base64
|
|
title: Data
|
|
type: string
|
|
type: object
|
|
title: _URLOrData
|
|
description: 'A URL or a base64 encoded string
|
|
|
|
|
|
:param url: A URL of the image or data URL in the format of data:image/{type};base64,{data}.
|
|
Note that URL could have length limits.
|
|
|
|
:param data: base64 encoded image data as string'
|
|
_batches_Request:
|
|
properties:
|
|
input_file_id:
|
|
type: string
|
|
title: Input File Id
|
|
endpoint:
|
|
type: string
|
|
title: Endpoint
|
|
completion_window:
|
|
type: string
|
|
title: Completion Window
|
|
metadata:
|
|
type: string
|
|
title: Metadata
|
|
idempotency_key:
|
|
type: string
|
|
title: Idempotency Key
|
|
type: object
|
|
required:
|
|
- input_file_id
|
|
- endpoint
|
|
- completion_window
|
|
- metadata
|
|
- idempotency_key
|
|
title: _batches_Request
|
|
_batches_batch_id_cancel_Request:
|
|
properties:
|
|
batch_id:
|
|
type: string
|
|
title: Batch Id
|
|
type: object
|
|
required:
|
|
- batch_id
|
|
title: _batches_batch_id_cancel_Request
|
|
_conversations_Request:
|
|
properties:
|
|
items:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage-Input'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
title: Items
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
function_call_output: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
mcp_approval_response: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
message: '#/components/schemas/OpenAIResponseMessage-Input'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
metadata:
|
|
type: string
|
|
title: Metadata
|
|
type: object
|
|
required:
|
|
- items
|
|
- metadata
|
|
title: _conversations_Request
|
|
_conversations_conversation_id_Request:
|
|
properties:
|
|
conversation_id:
|
|
type: string
|
|
title: Conversation Id
|
|
metadata:
|
|
type: string
|
|
title: Metadata
|
|
type: object
|
|
required:
|
|
- conversation_id
|
|
- metadata
|
|
title: _conversations_conversation_id_Request
|
|
_conversations_conversation_id_items_Request:
|
|
properties:
|
|
conversation_id:
|
|
type: string
|
|
title: Conversation Id
|
|
items:
|
|
anyOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage-Input'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
title: Items
|
|
type: object
|
|
required:
|
|
- conversation_id
|
|
- items
|
|
title: _conversations_conversation_id_items_Request
|
|
_models_Request:
|
|
properties:
|
|
model_id:
|
|
type: string
|
|
title: Model Id
|
|
provider_model_id:
|
|
type: string
|
|
title: Provider Model Id
|
|
provider_id:
|
|
type: string
|
|
title: Provider Id
|
|
metadata:
|
|
type: string
|
|
title: Metadata
|
|
model_type:
|
|
$ref: '#/components/schemas/ModelType'
|
|
type: object
|
|
required:
|
|
- model_id
|
|
- provider_model_id
|
|
- provider_id
|
|
- metadata
|
|
- model_type
|
|
title: _models_Request
|
|
_moderations_Request:
|
|
properties:
|
|
input:
|
|
type: string
|
|
title: Input
|
|
model:
|
|
type: string
|
|
title: Model
|
|
type: object
|
|
required:
|
|
- input
|
|
- model
|
|
title: _moderations_Request
|
|
_prompts_Request:
|
|
properties:
|
|
prompt:
|
|
type: string
|
|
title: Prompt
|
|
variables:
|
|
type: string
|
|
title: Variables
|
|
type: object
|
|
required:
|
|
- prompt
|
|
- variables
|
|
title: _prompts_Request
|
|
_prompts_prompt_id_Request:
|
|
properties:
|
|
prompt_id:
|
|
type: string
|
|
title: Prompt Id
|
|
prompt:
|
|
type: string
|
|
title: Prompt
|
|
version:
|
|
type: integer
|
|
title: Version
|
|
variables:
|
|
type: string
|
|
title: Variables
|
|
set_as_default:
|
|
type: boolean
|
|
title: Set As Default
|
|
default: true
|
|
type: object
|
|
required:
|
|
- prompt_id
|
|
- prompt
|
|
- version
|
|
- variables
|
|
title: _prompts_prompt_id_Request
|
|
_prompts_prompt_id_set_default_version_Request:
|
|
properties:
|
|
prompt_id:
|
|
type: string
|
|
title: Prompt Id
|
|
version:
|
|
type: integer
|
|
title: Version
|
|
type: object
|
|
required:
|
|
- prompt_id
|
|
- version
|
|
title: _prompts_prompt_id_set_default_version_Request
|
|
_responses_Request:
|
|
properties:
|
|
input:
|
|
type: string
|
|
title: Input
|
|
model:
|
|
type: string
|
|
title: Model
|
|
prompt:
|
|
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
|
instructions:
|
|
type: string
|
|
title: Instructions
|
|
previous_response_id:
|
|
type: string
|
|
title: Previous Response Id
|
|
conversation:
|
|
type: string
|
|
title: Conversation
|
|
store:
|
|
type: boolean
|
|
title: Store
|
|
default: true
|
|
stream:
|
|
type: boolean
|
|
title: Stream
|
|
default: false
|
|
temperature:
|
|
type: number
|
|
title: Temperature
|
|
text:
|
|
$ref: '#/components/schemas/OpenAIResponseText'
|
|
tools:
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolMCP'
|
|
title: Tools
|
|
discriminator:
|
|
propertyName: type
|
|
mapping:
|
|
file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
function: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
mcp: '#/components/schemas/OpenAIResponseInputToolMCP'
|
|
web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
web_search_preview: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
web_search_preview_2025_03_11: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
include:
|
|
type: string
|
|
title: Include
|
|
max_infer_iters:
|
|
type: integer
|
|
title: Max Infer Iters
|
|
default: 10
|
|
type: object
|
|
required:
|
|
- input
|
|
- model
|
|
- prompt
|
|
- instructions
|
|
- previous_response_id
|
|
- conversation
|
|
- temperature
|
|
- text
|
|
- tools
|
|
- include
|
|
title: _responses_Request
|
|
_scoring_score_Request:
|
|
properties:
|
|
input_rows:
|
|
type: string
|
|
title: Input Rows
|
|
scoring_functions:
|
|
type: string
|
|
title: Scoring Functions
|
|
type: object
|
|
required:
|
|
- input_rows
|
|
- scoring_functions
|
|
title: _scoring_score_Request
|
|
_scoring_score_batch_Request:
|
|
properties:
|
|
dataset_id:
|
|
type: string
|
|
title: Dataset Id
|
|
scoring_functions:
|
|
type: string
|
|
title: Scoring Functions
|
|
save_results_dataset:
|
|
type: boolean
|
|
title: Save Results Dataset
|
|
default: false
|
|
type: object
|
|
required:
|
|
- dataset_id
|
|
- scoring_functions
|
|
title: _scoring_score_batch_Request
|
|
_shields_Request:
|
|
properties:
|
|
shield_id:
|
|
type: string
|
|
title: Shield Id
|
|
provider_shield_id:
|
|
type: string
|
|
title: Provider Shield Id
|
|
provider_id:
|
|
type: string
|
|
title: Provider Id
|
|
params:
|
|
type: string
|
|
title: Params
|
|
type: object
|
|
required:
|
|
- shield_id
|
|
- provider_shield_id
|
|
- provider_id
|
|
- params
|
|
title: _shields_Request
|
|
_tool_runtime_invoke_Request:
|
|
properties:
|
|
tool_name:
|
|
type: string
|
|
title: Tool Name
|
|
kwargs:
|
|
type: string
|
|
title: Kwargs
|
|
type: object
|
|
required:
|
|
- tool_name
|
|
- kwargs
|
|
title: _tool_runtime_invoke_Request
|
|
_tool_runtime_rag_tool_query_Request:
|
|
properties:
|
|
content:
|
|
type: string
|
|
title: Content
|
|
vector_store_ids:
|
|
type: string
|
|
title: Vector Store Ids
|
|
query_config:
|
|
$ref: '#/components/schemas/RAGQueryConfig'
|
|
type: object
|
|
required:
|
|
- content
|
|
- vector_store_ids
|
|
- query_config
|
|
title: _tool_runtime_rag_tool_query_Request
|
|
_vector_io_query_Request:
|
|
properties:
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
query:
|
|
type: string
|
|
title: Query
|
|
params:
|
|
type: string
|
|
title: Params
|
|
type: object
|
|
required:
|
|
- vector_store_id
|
|
- query
|
|
- params
|
|
title: _vector_io_query_Request
|
|
_vector_stores_vector_store_id_Request:
|
|
properties:
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
name:
|
|
type: string
|
|
title: Name
|
|
expires_after:
|
|
type: string
|
|
title: Expires After
|
|
metadata:
|
|
type: string
|
|
title: Metadata
|
|
type: object
|
|
required:
|
|
- vector_store_id
|
|
- name
|
|
- expires_after
|
|
- metadata
|
|
title: _vector_stores_vector_store_id_Request
|
|
_vector_stores_vector_store_id_file_batches_batch_id_cancel_Request:
|
|
properties:
|
|
batch_id:
|
|
type: string
|
|
title: Batch Id
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
type: object
|
|
required:
|
|
- batch_id
|
|
- vector_store_id
|
|
title: _vector_stores_vector_store_id_file_batches_batch_id_cancel_Request
|
|
_vector_stores_vector_store_id_files_Request:
|
|
properties:
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
attributes:
|
|
type: string
|
|
title: Attributes
|
|
chunking_strategy:
|
|
anyOf:
|
|
- $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto'
|
|
- $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic'
|
|
title: Chunking Strategy
|
|
type: object
|
|
required:
|
|
- vector_store_id
|
|
- file_id
|
|
- attributes
|
|
- chunking_strategy
|
|
title: _vector_stores_vector_store_id_files_Request
|
|
_vector_stores_vector_store_id_files_file_id_Request:
|
|
properties:
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
file_id:
|
|
type: string
|
|
title: File Id
|
|
attributes:
|
|
type: string
|
|
title: Attributes
|
|
type: object
|
|
required:
|
|
- vector_store_id
|
|
- file_id
|
|
- attributes
|
|
title: _vector_stores_vector_store_id_files_file_id_Request
|
|
_vector_stores_vector_store_id_search_Request:
|
|
properties:
|
|
vector_store_id:
|
|
type: string
|
|
title: Vector Store Id
|
|
query:
|
|
type: string
|
|
title: Query
|
|
filters:
|
|
type: string
|
|
title: Filters
|
|
max_num_results:
|
|
type: integer
|
|
title: Max Num Results
|
|
default: 10
|
|
ranking_options:
|
|
$ref: '#/components/schemas/SearchRankingOptions'
|
|
rewrite_query:
|
|
type: boolean
|
|
title: Rewrite Query
|
|
default: false
|
|
search_mode:
|
|
type: string
|
|
title: Search Mode
|
|
default: vector
|
|
type: object
|
|
required:
|
|
- vector_store_id
|
|
- query
|
|
- filters
|
|
- ranking_options
|
|
title: _vector_stores_vector_store_id_search_Request
|
|
Error:
|
|
description: 'Error response from the API. Roughly follows RFC 7807.
|
|
|
|
|
|
:param status: HTTP status code
|
|
|
|
:param title: Error title, a short summary of the error which is invariant
|
|
for an error type
|
|
|
|
:param detail: Error detail, a longer human-readable description of the error
|
|
|
|
:param instance: (Optional) A URL which can be used to retrieve more information
|
|
about the specific occurrence of the error'
|
|
properties:
|
|
status:
|
|
title: Status
|
|
type: integer
|
|
title:
|
|
title: Title
|
|
type: string
|
|
detail:
|
|
title: Detail
|
|
type: string
|
|
instance:
|
|
title: Instance
|
|
type: string
|
|
nullable: true
|
|
required:
|
|
- status
|
|
- title
|
|
- detail
|
|
title: Error
|
|
type: object
|
|
ListOpenAIResponseInputItem:
|
|
description: 'List container for OpenAI response input items.
|
|
|
|
|
|
:param data: List of input items
|
|
|
|
:param object: Object type identifier, always "list"'
|
|
properties:
|
|
data:
|
|
items:
|
|
anyOf:
|
|
- discriminator:
|
|
mapping:
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
title: Data
|
|
type: array
|
|
object:
|
|
const: list
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
required:
|
|
- data
|
|
title: ListOpenAIResponseInputItem
|
|
type: object
|
|
ListOpenAIResponseObject:
|
|
description: 'Paginated list of OpenAI response objects with navigation metadata.
|
|
|
|
|
|
:param data: List of response objects with their input context
|
|
|
|
:param has_more: Whether there are more results available beyond this page
|
|
|
|
:param first_id: Identifier of the first item in this page
|
|
|
|
:param last_id: Identifier of the last item in this page
|
|
|
|
:param object: Object type identifier, always "list"'
|
|
properties:
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIResponseObjectWithInput'
|
|
title: Data
|
|
type: array
|
|
has_more:
|
|
title: Has More
|
|
type: boolean
|
|
first_id:
|
|
title: First Id
|
|
type: string
|
|
last_id:
|
|
title: Last Id
|
|
type: string
|
|
object:
|
|
const: list
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
required:
|
|
- data
|
|
- has_more
|
|
- first_id
|
|
- last_id
|
|
title: ListOpenAIResponseObject
|
|
type: object
|
|
OpenAIDeleteResponseObject:
|
|
description: 'Response object confirming deletion of an OpenAI response.
|
|
|
|
|
|
:param id: Unique identifier of the deleted response
|
|
|
|
:param object: Object type identifier, always "response"
|
|
|
|
:param deleted: Deletion confirmation flag, always True'
|
|
properties:
|
|
id:
|
|
title: Id
|
|
type: string
|
|
object:
|
|
const: response
|
|
default: response
|
|
title: Object
|
|
type: string
|
|
deleted:
|
|
default: true
|
|
title: Deleted
|
|
type: boolean
|
|
required:
|
|
- id
|
|
title: OpenAIDeleteResponseObject
|
|
type: object
|
|
ListBatchesResponse:
|
|
description: Response containing a list of batch objects.
|
|
properties:
|
|
object:
|
|
const: list
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
data:
|
|
description: List of batch objects
|
|
items:
|
|
$ref: '#/components/schemas/Batch'
|
|
title: Data
|
|
type: array
|
|
first_id:
|
|
description: ID of the first batch in the list
|
|
title: First Id
|
|
type: string
|
|
nullable: true
|
|
last_id:
|
|
description: ID of the last batch in the list
|
|
title: Last Id
|
|
type: string
|
|
nullable: true
|
|
has_more:
|
|
default: false
|
|
description: Whether there are more batches available
|
|
title: Has More
|
|
type: boolean
|
|
required:
|
|
- data
|
|
title: ListBatchesResponse
|
|
type: object
|
|
ConversationDeletedResource:
|
|
description: Response for deleted conversation.
|
|
properties:
|
|
id:
|
|
description: The deleted conversation identifier
|
|
title: Id
|
|
type: string
|
|
object:
|
|
default: conversation.deleted
|
|
description: Object type
|
|
title: Object
|
|
type: string
|
|
deleted:
|
|
default: true
|
|
description: Whether the object was deleted
|
|
title: Deleted
|
|
type: boolean
|
|
required:
|
|
- id
|
|
title: ConversationDeletedResource
|
|
type: object
|
|
ConversationItemDeletedResource:
|
|
description: Response for deleted conversation item.
|
|
properties:
|
|
id:
|
|
description: The deleted item identifier
|
|
title: Id
|
|
type: string
|
|
object:
|
|
default: conversation.item.deleted
|
|
description: Object type
|
|
title: Object
|
|
type: string
|
|
deleted:
|
|
default: true
|
|
description: Whether the object was deleted
|
|
title: Deleted
|
|
type: boolean
|
|
required:
|
|
- id
|
|
title: ConversationItemDeletedResource
|
|
type: object
|
|
ListOpenAIFileResponse:
|
|
description: 'Response for listing files in OpenAI Files API.
|
|
|
|
|
|
:param data: List of file objects
|
|
|
|
:param has_more: Whether there are more files available beyond this page
|
|
|
|
:param first_id: ID of the first file in the list for pagination
|
|
|
|
:param last_id: ID of the last file in the list for pagination
|
|
|
|
:param object: The object type, which is always "list"'
|
|
properties:
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIFileObject'
|
|
title: Data
|
|
type: array
|
|
has_more:
|
|
title: Has More
|
|
type: boolean
|
|
first_id:
|
|
title: First Id
|
|
type: string
|
|
last_id:
|
|
title: Last Id
|
|
type: string
|
|
object:
|
|
const: list
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
required:
|
|
- data
|
|
- has_more
|
|
- first_id
|
|
- last_id
|
|
title: ListOpenAIFileResponse
|
|
type: object
|
|
OpenAIFileDeleteResponse:
|
|
description: 'Response for deleting a file in OpenAI Files API.
|
|
|
|
|
|
:param id: The file identifier that was deleted
|
|
|
|
:param object: The object type, which is always "file"
|
|
|
|
:param deleted: Whether the file was successfully deleted'
|
|
properties:
|
|
id:
|
|
title: Id
|
|
type: string
|
|
object:
|
|
const: file
|
|
default: file
|
|
title: Object
|
|
type: string
|
|
deleted:
|
|
title: Deleted
|
|
type: boolean
|
|
required:
|
|
- id
|
|
- deleted
|
|
title: OpenAIFileDeleteResponse
|
|
type: object
|
|
ListOpenAIChatCompletionResponse:
|
|
description: 'Response from listing OpenAI-compatible chat completions.
|
|
|
|
|
|
:param data: List of chat completion objects with their input messages
|
|
|
|
:param has_more: Whether there are more completions available beyond this
|
|
list
|
|
|
|
:param first_id: ID of the first completion in this list
|
|
|
|
:param last_id: ID of the last completion in this list
|
|
|
|
:param object: Must be "list" to identify this as a list response'
|
|
properties:
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/OpenAICompletionWithInputMessages'
|
|
title: Data
|
|
type: array
|
|
has_more:
|
|
title: Has More
|
|
type: boolean
|
|
first_id:
|
|
title: First Id
|
|
type: string
|
|
last_id:
|
|
title: Last Id
|
|
type: string
|
|
object:
|
|
const: list
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
required:
|
|
- data
|
|
- has_more
|
|
- first_id
|
|
- last_id
|
|
title: ListOpenAIChatCompletionResponse
|
|
type: object
|
|
OpenAIAssistantMessageParam:
|
|
description: 'A message containing the model''s (assistant) response in an OpenAI-compatible
|
|
chat completion request.
|
|
|
|
|
|
:param role: Must be "assistant" to identify this as the model''s response
|
|
|
|
:param content: The content of the model''s response
|
|
|
|
:param name: (Optional) The name of the assistant message participant.
|
|
|
|
:param tool_calls: List of tool calls. Each tool call is an OpenAIChatCompletionToolCall
|
|
object.'
|
|
properties:
|
|
role:
|
|
const: assistant
|
|
default: assistant
|
|
title: Role
|
|
type: string
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
type: array
|
|
title: Content
|
|
nullable: true
|
|
name:
|
|
title: Name
|
|
type: string
|
|
nullable: true
|
|
tool_calls:
|
|
title: Tool Calls
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionToolCall'
|
|
type: array
|
|
nullable: true
|
|
title: OpenAIAssistantMessageParam
|
|
type: object
|
|
OpenAIChoice:
|
|
description: 'A choice from an OpenAI-compatible chat completion response.
|
|
|
|
|
|
:param message: The message from the model
|
|
|
|
:param finish_reason: The reason the model stopped generating
|
|
|
|
:param index: The index of the choice
|
|
|
|
:param logprobs: (Optional) The log probabilities for the tokens in the message'
|
|
properties:
|
|
message:
|
|
discriminator:
|
|
mapping:
|
|
assistant: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
developer: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
system: '#/components/schemas/OpenAISystemMessageParam'
|
|
tool: '#/components/schemas/OpenAIToolMessageParam'
|
|
user: '#/components/schemas/OpenAIUserMessageParam'
|
|
propertyName: role
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIUserMessageParam'
|
|
- $ref: '#/components/schemas/OpenAISystemMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIToolMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
title: Message
|
|
finish_reason:
|
|
title: Finish Reason
|
|
type: string
|
|
index:
|
|
title: Index
|
|
type: integer
|
|
logprobs:
|
|
$ref: '#/components/schemas/OpenAIChoiceLogprobs'
|
|
nullable: true
|
|
required:
|
|
- message
|
|
- finish_reason
|
|
- index
|
|
title: OpenAIChoice
|
|
type: object
|
|
OpenAIChoiceLogprobs:
|
|
description: 'The log probabilities for the tokens in the message from an OpenAI-compatible
|
|
chat completion response.
|
|
|
|
|
|
:param content: (Optional) The log probabilities for the tokens in the message
|
|
|
|
:param refusal: (Optional) The log probabilities for the tokens in the message'
|
|
properties:
|
|
content:
|
|
title: Content
|
|
items:
|
|
$ref: '#/components/schemas/OpenAITokenLogProb'
|
|
type: array
|
|
nullable: true
|
|
refusal:
|
|
title: Refusal
|
|
items:
|
|
$ref: '#/components/schemas/OpenAITokenLogProb'
|
|
type: array
|
|
nullable: true
|
|
title: OpenAIChoiceLogprobs
|
|
type: object
|
|
OpenAICompletionWithInputMessages:
|
|
properties:
|
|
id:
|
|
title: Id
|
|
type: string
|
|
choices:
|
|
items:
|
|
$ref: '#/components/schemas/OpenAIChoice'
|
|
title: Choices
|
|
type: array
|
|
object:
|
|
const: chat.completion
|
|
default: chat.completion
|
|
title: Object
|
|
type: string
|
|
created:
|
|
title: Created
|
|
type: integer
|
|
model:
|
|
title: Model
|
|
type: string
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIChatCompletionUsage'
|
|
nullable: true
|
|
input_messages:
|
|
items:
|
|
discriminator:
|
|
mapping:
|
|
assistant: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
developer: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
system: '#/components/schemas/OpenAISystemMessageParam'
|
|
tool: '#/components/schemas/OpenAIToolMessageParam'
|
|
user: '#/components/schemas/OpenAIUserMessageParam'
|
|
propertyName: role
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIUserMessageParam'
|
|
- $ref: '#/components/schemas/OpenAISystemMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIAssistantMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIToolMessageParam'
|
|
- $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
|
|
title: Input Messages
|
|
type: array
|
|
required:
|
|
- id
|
|
- choices
|
|
- created
|
|
- model
|
|
- input_messages
|
|
title: OpenAICompletionWithInputMessages
|
|
type: object
|
|
OpenAIUserMessageParam:
|
|
description: 'A message from the user in an OpenAI-compatible chat completion
|
|
request.
|
|
|
|
|
|
:param role: Must be "user" to identify this as a user message
|
|
|
|
:param content: The content of the message, which can include text and other
|
|
media
|
|
|
|
:param name: (Optional) The name of the user message participant.'
|
|
properties:
|
|
role:
|
|
const: user
|
|
default: user
|
|
title: Role
|
|
type: string
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
discriminator:
|
|
mapping:
|
|
file: '#/components/schemas/OpenAIFile'
|
|
image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam'
|
|
- $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam'
|
|
- $ref: '#/components/schemas/OpenAIFile'
|
|
type: array
|
|
title: Content
|
|
name:
|
|
title: Name
|
|
type: string
|
|
nullable: true
|
|
required:
|
|
- content
|
|
title: OpenAIUserMessageParam
|
|
type: object
|
|
ScoringFn:
|
|
description: 'A scoring function resource for evaluating model outputs.
|
|
|
|
:param type: The resource type, always scoring_function'
|
|
properties:
|
|
identifier:
|
|
description: Unique identifier for this resource in llama stack
|
|
title: Identifier
|
|
type: string
|
|
provider_resource_id:
|
|
description: Unique identifier for this resource in the provider
|
|
title: Provider Resource Id
|
|
type: string
|
|
nullable: true
|
|
provider_id:
|
|
description: ID of the provider that owns this resource
|
|
title: Provider Id
|
|
type: string
|
|
type:
|
|
const: scoring_function
|
|
default: scoring_function
|
|
title: Type
|
|
type: string
|
|
description:
|
|
title: Description
|
|
type: string
|
|
nullable: true
|
|
metadata:
|
|
additionalProperties: true
|
|
description: Any additional metadata for this definition
|
|
title: Metadata
|
|
type: object
|
|
return_type:
|
|
description: The return type of the deterministic function
|
|
discriminator:
|
|
mapping:
|
|
agent_turn_input: '#/components/schemas/AgentTurnInputType'
|
|
array: '#/components/schemas/ArrayType'
|
|
boolean: '#/components/schemas/BooleanType'
|
|
chat_completion_input: '#/components/schemas/ChatCompletionInputType'
|
|
completion_input: '#/components/schemas/CompletionInputType'
|
|
json: '#/components/schemas/JsonType'
|
|
number: '#/components/schemas/NumberType'
|
|
object: '#/components/schemas/ObjectType'
|
|
string: '#/components/schemas/StringType'
|
|
union: '#/components/schemas/UnionType'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/StringType'
|
|
- $ref: '#/components/schemas/NumberType'
|
|
- $ref: '#/components/schemas/BooleanType'
|
|
- $ref: '#/components/schemas/ArrayType'
|
|
- $ref: '#/components/schemas/ObjectType'
|
|
- $ref: '#/components/schemas/JsonType'
|
|
- $ref: '#/components/schemas/UnionType'
|
|
- $ref: '#/components/schemas/ChatCompletionInputType'
|
|
- $ref: '#/components/schemas/CompletionInputType'
|
|
- $ref: '#/components/schemas/AgentTurnInputType'
|
|
title: Return Type
|
|
params:
|
|
description: The parameters for the scoring function for benchmark eval,
|
|
these can be overridden for app eval
|
|
title: Params
|
|
discriminator:
|
|
mapping:
|
|
basic: '#/components/schemas/BasicScoringFnParams'
|
|
llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams'
|
|
regex_parser: '#/components/schemas/RegexParserScoringFnParams'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/LLMAsJudgeScoringFnParams'
|
|
- $ref: '#/components/schemas/RegexParserScoringFnParams'
|
|
- $ref: '#/components/schemas/BasicScoringFnParams'
|
|
nullable: true
|
|
required:
|
|
- identifier
|
|
- provider_id
|
|
- return_type
|
|
title: ScoringFn
|
|
type: object
|
|
ListToolDefsResponse:
|
|
description: 'Response containing a list of tool definitions.
|
|
|
|
|
|
:param data: List of tool definitions'
|
|
properties:
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/ToolDef'
|
|
title: Data
|
|
type: array
|
|
required:
|
|
- data
|
|
title: ListToolDefsResponse
|
|
type: object
|
|
VectorStoreDeleteResponse:
|
|
description: 'Response from deleting a vector store.
|
|
|
|
|
|
:param id: Unique identifier of the deleted vector store
|
|
|
|
:param object: Object type identifier for the deletion response
|
|
|
|
:param deleted: Whether the deletion operation was successful'
|
|
properties:
|
|
id:
|
|
title: Id
|
|
type: string
|
|
object:
|
|
default: vector_store.deleted
|
|
title: Object
|
|
type: string
|
|
deleted:
|
|
default: true
|
|
title: Deleted
|
|
type: boolean
|
|
required:
|
|
- id
|
|
title: VectorStoreDeleteResponse
|
|
type: object
|
|
VectorStoreFileContentsResponse:
|
|
description: 'Response from retrieving the contents of a vector store file.
|
|
|
|
|
|
:param file_id: Unique identifier for the file
|
|
|
|
:param filename: Name of the file
|
|
|
|
:param attributes: Key-value attributes associated with the file
|
|
|
|
:param content: List of content items from the file'
|
|
properties:
|
|
file_id:
|
|
title: File Id
|
|
type: string
|
|
filename:
|
|
title: Filename
|
|
type: string
|
|
attributes:
|
|
additionalProperties: true
|
|
title: Attributes
|
|
type: object
|
|
content:
|
|
items:
|
|
$ref: '#/components/schemas/VectorStoreContent'
|
|
title: Content
|
|
type: array
|
|
required:
|
|
- file_id
|
|
- filename
|
|
- attributes
|
|
- content
|
|
title: VectorStoreFileContentsResponse
|
|
type: object
|
|
VectorStoreFileDeleteResponse:
|
|
description: 'Response from deleting a vector store file.
|
|
|
|
|
|
:param id: Unique identifier of the deleted file
|
|
|
|
:param object: Object type identifier for the deletion response
|
|
|
|
:param deleted: Whether the deletion operation was successful'
|
|
properties:
|
|
id:
|
|
title: Id
|
|
type: string
|
|
object:
|
|
default: vector_store.file.deleted
|
|
title: Object
|
|
type: string
|
|
deleted:
|
|
default: true
|
|
title: Deleted
|
|
type: boolean
|
|
required:
|
|
- id
|
|
title: VectorStoreFileDeleteResponse
|
|
type: object
|
|
VectorStoreFilesListInBatchResponse:
|
|
description: 'Response from listing files in a vector store file batch.
|
|
|
|
|
|
:param object: Object type identifier, always "list"
|
|
|
|
:param data: List of vector store file objects in the batch
|
|
|
|
:param first_id: (Optional) ID of the first file in the list for pagination
|
|
|
|
:param last_id: (Optional) ID of the last file in the list for pagination
|
|
|
|
:param has_more: Whether there are more files available beyond this page'
|
|
properties:
|
|
object:
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/VectorStoreFileObject'
|
|
title: Data
|
|
type: array
|
|
first_id:
|
|
title: First Id
|
|
type: string
|
|
nullable: true
|
|
last_id:
|
|
title: Last Id
|
|
type: string
|
|
nullable: true
|
|
has_more:
|
|
default: false
|
|
title: Has More
|
|
type: boolean
|
|
required:
|
|
- data
|
|
title: VectorStoreFilesListInBatchResponse
|
|
type: object
|
|
VectorStoreListFilesResponse:
|
|
description: 'Response from listing files in a vector store.
|
|
|
|
|
|
:param object: Object type identifier, always "list"
|
|
|
|
:param data: List of vector store file objects
|
|
|
|
:param first_id: (Optional) ID of the first file in the list for pagination
|
|
|
|
:param last_id: (Optional) ID of the last file in the list for pagination
|
|
|
|
:param has_more: Whether there are more files available beyond this page'
|
|
properties:
|
|
object:
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/VectorStoreFileObject'
|
|
title: Data
|
|
type: array
|
|
first_id:
|
|
title: First Id
|
|
type: string
|
|
nullable: true
|
|
last_id:
|
|
title: Last Id
|
|
type: string
|
|
nullable: true
|
|
has_more:
|
|
default: false
|
|
title: Has More
|
|
type: boolean
|
|
required:
|
|
- data
|
|
title: VectorStoreListFilesResponse
|
|
type: object
|
|
VectorStoreListResponse:
|
|
description: 'Response from listing vector stores.
|
|
|
|
|
|
:param object: Object type identifier, always "list"
|
|
|
|
:param data: List of vector store objects
|
|
|
|
:param first_id: (Optional) ID of the first vector store in the list for pagination
|
|
|
|
:param last_id: (Optional) ID of the last vector store in the list for pagination
|
|
|
|
:param has_more: Whether there are more vector stores available beyond this
|
|
page'
|
|
properties:
|
|
object:
|
|
default: list
|
|
title: Object
|
|
type: string
|
|
data:
|
|
items:
|
|
$ref: '#/components/schemas/VectorStoreObject'
|
|
title: Data
|
|
type: array
|
|
first_id:
|
|
title: First Id
|
|
type: string
|
|
nullable: true
|
|
last_id:
|
|
title: Last Id
|
|
type: string
|
|
nullable: true
|
|
has_more:
|
|
default: false
|
|
title: Has More
|
|
type: boolean
|
|
required:
|
|
- data
|
|
title: VectorStoreListResponse
|
|
type: object
|
|
OpenAIResponseMessage:
|
|
description: 'Corresponds to the various Message types in the Responses API.
|
|
|
|
They are all under one type because the Responses API gives them all
|
|
|
|
the same "type" value, and there is no way to tell them apart in certain
|
|
|
|
scenarios.'
|
|
properties:
|
|
content:
|
|
anyOf:
|
|
- type: string
|
|
- items:
|
|
discriminator:
|
|
mapping:
|
|
input_file: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
input_image: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
input_text: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentText'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentImage'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputMessageContentFile'
|
|
type: array
|
|
- items:
|
|
discriminator:
|
|
mapping:
|
|
output_text: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
refusal: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageContentOutputText'
|
|
- $ref: '#/components/schemas/OpenAIResponseContentPartRefusal'
|
|
type: array
|
|
title: Content
|
|
role:
|
|
anyOf:
|
|
- const: system
|
|
type: string
|
|
- const: developer
|
|
type: string
|
|
- const: user
|
|
type: string
|
|
- const: assistant
|
|
type: string
|
|
title: Role
|
|
type:
|
|
const: message
|
|
default: message
|
|
title: Type
|
|
type: string
|
|
id:
|
|
title: Id
|
|
type: string
|
|
nullable: true
|
|
status:
|
|
title: Status
|
|
type: string
|
|
nullable: true
|
|
required:
|
|
- content
|
|
- role
|
|
title: OpenAIResponseMessage
|
|
type: object
|
|
OpenAIResponseObjectWithInput:
|
|
description: 'OpenAI response object extended with input context information.
|
|
|
|
|
|
:param input: List of input items that led to this response'
|
|
properties:
|
|
created_at:
|
|
title: Created At
|
|
type: integer
|
|
error:
|
|
$ref: '#/components/schemas/OpenAIResponseError'
|
|
nullable: true
|
|
id:
|
|
title: Id
|
|
type: string
|
|
model:
|
|
title: Model
|
|
type: string
|
|
object:
|
|
const: response
|
|
default: response
|
|
title: Object
|
|
type: string
|
|
output:
|
|
items:
|
|
discriminator:
|
|
mapping:
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
title: Output
|
|
type: array
|
|
parallel_tool_calls:
|
|
default: false
|
|
title: Parallel Tool Calls
|
|
type: boolean
|
|
previous_response_id:
|
|
title: Previous Response Id
|
|
type: string
|
|
nullable: true
|
|
prompt:
|
|
$ref: '#/components/schemas/OpenAIResponsePrompt'
|
|
nullable: true
|
|
status:
|
|
title: Status
|
|
type: string
|
|
temperature:
|
|
title: Temperature
|
|
type: number
|
|
nullable: true
|
|
text:
|
|
$ref: '#/components/schemas/OpenAIResponseText'
|
|
default:
|
|
format:
|
|
type: text
|
|
top_p:
|
|
title: Top P
|
|
type: number
|
|
nullable: true
|
|
tools:
|
|
title: Tools
|
|
items:
|
|
discriminator:
|
|
mapping:
|
|
file_search: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
function: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
mcp: '#/components/schemas/OpenAIResponseToolMCP'
|
|
web_search: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
web_search_preview: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
web_search_preview_2025_03_11: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFileSearch'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputToolFunction'
|
|
- $ref: '#/components/schemas/OpenAIResponseToolMCP'
|
|
type: array
|
|
nullable: true
|
|
truncation:
|
|
title: Truncation
|
|
type: string
|
|
nullable: true
|
|
usage:
|
|
$ref: '#/components/schemas/OpenAIResponseUsage'
|
|
nullable: true
|
|
instructions:
|
|
title: Instructions
|
|
type: string
|
|
nullable: true
|
|
input:
|
|
items:
|
|
anyOf:
|
|
- discriminator:
|
|
mapping:
|
|
file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
message: '#/components/schemas/OpenAIResponseMessage'
|
|
web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
propertyName: type
|
|
oneOf:
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall'
|
|
- $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest'
|
|
- $ref: '#/components/schemas/OpenAIResponseInputFunctionToolCallOutput'
|
|
- $ref: '#/components/schemas/OpenAIResponseMCPApprovalResponse'
|
|
- $ref: '#/components/schemas/OpenAIResponseMessage'
|
|
title: Input
|
|
type: array
|
|
required:
|
|
- created_at
|
|
- id
|
|
- model
|
|
- output
|
|
- status
|
|
- input
|
|
title: OpenAIResponseObjectWithInput
|
|
type: object
|
|
    _safety_run_shield_Request:
      properties:
        shield_id:
          title: Shield Id
          type: string
        messages:
          anyOf:
            - $ref: '#/components/schemas/OpenAIUserMessageParam'
            - $ref: '#/components/schemas/OpenAISystemMessageParam'
            - $ref: '#/components/schemas/OpenAIAssistantMessageParam'
            - $ref: '#/components/schemas/OpenAIToolMessageParam'
            - $ref: '#/components/schemas/OpenAIDeveloperMessageParam'
          title: Messages
        params:
          title: Params
          type: string
      required:
        - shield_id
        - messages
        - params
      title: _safety_run_shield_Request
      type: object
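    # Illustrative (non-normative) request body for the run-shield operation,
    # matching the _safety_run_shield_Request schema above. The shield identifier
    # and message content are hypothetical placeholders; per the schema, messages
    # is a single message object and params is passed as a string.
    #
    #   shield_id: llama-guard
    #   messages:
    #     role: user
    #     content: Please check whether this message is safe.
    #   params: '{}'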
  responses:
    BadRequest400:
      description: The request was invalid or malformed
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            status: 400
            title: Bad Request
            detail: The request was invalid or malformed
    TooManyRequests429:
      description: The client has sent too many requests in a given amount of time
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            status: 429
            title: Too Many Requests
            detail: You have exceeded the rate limit. Please try again later.
    InternalServerError500:
      description: The server encountered an unexpected error
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            status: 500
            title: Internal Server Error
            detail: An unexpected error occurred
    DefaultError:
      description: An error occurred
      content:
        application/json:
          schema:
            $ref: '#/components/schemas/Error'
          example:
            status: 0
            title: Error
            detail: An unexpected error occurred
security:
  - Default: []
tags:
  - name: Agents
    description: >-
      APIs for creating and interacting with agentic systems.


      ## Responses API


      The Responses API provides OpenAI-compatible functionality with enhanced
      capabilities for dynamic, stateful interactions.


      > **✅ STABLE**: This API is production-ready with backward compatibility
      guarantees. Recommended for production applications.


      ### ✅ Supported Tools


      The Responses API supports the following tool types:


      - **`web_search`**: Search the web for current information and real-time data

      - **`file_search`**: Search through uploaded files and vector stores
        - Supports dynamic `vector_store_ids` per call
        - Compatible with OpenAI file search patterns
      - **`function`**: Call custom functions with JSON schema validation

      - **`mcp_tool`**: Model Context Protocol integration


      ### ✅ Supported Fields & Features


      **Core Capabilities:**

      - **Dynamic Configuration**: Switch models, vector stores, and tools per request
      without pre-configuration

      - **Conversation Branching**: Use `previous_response_id` to branch conversations
      and explore different paths

      - **Rich Annotations**: Automatic file citations, URL citations, and container
      file citations

      - **Status Tracking**: Monitor tool call execution status and handle failures
      gracefully


      ### 🚧 Work in Progress

      - Full real-time response streaming support

      - `tool_choice` parameter

      - `max_tool_calls` parameter

      - Built-in tools (code interpreter, containers API)

      - Safety & guardrails

      - `reasoning` capabilities

      - `service_tier`

      - `logprobs`

      - `max_output_tokens`

      - `metadata` handling

      - `instructions`

      - `incomplete_details`

      - `background`
    x-displayName: Agents
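  # Illustrative (non-normative) request payload for creating a response with the
  # Responses API described above. The model name, vector store id, previous
  # response id, and message content are hypothetical placeholders; the tool
  # types and field names mirror the schemas defined in this spec.
  #
  #   model: llama-3.3-70b-instruct
  #   previous_response_id: resp_abc123
  #   input:
  #     - type: message
  #       role: user
  #       content: Find recent news about open-weight models.
  #   tools:
  #     - type: web_search
  #     - type: file_search
  #       vector_store_ids:
  #         - vs_123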
  - name: Batches
    description: >-
      The Batches API enables efficient processing of multiple requests in a single
      operation. It is particularly useful for processing large datasets, batch
      evaluation workflows, and cost-effective inference at scale.


      The API is designed to allow use of OpenAI client libraries for seamless integration.


      This API provides the following extensions:
       - idempotent batch creation


      Note: This API is currently under active development and may undergo changes.
    x-displayName: Batches
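  # Illustrative (non-normative) batch creation request. The field names below
  # follow the OpenAI Batch API shape (input_file_id, endpoint, completion_window,
  # metadata) and are assumptions here; consult the _batches_Request schema for
  # the authoritative definition. All values are hypothetical.
  #
  #   input_file_id: file-abc123
  #   endpoint: /v1/chat/completions
  #   completion_window: 24h
  #   metadata:
  #     project: nightly-eval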
  - name: Conversations
    description: >-
      Protocol for conversation management operations.
    x-displayName: Conversations
  - name: Files
    description: >-
      This API is used to upload documents that can be used with other Llama Stack
      APIs.
    x-displayName: Files
  - name: Inference
    description: >-
      Llama Stack Inference API for generating completions, chat completions, and
      embeddings.


      This API provides the raw interface to the underlying models. Three kinds of
      models are supported:

      - LLM models: these models generate "raw" and "chat" (conversational) completions.

      - Embedding models: these models generate embeddings to be used for semantic
      search.

      - Rerank models: these models reorder the documents based on their relevance
      to a query.
    x-displayName: Inference
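  # Illustrative (non-normative) chat completion request body for the Inference
  # API. The model identifier and message content are hypothetical; the message
  # shape follows the OpenAI-compatible message param schemas referenced elsewhere
  # in this spec.
  #
  #   model: llama-3.3-70b-instruct
  #   messages:
  #     - role: system
  #       content: You are a concise assistant.
  #     - role: user
  #       content: Summarize the Llama Stack Inference API in one sentence.
  #   temperature: 0.2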
  - name: Inspect
    description: >-
      APIs for inspecting the Llama Stack service, including health status and the
      available API routes with their methods and implementing providers.
    x-displayName: Inspect
  - name: Models
    description: ''
  - name: Prompts
    description: >-
      Protocol for prompt management operations.
    x-displayName: Prompts
  - name: Providers
    description: >-
      Providers API for inspecting, listing, and modifying providers and their configurations.
    x-displayName: Providers
  - name: Safety
    description: OpenAI-compatible Moderations API.
    x-displayName: Safety
  - name: Scoring
    description: ''
  - name: ScoringFunctions
    description: ''
  - name: Shields
    description: ''
  - name: ToolGroups
    description: ''
  - name: ToolRuntime
    description: ''
  - name: VectorIO
    description: ''
x-tagGroups:
  - name: Operations
    tags:
      - Agents
      - Batches
      - Conversations
      - Files
      - Inference
      - Inspect
      - Models
      - Prompts
      - Providers
      - Safety
      - Scoring
      - ScoringFunctions
      - Shields
      - ToolGroups
      - ToolRuntime
      - VectorIO