docs: API docstring cleanup for better documentation rendering

Alexey Rybak 2025-10-02 13:04:37 -07:00
parent 0e13512dd7
commit 5613779568
14 changed files with 591 additions and 409 deletions
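The change applied across the spec and API files below follows one pattern: every endpoint docstring now opens with a short imperative summary line, and the generated OpenAPI "summary" carries just that line while "description" carries the full docstring text (hence the new descriptions that begin with the summary repeated). A minimal sketch of that split, using a hypothetical helper rather than the repository's actual generator:

def split_docstring(doc: str) -> tuple[str, str]:
    # Hypothetical helper (not the project's generator): the first line becomes
    # the OpenAPI summary; the cleaned-up docstring text becomes the description.
    lines = [line.strip() for line in doc.strip().splitlines() if line.strip()]
    return lines[0], "\n".join(lines)

doc = """Get chat completion.
Describe a chat completion by its ID.
"""
summary, description = split_docstring(doc)
assert summary == "Get chat completion."
assert description == "Get chat completion.\nDescribe a chat completion by its ID."

The same convention explains the tag-level changes at the bottom of each spec file, where the prose that previously sat in x-displayName moves into the tag description and x-displayName becomes the plain tag name.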


@ -1443,8 +1443,8 @@
"tags": [
"Inference"
],
"summary": "List all chat completions.",
"description": "List all chat completions.",
"summary": "List chat completions.",
"description": "List chat completions.",
"parameters": [
{
"name": "after",
@ -1520,8 +1520,8 @@
"tags": [
"Inference"
],
"summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"summary": "Create chat completions.",
"description": "Create chat completions.\nGenerate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -1565,8 +1565,8 @@
"tags": [
"Inference"
],
"summary": "Describe a chat completion by its ID.",
"description": "Describe a chat completion by its ID.",
"summary": "Get chat completion.",
"description": "Get chat completion.\nDescribe a chat completion by its ID.",
"parameters": [
{
"name": "completion_id",
@ -1610,8 +1610,8 @@
"tags": [
"Inference"
],
"summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"summary": "Create completion.",
"description": "Create completion.\nGenerate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -1655,8 +1655,8 @@
"tags": [
"Inference"
],
"summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"summary": "Create embeddings.",
"description": "Create embeddings.\nGenerate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -1700,8 +1700,8 @@
"tags": [
"Files"
],
"summary": "Returns a list of files that belong to the user's organization.",
"description": "Returns a list of files that belong to the user's organization.",
"summary": "List files.",
"description": "List files.\nReturns a list of files that belong to the user's organization.",
"parameters": [
{
"name": "after",
@ -1770,8 +1770,8 @@
"tags": [
"Files"
],
"summary": "Upload a file that can be used across various endpoints.",
"description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"summary": "Upload file.",
"description": "Upload file.\nUpload a file that can be used across various endpoints.\n\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
@ -1831,8 +1831,8 @@
"tags": [
"Files"
],
"summary": "Returns information about a specific file.",
"description": "Returns information about a specific file.",
"summary": "Retrieve file.",
"description": "Retrieve file.\nReturns information about a specific file.",
"parameters": [
{
"name": "file_id",
@ -1874,8 +1874,8 @@
"tags": [
"Files"
],
"summary": "Delete a file.",
"description": "Delete a file.",
"summary": "Delete file.",
"description": "Delete file.",
"parameters": [
{
"name": "file_id",
@ -1919,8 +1919,8 @@
"tags": [
"Files"
],
"summary": "Returns the contents of the specified file.",
"description": "Returns the contents of the specified file.",
"summary": "Retrieve file content.",
"description": "Retrieve file content.\nReturns the contents of the specified file.",
"parameters": [
{
"name": "file_id",
@ -1999,8 +1999,8 @@
"tags": [
"Safety"
],
"summary": "Classifies if text and/or image inputs are potentially harmful.",
"description": "Classifies if text and/or image inputs are potentially harmful.",
"summary": "Create moderation.",
"description": "Create moderation.\nClassifies if text and/or image inputs are potentially harmful.",
"parameters": [],
"requestBody": {
"content": {
@ -2044,8 +2044,8 @@
"tags": [
"Agents"
],
"summary": "List all OpenAI responses.",
"description": "List all OpenAI responses.",
"summary": "List all responses.",
"description": "List all responses.",
"parameters": [
{
"name": "after",
@ -2114,8 +2114,8 @@
"tags": [
"Agents"
],
"summary": "List all OpenAI responses.",
"description": "List all OpenAI responses.",
"summary": "List all responses.",
"description": "List all responses.",
"parameters": [],
"requestBody": {
"content": {
@ -2159,8 +2159,8 @@
"tags": [
"Agents"
],
"summary": "Retrieve an OpenAI response by its ID.",
"description": "Retrieve an OpenAI response by its ID.",
"summary": "Get a model response.",
"description": "Get a model response.",
"parameters": [
{
"name": "response_id",
@ -2202,8 +2202,8 @@
"tags": [
"Agents"
],
"summary": "Delete an OpenAI response by its ID.",
"description": "Delete an OpenAI response by its ID.",
"summary": "Delete a response.",
"description": "Delete a response.",
"parameters": [
{
"name": "response_id",
@ -2247,8 +2247,8 @@
"tags": [
"Agents"
],
"summary": "List input items for a given OpenAI response.",
"description": "List input items for a given OpenAI response.",
"summary": "List input items.",
"description": "List input items.",
"parameters": [
{
"name": "response_id",
@ -13439,12 +13439,13 @@
},
{
"name": "Files",
"description": ""
"description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
"x-displayName": "Files"
},
{
"name": "Inference",
"description": "This API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
"x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
"description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n\nThis API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
"x-displayName": "Inference"
},
{
"name": "Models",
@ -13456,7 +13457,8 @@
},
{
"name": "Safety",
"description": ""
"description": "OpenAI-compatible Moderations API.",
"x-displayName": "Safety"
},
{
"name": "Telemetry",


@ -1033,8 +1033,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: List all chat completions.
description: List all chat completions.
summary: List chat completions.
description: List chat completions.
parameters:
- name: after
in: query
@ -1087,10 +1087,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
summary: Create chat completions.
description: >-
Create chat completions.
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
parameters: []
@ -1122,8 +1122,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: Describe a chat completion by its ID.
description: Describe a chat completion by its ID.
summary: Get chat completion.
description: >-
Get chat completion.
Describe a chat completion by its ID.
parameters:
- name: completion_id
in: path
@ -1153,10 +1156,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
summary: Create completion.
description: >-
Create completion.
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
parameters: []
@ -1189,10 +1192,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate OpenAI-compatible embeddings for the given input using the specified
model.
summary: Create embeddings.
description: >-
Create embeddings.
Generate OpenAI-compatible embeddings for the given input using the specified
model.
parameters: []
@ -1225,9 +1228,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns a list of files that belong to the user's organization.
summary: List files.
description: >-
List files.
Returns a list of files that belong to the user's organization.
parameters:
- name: after
@ -1285,11 +1289,13 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Upload a file that can be used across various endpoints.
summary: Upload file.
description: >-
Upload file.
Upload a file that can be used across various endpoints.
The file upload should be a multipart form request with:
- file: The File object (not file name) to be uploaded.
@ -1338,9 +1344,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns information about a specific file.
summary: Retrieve file.
description: >-
Retrieve file.
Returns information about a specific file.
parameters:
- name: file_id
@ -1372,8 +1379,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: Delete a file.
description: Delete a file.
summary: Delete file.
description: Delete file.
parameters:
- name: file_id
in: path
@ -1405,9 +1412,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns the contents of the specified file.
summary: Retrieve file content.
description: >-
Retrieve file content.
Returns the contents of the specified file.
parameters:
- name: file_id
@ -1464,9 +1472,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
summary: >-
Classifies if text and/or image inputs are potentially harmful.
summary: Create moderation.
description: >-
Create moderation.
Classifies if text and/or image inputs are potentially harmful.
parameters: []
requestBody:
@ -1497,8 +1506,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: List all OpenAI responses.
description: List all OpenAI responses.
summary: List all responses.
description: List all responses.
parameters:
- name: after
in: query
@ -1546,8 +1555,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: List all OpenAI responses.
description: List all OpenAI responses.
summary: List all responses.
description: List all responses.
parameters: []
requestBody:
content:
@ -1577,8 +1586,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: Retrieve an OpenAI response by its ID.
description: Retrieve an OpenAI response by its ID.
summary: Get a model response.
description: Get a model response.
parameters:
- name: response_id
in: path
@ -1608,8 +1617,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: Delete an OpenAI response by its ID.
description: Delete an OpenAI response by its ID.
summary: Delete a response.
description: Delete a response.
parameters:
- name: response_id
in: path
@ -1639,10 +1648,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: >-
List input items for a given OpenAI response.
description: >-
List input items for a given OpenAI response.
summary: List input items.
description: List input items.
parameters:
- name: response_id
in: path
@ -10054,9 +10061,16 @@ tags:
x-displayName: >-
Llama Stack Evaluation API for running evaluations on model and agent candidates.
- name: Files
description: ''
description: >-
This API is used to upload documents that can be used with other Llama Stack
APIs.
x-displayName: Files
- name: Inference
description: >-
Llama Stack Inference API for generating completions, chat completions, and
embeddings.
This API provides the raw interface to the underlying models. Two kinds of models
are supported:
@ -10064,15 +10078,14 @@ tags:
- Embedding models: these models generate embeddings to be used for semantic
search.
x-displayName: >-
Llama Stack Inference API for generating completions, chat completions, and
embeddings.
x-displayName: Inference
- name: Models
description: ''
- name: PostTraining (Coming Soon)
description: ''
- name: Safety
description: ''
description: OpenAI-compatible Moderations API.
x-displayName: Safety
- name: Telemetry
description: ''
- name: VectorIO

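The Inference operations renamed in the two spec files above keep their OpenAI-compatible request shapes. As a quick illustration, a chat completion and an embeddings call against these routes might look like the following minimal sketch; the server address, route prefix, and model ids are assumptions, not taken from the spec:

import requests

BASE_URL = "http://localhost:8321/v1"  # assumed local Llama Stack server

# "Create chat completions." -- OpenAI-compatible body with a messages list
chat = requests.post(
    f"{BASE_URL}/chat/completions",
    json={
        "model": "my-llm",  # placeholder model id
        "messages": [{"role": "user", "content": "Summarize the Files API in one line."}],
    },
).json()
print(chat["choices"][0]["message"]["content"])  # assumes the OpenAI response shape

# "Create embeddings." -- same pattern for the embeddings route
emb = requests.post(
    f"{BASE_URL}/embeddings",
    json={"model": "my-embedding-model", "input": ["semantic search text"]},
).json()
print(len(emb["data"][0]["embedding"]))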

@ -69,8 +69,8 @@
"tags": [
"Inference"
],
"summary": "List all chat completions.",
"description": "List all chat completions.",
"summary": "List chat completions.",
"description": "List chat completions.",
"parameters": [
{
"name": "after",
@ -146,8 +146,8 @@
"tags": [
"Inference"
],
"summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"summary": "Create chat completions.",
"description": "Create chat completions.\nGenerate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -191,8 +191,8 @@
"tags": [
"Inference"
],
"summary": "Describe a chat completion by its ID.",
"description": "Describe a chat completion by its ID.",
"summary": "Get chat completion.",
"description": "Get chat completion.\nDescribe a chat completion by its ID.",
"parameters": [
{
"name": "completion_id",
@ -236,8 +236,8 @@
"tags": [
"Inference"
],
"summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"summary": "Create completion.",
"description": "Create completion.\nGenerate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -281,8 +281,8 @@
"tags": [
"Inference"
],
"summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"summary": "Create embeddings.",
"description": "Create embeddings.\nGenerate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -326,8 +326,8 @@
"tags": [
"Files"
],
"summary": "Returns a list of files that belong to the user's organization.",
"description": "Returns a list of files that belong to the user's organization.",
"summary": "List files.",
"description": "List files.\nReturns a list of files that belong to the user's organization.",
"parameters": [
{
"name": "after",
@ -396,8 +396,8 @@
"tags": [
"Files"
],
"summary": "Upload a file that can be used across various endpoints.",
"description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"summary": "Upload file.",
"description": "Upload file.\nUpload a file that can be used across various endpoints.\n\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
@ -457,8 +457,8 @@
"tags": [
"Files"
],
"summary": "Returns information about a specific file.",
"description": "Returns information about a specific file.",
"summary": "Retrieve file.",
"description": "Retrieve file.\nReturns information about a specific file.",
"parameters": [
{
"name": "file_id",
@ -500,8 +500,8 @@
"tags": [
"Files"
],
"summary": "Delete a file.",
"description": "Delete a file.",
"summary": "Delete file.",
"description": "Delete file.",
"parameters": [
{
"name": "file_id",
@ -545,8 +545,8 @@
"tags": [
"Files"
],
"summary": "Returns the contents of the specified file.",
"description": "Returns the contents of the specified file.",
"summary": "Retrieve file content.",
"description": "Retrieve file content.\nReturns the contents of the specified file.",
"parameters": [
{
"name": "file_id",
@ -590,8 +590,8 @@
"tags": [
"Inspect"
],
"summary": "Get the current health status of the service.",
"description": "Get the current health status of the service.",
"summary": "Get health status.",
"description": "Get health status.\nGet the current health status of the service.",
"parameters": [],
"deprecated": false
}
@ -625,8 +625,8 @@
"tags": [
"Inspect"
],
"summary": "List all available API routes with their methods and implementing providers.",
"description": "List all available API routes with their methods and implementing providers.",
"summary": "List routes.",
"description": "List routes.\nList all available API routes with their methods and implementing providers.",
"parameters": [],
"deprecated": false
}
@ -693,8 +693,8 @@
"tags": [
"Models"
],
"summary": "Register a model.",
"description": "Register a model.",
"summary": "Register model.",
"description": "Register model.\nRegister a model.",
"parameters": [],
"requestBody": {
"content": {
@ -738,8 +738,8 @@
"tags": [
"Models"
],
"summary": "Get a model by its identifier.",
"description": "Get a model by its identifier.",
"summary": "Get model.",
"description": "Get model.\nGet a model by its identifier.",
"parameters": [
{
"name": "model_id",
@ -774,8 +774,8 @@
"tags": [
"Models"
],
"summary": "Unregister a model.",
"description": "Unregister a model.",
"summary": "Unregister model.",
"description": "Unregister model.\nUnregister a model.",
"parameters": [
{
"name": "model_id",
@ -819,8 +819,8 @@
"tags": [
"Safety"
],
"summary": "Classifies if text and/or image inputs are potentially harmful.",
"description": "Classifies if text and/or image inputs are potentially harmful.",
"summary": "Create moderation.",
"description": "Create moderation.\nClassifies if text and/or image inputs are potentially harmful.",
"parameters": [],
"requestBody": {
"content": {
@ -897,8 +897,8 @@
"tags": [
"Prompts"
],
"summary": "Create a new prompt.",
"description": "Create a new prompt.",
"summary": "Create prompt.",
"description": "Create prompt.\nCreate a new prompt.",
"parameters": [],
"requestBody": {
"content": {
@ -942,8 +942,8 @@
"tags": [
"Prompts"
],
"summary": "Get a prompt by its identifier and optional version.",
"description": "Get a prompt by its identifier and optional version.",
"summary": "Get prompt.",
"description": "Get prompt.\nGet a prompt by its identifier and optional version.",
"parameters": [
{
"name": "prompt_id",
@ -994,8 +994,8 @@
"tags": [
"Prompts"
],
"summary": "Update an existing prompt (increments version).",
"description": "Update an existing prompt (increments version).",
"summary": "Update prompt.",
"description": "Update prompt.\nUpdate an existing prompt (increments version).",
"parameters": [
{
"name": "prompt_id",
@ -1040,8 +1040,8 @@
"tags": [
"Prompts"
],
"summary": "Delete a prompt.",
"description": "Delete a prompt.",
"summary": "Delete prompt.",
"description": "Delete prompt.\nDelete a prompt.",
"parameters": [
{
"name": "prompt_id",
@ -1085,8 +1085,8 @@
"tags": [
"Prompts"
],
"summary": "Set which version of a prompt should be the default in get_prompt (latest).",
"description": "Set which version of a prompt should be the default in get_prompt (latest).",
"summary": "Set prompt version.",
"description": "Set prompt version.\nSet which version of a prompt should be the default in get_prompt (latest).",
"parameters": [
{
"name": "prompt_id",
@ -1140,8 +1140,8 @@
"tags": [
"Prompts"
],
"summary": "List all versions of a specific prompt.",
"description": "List all versions of a specific prompt.",
"summary": "List prompt versions.",
"description": "List prompt versions.\nList all versions of a specific prompt.",
"parameters": [
{
"name": "prompt_id",
@ -1185,8 +1185,8 @@
"tags": [
"Providers"
],
"summary": "List all available providers.",
"description": "List all available providers.",
"summary": "List providers.",
"description": "List providers.\nList all available providers.",
"parameters": [],
"deprecated": false
}
@ -1220,8 +1220,8 @@
"tags": [
"Providers"
],
"summary": "Get detailed information about a specific provider.",
"description": "Get detailed information about a specific provider.",
"summary": "Get provider.",
"description": "Get provider.\nGet detailed information about a specific provider.",
"parameters": [
{
"name": "provider_id",
@ -1265,8 +1265,8 @@
"tags": [
"Agents"
],
"summary": "List all OpenAI responses.",
"description": "List all OpenAI responses.",
"summary": "List all responses.",
"description": "List all responses.",
"parameters": [
{
"name": "after",
@ -1335,8 +1335,8 @@
"tags": [
"Agents"
],
"summary": "List all OpenAI responses.",
"description": "List all OpenAI responses.",
"summary": "List all responses.",
"description": "List all responses.",
"parameters": [],
"requestBody": {
"content": {
@ -1380,8 +1380,8 @@
"tags": [
"Agents"
],
"summary": "Retrieve an OpenAI response by its ID.",
"description": "Retrieve an OpenAI response by its ID.",
"summary": "Get a model response.",
"description": "Get a model response.",
"parameters": [
{
"name": "response_id",
@ -1423,8 +1423,8 @@
"tags": [
"Agents"
],
"summary": "Delete an OpenAI response by its ID.",
"description": "Delete an OpenAI response by its ID.",
"summary": "Delete a response.",
"description": "Delete a response.",
"parameters": [
{
"name": "response_id",
@ -1468,8 +1468,8 @@
"tags": [
"Agents"
],
"summary": "List input items for a given OpenAI response.",
"description": "List input items for a given OpenAI response.",
"summary": "List input items.",
"description": "List input items.",
"parameters": [
{
"name": "response_id",
@ -1561,8 +1561,8 @@
"tags": [
"Safety"
],
"summary": "Run a shield.",
"description": "Run a shield.",
"summary": "Run shield.",
"description": "Run shield.\nRun a shield.",
"parameters": [],
"requestBody": {
"content": {
@ -3694,8 +3694,8 @@
"tags": [
"Inspect"
],
"summary": "Get the version of the service.",
"description": "Get the version of the service.",
"summary": "Get version.",
"description": "Get version.\nGet the version of the service.",
"parameters": [],
"deprecated": false
}
@ -12399,16 +12399,18 @@
},
{
"name": "Files",
"description": ""
"description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
"x-displayName": "Files"
},
{
"name": "Inference",
"description": "This API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
"x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
"description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n\nThis API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
"x-displayName": "Inference"
},
{
"name": "Inspect",
"description": ""
"description": "APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.",
"x-displayName": "Inspect"
},
{
"name": "Models",
@ -12416,17 +12418,18 @@
},
{
"name": "Prompts",
"description": "",
"x-displayName": "Protocol for prompt management operations."
"description": "Protocol for prompt management operations.",
"x-displayName": "Prompts"
},
{
"name": "Providers",
"description": "",
"x-displayName": "Providers API for inspecting, listing, and modifying providers and their configurations."
"description": "Providers API for inspecting, listing, and modifying providers and their configurations.",
"x-displayName": "Providers"
},
{
"name": "Safety",
"description": ""
"description": "OpenAI-compatible Moderations API.",
"x-displayName": "Safety"
},
{
"name": "Scoring",

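The Safety tag above is now documented as an OpenAI-compatible Moderations API. A minimal sketch of the "Create moderation." call, assuming the same placeholder base URL as before and the standard OpenAI moderation response shape:

import requests

BASE_URL = "http://localhost:8321/v1"  # assumed server address

# "Create moderation." -- classifies whether the input is potentially harmful
result = requests.post(
    f"{BASE_URL}/moderations",
    json={"input": "Some user-provided text to screen."},
).json()
print(result["results"][0]["flagged"])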

@ -33,8 +33,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: List all chat completions.
description: List all chat completions.
summary: List chat completions.
description: List chat completions.
parameters:
- name: after
in: query
@ -87,10 +87,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
summary: Create chat completions.
description: >-
Create chat completions.
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
parameters: []
@ -122,8 +122,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: Describe a chat completion by its ID.
description: Describe a chat completion by its ID.
summary: Get chat completion.
description: >-
Get chat completion.
Describe a chat completion by its ID.
parameters:
- name: completion_id
in: path
@ -153,10 +156,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
summary: Create completion.
description: >-
Create completion.
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
parameters: []
@ -189,10 +192,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate OpenAI-compatible embeddings for the given input using the specified
model.
summary: Create embeddings.
description: >-
Create embeddings.
Generate OpenAI-compatible embeddings for the given input using the specified
model.
parameters: []
@ -225,9 +228,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns a list of files that belong to the user's organization.
summary: List files.
description: >-
List files.
Returns a list of files that belong to the user's organization.
parameters:
- name: after
@ -285,11 +289,13 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Upload a file that can be used across various endpoints.
summary: Upload file.
description: >-
Upload file.
Upload a file that can be used across various endpoints.
The file upload should be a multipart form request with:
- file: The File object (not file name) to be uploaded.
@ -338,9 +344,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns information about a specific file.
summary: Retrieve file.
description: >-
Retrieve file.
Returns information about a specific file.
parameters:
- name: file_id
@ -372,8 +379,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: Delete a file.
description: Delete a file.
summary: Delete file.
description: Delete file.
parameters:
- name: file_id
in: path
@ -405,9 +412,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns the contents of the specified file.
summary: Retrieve file content.
description: >-
Retrieve file content.
Returns the contents of the specified file.
parameters:
- name: file_id
@ -440,9 +448,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
summary: >-
Get the current health status of the service.
summary: Get health status.
description: >-
Get health status.
Get the current health status of the service.
parameters: []
deprecated: false
@ -468,9 +477,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
summary: >-
List all available API routes with their methods and implementing providers.
summary: List routes.
description: >-
List routes.
List all available API routes with their methods and implementing providers.
parameters: []
deprecated: false
@ -519,8 +529,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
summary: Register a model.
description: Register a model.
summary: Register model.
description: >-
Register model.
Register a model.
parameters: []
requestBody:
content:
@ -550,8 +563,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
summary: Get a model by its identifier.
description: Get a model by its identifier.
summary: Get model.
description: >-
Get model.
Get a model by its identifier.
parameters:
- name: model_id
in: path
@ -576,8 +592,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
summary: Unregister a model.
description: Unregister a model.
summary: Unregister model.
description: >-
Unregister model.
Unregister a model.
parameters:
- name: model_id
in: path
@ -608,9 +627,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
summary: >-
Classifies if text and/or image inputs are potentially harmful.
summary: Create moderation.
description: >-
Create moderation.
Classifies if text and/or image inputs are potentially harmful.
parameters: []
requestBody:
@ -666,8 +686,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: Create a new prompt.
description: Create a new prompt.
summary: Create prompt.
description: >-
Create prompt.
Create a new prompt.
parameters: []
requestBody:
content:
@ -697,9 +720,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: >-
Get a prompt by its identifier and optional version.
summary: Get prompt.
description: >-
Get prompt.
Get a prompt by its identifier and optional version.
parameters:
- name: prompt_id
@ -737,9 +761,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: >-
Update an existing prompt (increments version).
summary: Update prompt.
description: >-
Update prompt.
Update an existing prompt (increments version).
parameters:
- name: prompt_id
@ -771,8 +796,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: Delete a prompt.
description: Delete a prompt.
summary: Delete prompt.
description: >-
Delete prompt.
Delete a prompt.
parameters:
- name: prompt_id
in: path
@ -803,9 +831,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: >-
Set which version of a prompt should be the default in get_prompt (latest).
summary: Set prompt version.
description: >-
Set prompt version.
Set which version of a prompt should be the default in get_prompt (latest).
parameters:
- name: prompt_id
@ -843,8 +872,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: List all versions of a specific prompt.
description: List all versions of a specific prompt.
summary: List prompt versions.
description: >-
List prompt versions.
List all versions of a specific prompt.
parameters:
- name: prompt_id
in: path
@ -876,8 +908,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
summary: List all available providers.
description: List all available providers.
summary: List providers.
description: >-
List providers.
List all available providers.
parameters: []
deprecated: false
/v1/providers/{provider_id}:
@ -902,9 +937,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
summary: >-
Get detailed information about a specific provider.
summary: Get provider.
description: >-
Get provider.
Get detailed information about a specific provider.
parameters:
- name: provider_id
@ -935,8 +971,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: List all OpenAI responses.
description: List all OpenAI responses.
summary: List all responses.
description: List all responses.
parameters:
- name: after
in: query
@ -984,8 +1020,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: List all OpenAI responses.
description: List all OpenAI responses.
summary: List all responses.
description: List all responses.
parameters: []
requestBody:
content:
@ -1015,8 +1051,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: Retrieve an OpenAI response by its ID.
description: Retrieve an OpenAI response by its ID.
summary: Get a model response.
description: Get a model response.
parameters:
- name: response_id
in: path
@ -1046,8 +1082,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: Delete an OpenAI response by its ID.
description: Delete an OpenAI response by its ID.
summary: Delete a response.
description: Delete a response.
parameters:
- name: response_id
in: path
@ -1077,10 +1113,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: >-
List input items for a given OpenAI response.
description: >-
List input items for a given OpenAI response.
summary: List input items.
description: List input items.
parameters:
- name: response_id
in: path
@ -1149,8 +1183,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
summary: Run a shield.
description: Run a shield.
summary: Run shield.
description: >-
Run shield.
Run a shield.
parameters: []
requestBody:
content:
@ -2706,8 +2743,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
summary: Get the version of the service.
description: Get the version of the service.
summary: Get version.
description: >-
Get version.
Get the version of the service.
parameters: []
deprecated: false
jsonSchemaDialect: >-
@ -9292,9 +9332,16 @@ tags:
- `background`
x-displayName: Agents
- name: Files
description: ''
description: >-
This API is used to upload documents that can be used with other Llama Stack
APIs.
x-displayName: Files
- name: Inference
description: >-
Llama Stack Inference API for generating completions, chat completions, and
embeddings.
This API provides the raw interface to the underlying models. Two kinds of models
are supported:
@ -9302,23 +9349,25 @@ tags:
- Embedding models: these models generate embeddings to be used for semantic
search.
x-displayName: >-
Llama Stack Inference API for generating completions, chat completions, and
embeddings.
x-displayName: Inference
- name: Inspect
description: ''
description: >-
APIs for inspecting the Llama Stack service, including health status, available
API routes with methods and implementing providers.
x-displayName: Inspect
- name: Models
description: ''
- name: Prompts
description: ''
x-displayName: >-
description: >-
Protocol for prompt management operations.
x-displayName: Prompts
- name: Providers
description: ''
x-displayName: >-
description: >-
Providers API for inspecting, listing, and modifying providers and their configurations.
x-displayName: Providers
- name: Safety
description: ''
description: OpenAI-compatible Moderations API.
x-displayName: Safety
- name: Scoring
description: ''
- name: ScoringFunctions


@ -69,8 +69,8 @@
"tags": [
"Inference"
],
"summary": "List all chat completions.",
"description": "List all chat completions.",
"summary": "List chat completions.",
"description": "List chat completions.",
"parameters": [
{
"name": "after",
@ -146,8 +146,8 @@
"tags": [
"Inference"
],
"summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
"summary": "Create chat completions.",
"description": "Create chat completions.\nGenerate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -191,8 +191,8 @@
"tags": [
"Inference"
],
"summary": "Describe a chat completion by its ID.",
"description": "Describe a chat completion by its ID.",
"summary": "Get chat completion.",
"description": "Get chat completion.\nDescribe a chat completion by its ID.",
"parameters": [
{
"name": "completion_id",
@ -236,8 +236,8 @@
"tags": [
"Inference"
],
"summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
"summary": "Create completion.",
"description": "Create completion.\nGenerate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -281,8 +281,8 @@
"tags": [
"Inference"
],
"summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
"summary": "Create embeddings.",
"description": "Create embeddings.\nGenerate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@ -326,8 +326,8 @@
"tags": [
"Files"
],
"summary": "Returns a list of files that belong to the user's organization.",
"description": "Returns a list of files that belong to the user's organization.",
"summary": "List files.",
"description": "List files.\nReturns a list of files that belong to the user's organization.",
"parameters": [
{
"name": "after",
@ -396,8 +396,8 @@
"tags": [
"Files"
],
"summary": "Upload a file that can be used across various endpoints.",
"description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"summary": "Upload file.",
"description": "Upload file.\nUpload a file that can be used across various endpoints.\n\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
@ -457,8 +457,8 @@
"tags": [
"Files"
],
"summary": "Returns information about a specific file.",
"description": "Returns information about a specific file.",
"summary": "Retrieve file.",
"description": "Retrieve file.\nReturns information about a specific file.",
"parameters": [
{
"name": "file_id",
@ -500,8 +500,8 @@
"tags": [
"Files"
],
"summary": "Delete a file.",
"description": "Delete a file.",
"summary": "Delete file.",
"description": "Delete file.",
"parameters": [
{
"name": "file_id",
@ -545,8 +545,8 @@
"tags": [
"Files"
],
"summary": "Returns the contents of the specified file.",
"description": "Returns the contents of the specified file.",
"summary": "Retrieve file content.",
"description": "Retrieve file content.\nReturns the contents of the specified file.",
"parameters": [
{
"name": "file_id",
@ -590,8 +590,8 @@
"tags": [
"Inspect"
],
"summary": "Get the current health status of the service.",
"description": "Get the current health status of the service.",
"summary": "Get health status.",
"description": "Get health status.\nGet the current health status of the service.",
"parameters": [],
"deprecated": false
}
@ -625,8 +625,8 @@
"tags": [
"Inspect"
],
"summary": "List all available API routes with their methods and implementing providers.",
"description": "List all available API routes with their methods and implementing providers.",
"summary": "List routes.",
"description": "List routes.\nList all available API routes with their methods and implementing providers.",
"parameters": [],
"deprecated": false
}
@ -693,8 +693,8 @@
"tags": [
"Models"
],
"summary": "Register a model.",
"description": "Register a model.",
"summary": "Register model.",
"description": "Register model.\nRegister a model.",
"parameters": [],
"requestBody": {
"content": {
@ -738,8 +738,8 @@
"tags": [
"Models"
],
"summary": "Get a model by its identifier.",
"description": "Get a model by its identifier.",
"summary": "Get model.",
"description": "Get model.\nGet a model by its identifier.",
"parameters": [
{
"name": "model_id",
@ -774,8 +774,8 @@
"tags": [
"Models"
],
"summary": "Unregister a model.",
"description": "Unregister a model.",
"summary": "Unregister model.",
"description": "Unregister model.\nUnregister a model.",
"parameters": [
{
"name": "model_id",
@ -819,8 +819,8 @@
"tags": [
"Safety"
],
"summary": "Classifies if text and/or image inputs are potentially harmful.",
"description": "Classifies if text and/or image inputs are potentially harmful.",
"summary": "Create moderation.",
"description": "Create moderation.\nClassifies if text and/or image inputs are potentially harmful.",
"parameters": [],
"requestBody": {
"content": {
@ -897,8 +897,8 @@
"tags": [
"Prompts"
],
"summary": "Create a new prompt.",
"description": "Create a new prompt.",
"summary": "Create prompt.",
"description": "Create prompt.\nCreate a new prompt.",
"parameters": [],
"requestBody": {
"content": {
@ -942,8 +942,8 @@
"tags": [
"Prompts"
],
"summary": "Get a prompt by its identifier and optional version.",
"description": "Get a prompt by its identifier and optional version.",
"summary": "Get prompt.",
"description": "Get prompt.\nGet a prompt by its identifier and optional version.",
"parameters": [
{
"name": "prompt_id",
@ -994,8 +994,8 @@
"tags": [
"Prompts"
],
"summary": "Update an existing prompt (increments version).",
"description": "Update an existing prompt (increments version).",
"summary": "Update prompt.",
"description": "Update prompt.\nUpdate an existing prompt (increments version).",
"parameters": [
{
"name": "prompt_id",
@ -1040,8 +1040,8 @@
"tags": [
"Prompts"
],
"summary": "Delete a prompt.",
"description": "Delete a prompt.",
"summary": "Delete prompt.",
"description": "Delete prompt.\nDelete a prompt.",
"parameters": [
{
"name": "prompt_id",
@ -1085,8 +1085,8 @@
"tags": [
"Prompts"
],
"summary": "Set which version of a prompt should be the default in get_prompt (latest).",
"description": "Set which version of a prompt should be the default in get_prompt (latest).",
"summary": "Set prompt version.",
"description": "Set prompt version.\nSet which version of a prompt should be the default in get_prompt (latest).",
"parameters": [
{
"name": "prompt_id",
@ -1140,8 +1140,8 @@
"tags": [
"Prompts"
],
"summary": "List all versions of a specific prompt.",
"description": "List all versions of a specific prompt.",
"summary": "List prompt versions.",
"description": "List prompt versions.\nList all versions of a specific prompt.",
"parameters": [
{
"name": "prompt_id",
@ -1185,8 +1185,8 @@
"tags": [
"Providers"
],
"summary": "List all available providers.",
"description": "List all available providers.",
"summary": "List providers.",
"description": "List providers.\nList all available providers.",
"parameters": [],
"deprecated": false
}
@ -1220,8 +1220,8 @@
"tags": [
"Providers"
],
"summary": "Get detailed information about a specific provider.",
"description": "Get detailed information about a specific provider.",
"summary": "Get provider.",
"description": "Get provider.\nGet detailed information about a specific provider.",
"parameters": [
{
"name": "provider_id",
@ -1265,8 +1265,8 @@
"tags": [
"Agents"
],
"summary": "List all OpenAI responses.",
"description": "List all OpenAI responses.",
"summary": "List all responses.",
"description": "List all responses.",
"parameters": [
{
"name": "after",
@ -1335,8 +1335,8 @@
"tags": [
"Agents"
],
"summary": "List all OpenAI responses.",
"description": "List all OpenAI responses.",
"summary": "List all responses.",
"description": "List all responses.",
"parameters": [],
"requestBody": {
"content": {
@ -1380,8 +1380,8 @@
"tags": [
"Agents"
],
"summary": "Retrieve an OpenAI response by its ID.",
"description": "Retrieve an OpenAI response by its ID.",
"summary": "Get a model response.",
"description": "Get a model response.",
"parameters": [
{
"name": "response_id",
@ -1423,8 +1423,8 @@
"tags": [
"Agents"
],
"summary": "Delete an OpenAI response by its ID.",
"description": "Delete an OpenAI response by its ID.",
"summary": "Delete a response.",
"description": "Delete a response.",
"parameters": [
{
"name": "response_id",
@ -1468,8 +1468,8 @@
"tags": [
"Agents"
],
"summary": "List input items for a given OpenAI response.",
"description": "List input items for a given OpenAI response.",
"summary": "List input items.",
"description": "List input items.",
"parameters": [
{
"name": "response_id",
@ -1561,8 +1561,8 @@
"tags": [
"Safety"
],
"summary": "Run a shield.",
"description": "Run a shield.",
"summary": "Run shield.",
"description": "Run shield.\nRun a shield.",
"parameters": [],
"requestBody": {
"content": {
@ -3694,8 +3694,8 @@
"tags": [
"Inspect"
],
"summary": "Get the version of the service.",
"description": "Get the version of the service.",
"summary": "Get version.",
"description": "Get version.\nGet the version of the service.",
"parameters": [],
"deprecated": false
}
@ -17972,16 +17972,18 @@
},
{
"name": "Files",
"description": ""
"description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
"x-displayName": "Files"
},
{
"name": "Inference",
"description": "This API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
"x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
"description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n\nThis API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
"x-displayName": "Inference"
},
{
"name": "Inspect",
"description": ""
"description": "APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.",
"x-displayName": "Inspect"
},
{
"name": "Models",
@ -17993,17 +17995,18 @@
},
{
"name": "Prompts",
"description": "",
"x-displayName": "Protocol for prompt management operations."
"description": "Protocol for prompt management operations.",
"x-displayName": "Prompts"
},
{
"name": "Providers",
"description": "",
"x-displayName": "Providers API for inspecting, listing, and modifying providers and their configurations."
"description": "Providers API for inspecting, listing, and modifying providers and their configurations.",
"x-displayName": "Providers"
},
{
"name": "Safety",
"description": ""
"description": "OpenAI-compatible Moderations API.",
"x-displayName": "Safety"
},
{
"name": "Scoring",


@ -36,8 +36,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: List all chat completions.
description: List all chat completions.
summary: List chat completions.
description: List chat completions.
parameters:
- name: after
in: query
@ -90,10 +90,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
summary: Create chat completions.
description: >-
Create chat completions.
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
parameters: []
@ -125,8 +125,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: Describe a chat completion by its ID.
description: Describe a chat completion by its ID.
summary: Get chat completion.
description: >-
Get chat completion.
Describe a chat completion by its ID.
parameters:
- name: completion_id
in: path
@ -156,10 +159,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
summary: Create completion.
description: >-
Create completion.
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
parameters: []
@ -192,10 +195,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
summary: >-
Generate OpenAI-compatible embeddings for the given input using the specified
model.
summary: Create embeddings.
description: >-
Create embeddings.
Generate OpenAI-compatible embeddings for the given input using the specified
model.
parameters: []
@ -228,9 +231,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns a list of files that belong to the user's organization.
summary: List files.
description: >-
List files.
Returns a list of files that belong to the user's organization.
parameters:
- name: after
@ -288,11 +292,13 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Upload a file that can be used across various endpoints.
summary: Upload file.
description: >-
Upload file.
Upload a file that can be used across various endpoints.
The file upload should be a multipart form request with:
- file: The File object (not file name) to be uploaded.
@ -341,9 +347,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns information about a specific file.
summary: Retrieve file.
description: >-
Retrieve file.
Returns information about a specific file.
parameters:
- name: file_id
@ -375,8 +382,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: Delete a file.
description: Delete a file.
summary: Delete file.
description: Delete file.
parameters:
- name: file_id
in: path
@ -408,9 +415,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
summary: >-
Returns the contents of the specified file.
summary: Retrieve file content.
description: >-
Retrieve file content.
Returns the contents of the specified file.
parameters:
- name: file_id
@ -443,9 +451,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
summary: >-
Get the current health status of the service.
summary: Get health status.
description: >-
Get health status.
Get the current health status of the service.
parameters: []
deprecated: false
@ -471,9 +480,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
summary: >-
List all available API routes with their methods and implementing providers.
summary: List routes.
description: >-
List routes.
List all available API routes with their methods and implementing providers.
parameters: []
deprecated: false
@ -522,8 +532,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
summary: Register a model.
description: Register a model.
summary: Register model.
description: >-
Register model.
Register a model.
parameters: []
requestBody:
content:
@ -553,8 +566,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
summary: Get a model by its identifier.
description: Get a model by its identifier.
summary: Get model.
description: >-
Get model.
Get a model by its identifier.
parameters:
- name: model_id
in: path
@ -579,8 +595,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
summary: Unregister a model.
description: Unregister a model.
summary: Unregister model.
description: >-
Unregister model.
Unregister a model.
parameters:
- name: model_id
in: path
@ -611,9 +630,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
summary: >-
Classifies if text and/or image inputs are potentially harmful.
summary: Create moderation.
description: >-
Create moderation.
Classifies if text and/or image inputs are potentially harmful.
parameters: []
requestBody:
@ -669,8 +689,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: Create a new prompt.
description: Create a new prompt.
summary: Create prompt.
description: >-
Create prompt.
Create a new prompt.
parameters: []
requestBody:
content:
@ -700,9 +723,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: >-
Get a prompt by its identifier and optional version.
summary: Get prompt.
description: >-
Get prompt.
Get a prompt by its identifier and optional version.
parameters:
- name: prompt_id
@ -740,9 +764,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: >-
Update an existing prompt (increments version).
summary: Update prompt.
description: >-
Update prompt.
Update an existing prompt (increments version).
parameters:
- name: prompt_id
@ -774,8 +799,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: Delete a prompt.
description: Delete a prompt.
summary: Delete prompt.
description: >-
Delete prompt.
Delete a prompt.
parameters:
- name: prompt_id
in: path
@ -806,9 +834,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: >-
Set which version of a prompt should be the default in get_prompt (latest).
summary: Set prompt version.
description: >-
Set prompt version.
Set which version of a prompt should be the default in get_prompt (latest).
parameters:
- name: prompt_id
@ -846,8 +875,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
summary: List all versions of a specific prompt.
description: List all versions of a specific prompt.
summary: List prompt versions.
description: >-
List prompt versions.
List all versions of a specific prompt.
parameters:
- name: prompt_id
in: path
@ -879,8 +911,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
summary: List all available providers.
description: List all available providers.
summary: List providers.
description: >-
List providers.
List all available providers.
parameters: []
deprecated: false
/v1/providers/{provider_id}:
@ -905,9 +940,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
summary: >-
Get detailed information about a specific provider.
summary: Get provider.
description: >-
Get provider.
Get detailed information about a specific provider.
parameters:
- name: provider_id
@ -938,8 +974,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: List all OpenAI responses.
description: List all OpenAI responses.
summary: List all responses.
description: List all responses.
parameters:
- name: after
in: query
@ -987,8 +1023,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: List all OpenAI responses.
description: List all OpenAI responses.
summary: List all responses.
description: List all responses.
parameters: []
requestBody:
content:
@ -1018,8 +1054,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: Retrieve an OpenAI response by its ID.
description: Retrieve an OpenAI response by its ID.
summary: Get a model response.
description: Get a model response.
parameters:
- name: response_id
in: path
@ -1049,8 +1085,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: Delete an OpenAI response by its ID.
description: Delete an OpenAI response by its ID.
summary: Delete a response.
description: Delete a response.
parameters:
- name: response_id
in: path
@ -1080,10 +1116,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
summary: >-
List input items for a given OpenAI response.
description: >-
List input items for a given OpenAI response.
summary: List input items.
description: List input items.
parameters:
- name: response_id
in: path
@ -1152,8 +1186,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
summary: Run a shield.
description: Run a shield.
summary: Run shield.
description: >-
Run shield.
Run a shield.
parameters: []
requestBody:
content:
@ -2709,8 +2746,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
summary: Get the version of the service.
description: Get the version of the service.
summary: Get version.
description: >-
Get version.
Get the version of the service.
parameters: []
deprecated: false
/v1beta/datasetio/append-rows/{dataset_id}:
@ -13338,9 +13378,16 @@ tags:
x-displayName: >-
Llama Stack Evaluation API for running evaluations on model and agent candidates.
- name: Files
description: ''
description: >-
This API is used to upload documents that can be used with other Llama Stack
APIs.
x-displayName: Files
- name: Inference
description: >-
Llama Stack Inference API for generating completions, chat completions, and
embeddings.
This API provides the raw interface to the underlying models. Two kinds of models
are supported:
@ -13348,25 +13395,27 @@ tags:
- Embedding models: these models generate embeddings to be used for semantic
search.
x-displayName: >-
Llama Stack Inference API for generating completions, chat completions, and
embeddings.
x-displayName: Inference
- name: Inspect
description: ''
description: >-
APIs for inspecting the Llama Stack service, including health status, available
API routes with methods and implementing providers.
x-displayName: Inspect
- name: Models
description: ''
- name: PostTraining (Coming Soon)
description: ''
- name: Prompts
description: ''
x-displayName: >-
description: >-
Protocol for prompt management operations.
x-displayName: Prompts
- name: Providers
description: ''
x-displayName: >-
description: >-
Providers API for inspecting, listing, and modifying providers and their configurations.
x-displayName: Providers
- name: Safety
description: ''
description: OpenAI-compatible Moderations API.
x-displayName: Safety
- name: Scoring
description: ''
- name: ScoringFunctions
View file
@ -783,7 +783,7 @@ class Agents(Protocol):
self,
response_id: str,
) -> OpenAIResponseObject:
"""Retrieve an OpenAI response by its ID.
"""Get a model response.
:param response_id: The ID of the OpenAI response to retrieve.
:returns: An OpenAIResponseObject.
@ -806,7 +806,7 @@ class Agents(Protocol):
include: list[str] | None = None,
max_infer_iters: int | None = 10, # this is an extension to the OpenAI API
) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
"""Create a new OpenAI response.
"""Create a model response.
:param input: Input message(s) to create the response.
:param model: The underlying LLM used for completions.
@ -825,7 +825,7 @@ class Agents(Protocol):
model: str | None = None,
order: Order | None = Order.desc,
) -> ListOpenAIResponseObject:
"""List all OpenAI responses.
"""List all responses.
:param after: The ID of the last response to return.
:param limit: The number of responses to return.
@ -848,7 +848,7 @@ class Agents(Protocol):
limit: int | None = 20,
order: Order | None = Order.desc,
) -> ListOpenAIResponseInputItem:
"""List input items for a given OpenAI response.
"""List input items.
:param response_id: The ID of the response to retrieve input items for.
:param after: An item ID to list items after, used for pagination.
@ -863,7 +863,7 @@ class Agents(Protocol):
@webmethod(route="/openai/v1/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1, deprecated=True)
@webmethod(route="/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1)
async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
"""Delete an OpenAI response by its ID.
"""Delete a response.
:param response_id: The ID of the OpenAI response to delete.
:returns: An OpenAIDeleteResponseObject
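The retitled docstrings above map onto the OpenAI-compatible /v1/responses routes. A minimal client-side sketch, assuming a local Llama Stack server at http://localhost:8321, no auth, and a registered model id of "llama3.2:3b" (all assumptions, not part of this change), using the openai Python SDK:

from openai import OpenAI

# Assumed local endpoint and model id; adjust for your deployment.
client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")

# Create a model response, then read it back and list its input items.
resp = client.responses.create(model="llama3.2:3b", input="Say hello in one sentence.")
fetched = client.responses.retrieve(resp.id)        # "Get a model response."
items = client.responses.input_items.list(resp.id)  # "List input items."
print(fetched.id, len(items.data))

# "Delete a response." once it is no longer needed.
client.responses.delete(resp.id)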
View file
@ -104,6 +104,11 @@ class OpenAIFileDeleteResponse(BaseModel):
@runtime_checkable
@trace_protocol
class Files(Protocol):
"""Files
This API is used to upload documents that can be used with other Llama Stack APIs.
"""
# OpenAI Files API Endpoints
@webmethod(route="/openai/v1/files", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
@webmethod(route="/files", method="POST", level=LLAMA_STACK_API_V1)
@ -113,7 +118,8 @@ class Files(Protocol):
purpose: Annotated[OpenAIFilePurpose, Form()],
expires_after: Annotated[ExpiresAfter | None, Form()] = None,
) -> OpenAIFileObject:
"""
"""Upload file.
Upload a file that can be used across various endpoints.
The file upload should be a multipart form request with:
@ -137,7 +143,8 @@ class Files(Protocol):
order: Order | None = Order.desc,
purpose: OpenAIFilePurpose | None = None,
) -> ListOpenAIFileResponse:
"""
"""List files.
Returns a list of files that belong to the user's organization.
:param after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
@ -154,7 +161,8 @@ class Files(Protocol):
self,
file_id: str,
) -> OpenAIFileObject:
"""
"""Retrieve file.
Returns information about a specific file.
:param file_id: The ID of the file to use for this request.
@ -168,8 +176,7 @@ class Files(Protocol):
self,
file_id: str,
) -> OpenAIFileDeleteResponse:
"""
Delete a file.
"""Delete file.
:param file_id: The ID of the file to use for this request.
:returns: An OpenAIFileDeleteResponse indicating successful deletion.
@ -182,7 +189,8 @@ class Files(Protocol):
self,
file_id: str,
) -> Response:
"""
"""Retrieve file content.
Returns the contents of the specified file.
:param file_id: The ID of the file to use for this request.
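As a usage note for the reworded Files docstrings: the upload is a multipart form request carrying the file object and its purpose. A sketch with the openai SDK, assuming a local server at http://localhost:8321 and the "assistants" purpose (both assumptions):

from openai import OpenAI

# Assumed local endpoint; the Files routes above are served OpenAI-compatibly.
client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")

# "Upload file." -- sent as multipart form data (file + purpose).
with open("notes.txt", "rb") as f:
    uploaded = client.files.create(file=f, purpose="assistants")

# "List files.", "Retrieve file.", "Retrieve file content.", "Delete file."
print([item.id for item in client.files.list().data])
meta = client.files.retrieve(uploaded.id)
content = client.files.content(uploaded.id).read()
client.files.delete(uploaded.id)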
View file
@ -1094,7 +1094,9 @@ class InferenceProvider(Protocol):
# for fill-in-the-middle type completion
suffix: str | None = None,
) -> OpenAICompletion:
"""Generate an OpenAI-compatible completion for the given prompt using the specified model.
"""Create completion.
Generate an OpenAI-compatible completion for the given prompt using the specified model.
:param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
:param prompt: The prompt to generate a completion for.
@ -1146,7 +1148,9 @@ class InferenceProvider(Protocol):
top_p: float | None = None,
user: str | None = None,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
"""Generate an OpenAI-compatible chat completion for the given messages using the specified model.
"""Create chat completions.
Generate an OpenAI-compatible chat completion for the given messages using the specified model.
:param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
:param messages: List of messages in the conversation.
@ -1185,7 +1189,9 @@ class InferenceProvider(Protocol):
dimensions: int | None = None,
user: str | None = None,
) -> OpenAIEmbeddingsResponse:
"""Generate OpenAI-compatible embeddings for the given input using the specified model.
"""Create embeddings.
Generate OpenAI-compatible embeddings for the given input using the specified model.
:param model: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.
:param input: Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings.
@ -1198,7 +1204,9 @@ class InferenceProvider(Protocol):
class Inference(InferenceProvider):
"""Llama Stack Inference API for generating completions, chat completions, and embeddings.
"""Inference
Llama Stack Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Two kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@ -1214,7 +1222,7 @@ class Inference(InferenceProvider):
model: str | None = None,
order: Order | None = Order.desc,
) -> ListOpenAIChatCompletionResponse:
"""List all chat completions.
"""List chat completions.
:param after: The ID of the last chat completion to return.
:param limit: The maximum number of chat completions to return.
@ -1229,7 +1237,9 @@ class Inference(InferenceProvider):
)
@webmethod(route="/chat/completions/{completion_id}", method="GET", level=LLAMA_STACK_API_V1)
async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
"""Describe a chat completion by its ID.
"""Get chat completion.
Describe a chat completion by its ID.
:param completion_id: ID of the chat completion.
:returns: A OpenAICompletionWithInputMessages.
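The renamed Inference docstrings all target OpenAI-compatible routes, so a stock OpenAI client works against them. A sketch, assuming a local server plus "llama3.2:3b" and "all-MiniLM-L6-v2" as registered LLM and embedding model ids (assumptions, not from this diff):

from openai import OpenAI

# Assumed endpoint and model ids; any models registered via /v1/models would do.
client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")

# "Create chat completions."
chat = client.chat.completions.create(
    model="llama3.2:3b",
    messages=[{"role": "user", "content": "One fun fact about llamas."}],
)

# "Create completion." (raw prompt) and "Create embeddings."
comp = client.completions.create(model="llama3.2:3b", prompt="The capital of France is")
emb = client.embeddings.create(model="all-MiniLM-L6-v2", input=["semantic search text"])

print(chat.choices[0].message.content, comp.choices[0].text, len(emb.data[0].embedding))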
View file
@ -58,9 +58,16 @@ class ListRoutesResponse(BaseModel):
@runtime_checkable
class Inspect(Protocol):
"""Inspect
APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
"""
@webmethod(route="/inspect/routes", method="GET", level=LLAMA_STACK_API_V1)
async def list_routes(self) -> ListRoutesResponse:
"""List all available API routes with their methods and implementing providers.
"""List routes.
List all available API routes with their methods and implementing providers.
:returns: Response containing information about all available routes.
"""
@ -68,7 +75,9 @@ class Inspect(Protocol):
@webmethod(route="/health", method="GET", level=LLAMA_STACK_API_V1)
async def health(self) -> HealthInfo:
"""Get the current health status of the service.
"""Get health status.
Get the current health status of the service.
:returns: Health information indicating if the service is operational.
"""
@ -76,7 +85,9 @@ class Inspect(Protocol):
@webmethod(route="/version", method="GET", level=LLAMA_STACK_API_V1)
async def version(self) -> VersionInfo:
"""Get the version of the service.
"""Get version.
Get the version of the service.
:returns: Version information containing the service version number.
"""
View file
@ -124,7 +124,9 @@ class Models(Protocol):
self,
model_id: str,
) -> Model:
"""Get a model by its identifier.
"""Get model.
Get a model by its identifier.
:param model_id: The identifier of the model to get.
:returns: A Model.
@ -140,7 +142,9 @@ class Models(Protocol):
metadata: dict[str, Any] | None = None,
model_type: ModelType | None = None,
) -> Model:
"""Register a model.
"""Register model.
Register a model.
:param model_id: The identifier of the model to register.
:param provider_model_id: The identifier of the model in the provider.
@ -156,7 +160,9 @@ class Models(Protocol):
self,
model_id: str,
) -> None:
"""Unregister a model.
"""Unregister model.
Unregister a model.
:param model_id: The identifier of the model to unregister.
"""
View file
@ -94,7 +94,9 @@ class ListPromptsResponse(BaseModel):
@runtime_checkable
@trace_protocol
class Prompts(Protocol):
"""Protocol for prompt management operations."""
"""Prompts
Protocol for prompt management operations."""
@webmethod(route="/prompts", method="GET", level=LLAMA_STACK_API_V1)
async def list_prompts(self) -> ListPromptsResponse:
@ -109,7 +111,9 @@ class Prompts(Protocol):
self,
prompt_id: str,
) -> ListPromptsResponse:
"""List all versions of a specific prompt.
"""List prompt versions.
List all versions of a specific prompt.
:param prompt_id: The identifier of the prompt to list versions for.
:returns: A ListPromptsResponse containing all versions of the prompt.
@ -122,7 +126,9 @@ class Prompts(Protocol):
prompt_id: str,
version: int | None = None,
) -> Prompt:
"""Get a prompt by its identifier and optional version.
"""Get prompt.
Get a prompt by its identifier and optional version.
:param prompt_id: The identifier of the prompt to get.
:param version: The version of the prompt to get (defaults to latest).
@ -136,7 +142,9 @@ class Prompts(Protocol):
prompt: str,
variables: list[str] | None = None,
) -> Prompt:
"""Create a new prompt.
"""Create prompt.
Create a new prompt.
:param prompt: The prompt text content with variable placeholders.
:param variables: List of variable names that can be used in the prompt template.
@ -153,7 +161,9 @@ class Prompts(Protocol):
variables: list[str] | None = None,
set_as_default: bool = True,
) -> Prompt:
"""Update an existing prompt (increments version).
"""Update prompt.
Update an existing prompt (increments version).
:param prompt_id: The identifier of the prompt to update.
:param prompt: The updated prompt text content.
@ -169,7 +179,9 @@ class Prompts(Protocol):
self,
prompt_id: str,
) -> None:
"""Delete a prompt.
"""Delete prompt.
Delete a prompt.
:param prompt_id: The identifier of the prompt to delete.
"""
@ -181,7 +193,9 @@ class Prompts(Protocol):
prompt_id: str,
version: int,
) -> Prompt:
"""Set which version of a prompt should be the default in get_prompt (latest).
"""Set prompt version.
Set which version of a prompt should be the default in get_prompt (latest).
:param prompt_id: The identifier of the prompt.
:param version: The version to set as default.
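For the Prompts docstrings, only the list route (/prompts) is visible in this hunk; the other paths, the request fields, and the {{ }} placeholder syntax below are assumptions used purely for illustration:

import requests

BASE = "http://localhost:8321/v1"  # assumed local server

# "Create prompt." with one template variable, then "List prompt versions."
created = requests.post(
    f"{BASE}/prompts",
    json={"prompt": "Summarize {{ document }} in two sentences.", "variables": ["document"]},
).json()
prompt_id = created["prompt_id"]
versions = requests.get(f"{BASE}/prompts/{prompt_id}/versions").json()

# "Set prompt version." then "Delete prompt."
requests.post(f"{BASE}/prompts/{prompt_id}/set-default-version", json={"version": 1})
requests.delete(f"{BASE}/prompts/{prompt_id}")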
View file
@ -42,13 +42,16 @@ class ListProvidersResponse(BaseModel):
@runtime_checkable
class Providers(Protocol):
"""
"""Providers
Providers API for inspecting, listing, and modifying providers and their configurations.
"""
@webmethod(route="/providers", method="GET", level=LLAMA_STACK_API_V1)
async def list_providers(self) -> ListProvidersResponse:
"""List all available providers.
"""List providers.
List all available providers.
:returns: A ListProvidersResponse containing information about all providers.
"""
@ -56,7 +59,9 @@ class Providers(Protocol):
@webmethod(route="/providers/{provider_id}", method="GET", level=LLAMA_STACK_API_V1)
async def inspect_provider(self, provider_id: str) -> ProviderInfo:
"""Get detailed information about a specific provider.
"""Get provider.
Get detailed information about a specific provider.
:param provider_id: The ID of the provider to inspect.
:returns: A ProviderInfo object containing the provider's details.
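The Providers endpoints in this excerpt are read-only GETs (/v1/providers and /v1/providers/{provider_id} per the spec hunks above). A sketch with requests, where the server address and the "ollama" provider id are assumptions:

import requests

BASE = "http://localhost:8321/v1"  # assumed local server

providers = requests.get(f"{BASE}/providers").json()      # "List providers."
detail = requests.get(f"{BASE}/providers/ollama").json()  # "Get provider."
print(providers, detail)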
View file
@ -96,6 +96,11 @@ class ShieldStore(Protocol):
@runtime_checkable
@trace_protocol
class Safety(Protocol):
"""Safety
OpenAI-compatible Moderations API.
"""
shield_store: ShieldStore
@webmethod(route="/safety/run-shield", method="POST", level=LLAMA_STACK_API_V1)
@ -105,7 +110,9 @@ class Safety(Protocol):
messages: list[Message],
params: dict[str, Any],
) -> RunShieldResponse:
"""Run a shield.
"""Run shield.
Run a shield.
:param shield_id: The identifier of the shield to run.
:param messages: The messages to run the shield on.
@ -117,7 +124,9 @@ class Safety(Protocol):
@webmethod(route="/openai/v1/moderations", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
@webmethod(route="/moderations", method="POST", level=LLAMA_STACK_API_V1)
async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
"""Classifies if text and/or image inputs are potentially harmful.
"""Create moderation.
Classifies if text and/or image inputs are potentially harmful.
:param input: Input (or inputs) to classify.
Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models.
:param model: The content moderation model you would like to use.
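The Safety protocol exposes both the shield runner and the OpenAI-compatible moderations route registered above. A sketch, assuming a local server and a "llama-guard" shield/model id (assumptions), with the run-shield payload mirroring the method signature:

from openai import OpenAI
import requests

BASE = "http://localhost:8321"  # assumed local Llama Stack server

# "Create moderation." via the OpenAI-compatible /v1/moderations route.
client = OpenAI(base_url=f"{BASE}/v1", api_key="none")
mod = client.moderations.create(model="llama-guard", input="Is this text safe to post?")
print(mod.results[0].flagged)

# "Run shield." -- shield id and message shape are illustrative.
resp = requests.post(
    f"{BASE}/v1/safety/run-shield",
    json={
        "shield_id": "llama-guard",
        "messages": [{"role": "user", "content": "Is this text safe to post?"}],
        "params": {},
    },
)
print(resp.json())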