From a8da6ba3a76f9e0fe81204f6a015aa62f7015e19 Mon Sep 17 00:00:00 2001
From: Alexey Rybak <50731695+reluctantfuturist@users.noreply.github.com>
Date: Mon, 6 Oct 2025 10:46:33 -0700
Subject: [PATCH] docs: API docstrings cleanup for better documentation
rendering (#3661)
# What does this PR do?
* Cleans up API docstrings for better documentation rendering
## Test Plan
* Manual testing
---------
Signed-off-by: Doug Edgar
Signed-off-by: Charlie Doern
Signed-off-by: Francisco Javier Arceo
Signed-off-by: dependabot[bot]
Co-authored-by: ehhuang
Co-authored-by: Ashwin Bharambe
Co-authored-by: Matthew Farrellee
Co-authored-by: Doug Edgar
Co-authored-by: Christian Zaccaria <73656840+ChristianZaccaria@users.noreply.github.com>
Co-authored-by: Anastas Stoyanovsky
Co-authored-by: Charlie Doern
Co-authored-by: Francisco Arceo
Co-authored-by: Claude
Co-authored-by: Young Han <110819238+seyeong-han@users.noreply.github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
docs/docs/providers/files/index.mdx | 7 +
docs/docs/providers/inference/index.mdx | 8 +-
docs/docs/providers/safety/index.mdx | 7 +
docs/static/deprecated-llama-stack-spec.html | 74 +++----
docs/static/deprecated-llama-stack-spec.yaml | 97 +++++----
docs/static/llama-stack-spec.html | 145 ++++++-------
docs/static/llama-stack-spec.yaml | 203 ++++++++++++-------
docs/static/stainless-llama-stack-spec.html | 145 ++++++-------
docs/static/stainless-llama-stack-spec.yaml | 203 ++++++++++++-------
llama_stack/apis/agents/agents.py | 10 +-
llama_stack/apis/files/files.py | 20 +-
llama_stack/apis/inference/inference.py | 22 +-
llama_stack/apis/inspect/inspect.py | 17 +-
llama_stack/apis/models/models.py | 12 +-
llama_stack/apis/prompts/prompts.py | 28 ++-
llama_stack/apis/providers/providers.py | 11 +-
llama_stack/apis/safety/safety.py | 13 +-
17 files changed, 611 insertions(+), 411 deletions(-)
diff --git a/docs/docs/providers/files/index.mdx b/docs/docs/providers/files/index.mdx
index 7d729d90f..19e338035 100644
--- a/docs/docs/providers/files/index.mdx
+++ b/docs/docs/providers/files/index.mdx
@@ -1,4 +1,7 @@
---
+description: "Files
+
+ This API is used to upload documents that can be used with other Llama Stack APIs."
sidebar_label: Files
title: Files
---
@@ -7,4 +10,8 @@ title: Files
## Overview
+Files
+
+ This API is used to upload documents that can be used with other Llama Stack APIs.
+
This section contains documentation for all available providers for the **files** API.
diff --git a/docs/docs/providers/inference/index.mdx b/docs/docs/providers/inference/index.mdx
index ebbaf1be1..c2bf69962 100644
--- a/docs/docs/providers/inference/index.mdx
+++ b/docs/docs/providers/inference/index.mdx
@@ -1,5 +1,7 @@
---
-description: "Llama Stack Inference API for generating completions, chat completions, and embeddings.
+description: "Inference
+
+ Llama Stack Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Two kinds of models are supported:
- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.
@@ -12,7 +14,9 @@ title: Inference
## Overview
-Llama Stack Inference API for generating completions, chat completions, and embeddings.
+Inference
+
+ Llama Stack Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Two kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
diff --git a/docs/docs/providers/safety/index.mdx b/docs/docs/providers/safety/index.mdx
index 3445b17e6..4e2de4f33 100644
--- a/docs/docs/providers/safety/index.mdx
+++ b/docs/docs/providers/safety/index.mdx
@@ -1,4 +1,7 @@
---
+description: "Safety
+
+ OpenAI-compatible Moderations API."
sidebar_label: Safety
title: Safety
---
@@ -7,4 +10,8 @@ title: Safety
## Overview
+Safety
+
+ OpenAI-compatible Moderations API.
+
This section contains documentation for all available providers for the **safety** API.
diff --git a/docs/static/deprecated-llama-stack-spec.html b/docs/static/deprecated-llama-stack-spec.html
index ffda7552b..04a3dca9b 100644
--- a/docs/static/deprecated-llama-stack-spec.html
+++ b/docs/static/deprecated-llama-stack-spec.html
@@ -1443,8 +1443,8 @@
"tags": [
"Inference"
],
- "summary": "List all chat completions.",
- "description": "List all chat completions.",
+ "summary": "List chat completions.",
+ "description": "List chat completions.",
"parameters": [
{
"name": "after",
@@ -1520,8 +1520,8 @@
"tags": [
"Inference"
],
- "summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
- "description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
+ "summary": "Create chat completions.",
+ "description": "Create chat completions.\nGenerate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -1565,8 +1565,8 @@
"tags": [
"Inference"
],
- "summary": "Describe a chat completion by its ID.",
- "description": "Describe a chat completion by its ID.",
+ "summary": "Get chat completion.",
+ "description": "Get chat completion.\nDescribe a chat completion by its ID.",
"parameters": [
{
"name": "completion_id",
@@ -1610,8 +1610,8 @@
"tags": [
"Inference"
],
- "summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
- "description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
+ "summary": "Create completion.",
+ "description": "Create completion.\nGenerate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -1655,8 +1655,8 @@
"tags": [
"Inference"
],
- "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
- "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "summary": "Create embeddings.",
+ "description": "Create embeddings.\nGenerate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -1700,8 +1700,8 @@
"tags": [
"Files"
],
- "summary": "Returns a list of files that belong to the user's organization.",
- "description": "Returns a list of files that belong to the user's organization.",
+ "summary": "List files.",
+ "description": "List files.\nReturns a list of files that belong to the user's organization.",
"parameters": [
{
"name": "after",
@@ -1770,8 +1770,8 @@
"tags": [
"Files"
],
- "summary": "Upload a file that can be used across various endpoints.",
- "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
+ "summary": "Upload file.",
+ "description": "Upload file.\nUpload a file that can be used across various endpoints.\n\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
@@ -1831,8 +1831,8 @@
"tags": [
"Files"
],
- "summary": "Returns information about a specific file.",
- "description": "Returns information about a specific file.",
+ "summary": "Retrieve file.",
+ "description": "Retrieve file.\nReturns information about a specific file.",
"parameters": [
{
"name": "file_id",
@@ -1874,8 +1874,8 @@
"tags": [
"Files"
],
- "summary": "Delete a file.",
- "description": "Delete a file.",
+ "summary": "Delete file.",
+ "description": "Delete file.",
"parameters": [
{
"name": "file_id",
@@ -1919,8 +1919,8 @@
"tags": [
"Files"
],
- "summary": "Returns the contents of the specified file.",
- "description": "Returns the contents of the specified file.",
+ "summary": "Retrieve file content.",
+ "description": "Retrieve file content.\nReturns the contents of the specified file.",
"parameters": [
{
"name": "file_id",
@@ -1999,8 +1999,8 @@
"tags": [
"Safety"
],
- "summary": "Classifies if text and/or image inputs are potentially harmful.",
- "description": "Classifies if text and/or image inputs are potentially harmful.",
+ "summary": "Create moderation.",
+ "description": "Create moderation.\nClassifies if text and/or image inputs are potentially harmful.",
"parameters": [],
"requestBody": {
"content": {
@@ -2044,8 +2044,8 @@
"tags": [
"Agents"
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
+ "summary": "List all responses.",
+ "description": "List all responses.",
"parameters": [
{
"name": "after",
@@ -2119,8 +2119,8 @@
"tags": [
"Agents"
],
- "summary": "Create a new OpenAI response.",
- "description": "Create a new OpenAI response.",
+ "summary": "Create a model response.",
+ "description": "Create a model response.",
"parameters": [],
"requestBody": {
"content": {
@@ -2184,8 +2184,8 @@
"tags": [
"Agents"
],
- "summary": "Retrieve an OpenAI response by its ID.",
- "description": "Retrieve an OpenAI response by its ID.",
+ "summary": "Get a model response.",
+ "description": "Get a model response.",
"parameters": [
{
"name": "response_id",
@@ -2227,8 +2227,8 @@
"tags": [
"Agents"
],
- "summary": "Delete an OpenAI response by its ID.",
- "description": "Delete an OpenAI response by its ID.",
+ "summary": "Delete a response.",
+ "description": "Delete a response.",
"parameters": [
{
"name": "response_id",
@@ -2272,8 +2272,8 @@
"tags": [
"Agents"
],
- "summary": "List input items for a given OpenAI response.",
- "description": "List input items for a given OpenAI response.",
+ "summary": "List input items.",
+ "description": "List input items.",
"parameters": [
{
"name": "response_id",
@@ -13366,12 +13366,13 @@
},
{
"name": "Files",
- "description": ""
+ "description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
+ "x-displayName": "Files"
},
{
"name": "Inference",
- "description": "This API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
- "x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
+ "description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n\nThis API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
+ "x-displayName": "Inference"
},
{
"name": "Models",
@@ -13383,7 +13384,8 @@
},
{
"name": "Safety",
- "description": ""
+ "description": "OpenAI-compatible Moderations API.",
+ "x-displayName": "Safety"
},
{
"name": "Telemetry",
diff --git a/docs/static/deprecated-llama-stack-spec.yaml b/docs/static/deprecated-llama-stack-spec.yaml
index 0e672f914..1a215b877 100644
--- a/docs/static/deprecated-llama-stack-spec.yaml
+++ b/docs/static/deprecated-llama-stack-spec.yaml
@@ -1033,8 +1033,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: List all chat completions.
- description: List all chat completions.
+ summary: List chat completions.
+ description: List chat completions.
parameters:
- name: after
in: query
@@ -1087,10 +1087,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate an OpenAI-compatible chat completion for the given messages using
- the specified model.
+ summary: Create chat completions.
description: >-
+ Create chat completions.
+
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
parameters: []
@@ -1122,8 +1122,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: Describe a chat completion by its ID.
- description: Describe a chat completion by its ID.
+ summary: Get chat completion.
+ description: >-
+ Get chat completion.
+
+ Describe a chat completion by its ID.
parameters:
- name: completion_id
in: path
@@ -1153,10 +1156,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate an OpenAI-compatible completion for the given prompt using the specified
- model.
+ summary: Create completion.
description: >-
+ Create completion.
+
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
parameters: []
@@ -1189,10 +1192,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
+ summary: Create embeddings.
description: >-
+ Create embeddings.
+
Generate OpenAI-compatible embeddings for the given input using the specified
model.
parameters: []
@@ -1225,9 +1228,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns a list of files that belong to the user's organization.
+ summary: List files.
description: >-
+ List files.
+
Returns a list of files that belong to the user's organization.
parameters:
- name: after
@@ -1285,11 +1289,13 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Upload a file that can be used across various endpoints.
+ summary: Upload file.
description: >-
+ Upload file.
+
Upload a file that can be used across various endpoints.
+
The file upload should be a multipart form request with:
- file: The File object (not file name) to be uploaded.
@@ -1338,9 +1344,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns information about a specific file.
+ summary: Retrieve file.
description: >-
+ Retrieve file.
+
Returns information about a specific file.
parameters:
- name: file_id
@@ -1372,8 +1379,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: Delete a file.
- description: Delete a file.
+ summary: Delete file.
+ description: Delete file.
parameters:
- name: file_id
in: path
@@ -1405,9 +1412,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns the contents of the specified file.
+ summary: Retrieve file content.
description: >-
+ Retrieve file content.
+
Returns the contents of the specified file.
parameters:
- name: file_id
@@ -1464,9 +1472,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
- summary: >-
- Classifies if text and/or image inputs are potentially harmful.
+ summary: Create moderation.
description: >-
+ Create moderation.
+
Classifies if text and/or image inputs are potentially harmful.
parameters: []
requestBody:
@@ -1497,8 +1506,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: List all OpenAI responses.
- description: List all OpenAI responses.
+ summary: List all responses.
+ description: List all responses.
parameters:
- name: after
in: query
@@ -1549,8 +1558,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Create a new OpenAI response.
- description: Create a new OpenAI response.
+ summary: Create a model response.
+ description: Create a model response.
parameters: []
requestBody:
content:
@@ -1592,8 +1601,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Retrieve an OpenAI response by its ID.
- description: Retrieve an OpenAI response by its ID.
+ summary: Get a model response.
+ description: Get a model response.
parameters:
- name: response_id
in: path
@@ -1623,8 +1632,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Delete an OpenAI response by its ID.
- description: Delete an OpenAI response by its ID.
+ summary: Delete a response.
+ description: Delete a response.
parameters:
- name: response_id
in: path
@@ -1654,10 +1663,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: >-
- List input items for a given OpenAI response.
- description: >-
- List input items for a given OpenAI response.
+ summary: List input items.
+ description: List input items.
parameters:
- name: response_id
in: path
@@ -10011,9 +10018,16 @@ tags:
x-displayName: >-
Llama Stack Evaluation API for running evaluations on model and agent candidates.
- name: Files
- description: ''
+ description: >-
+ This API is used to upload documents that can be used with other Llama Stack
+ APIs.
+ x-displayName: Files
- name: Inference
description: >-
+ Llama Stack Inference API for generating completions, chat completions, and
+ embeddings.
+
+
This API provides the raw interface to the underlying models. Two kinds of models
are supported:
@@ -10021,15 +10035,14 @@ tags:
- Embedding models: these models generate embeddings to be used for semantic
search.
- x-displayName: >-
- Llama Stack Inference API for generating completions, chat completions, and
- embeddings.
+ x-displayName: Inference
- name: Models
description: ''
- name: PostTraining (Coming Soon)
description: ''
- name: Safety
- description: ''
+ description: OpenAI-compatible Moderations API.
+ x-displayName: Safety
- name: Telemetry
description: ''
- name: VectorIO
diff --git a/docs/static/llama-stack-spec.html b/docs/static/llama-stack-spec.html
index c570dcddf..9cd526176 100644
--- a/docs/static/llama-stack-spec.html
+++ b/docs/static/llama-stack-spec.html
@@ -69,8 +69,8 @@
"tags": [
"Inference"
],
- "summary": "List all chat completions.",
- "description": "List all chat completions.",
+ "summary": "List chat completions.",
+ "description": "List chat completions.",
"parameters": [
{
"name": "after",
@@ -146,8 +146,8 @@
"tags": [
"Inference"
],
- "summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
- "description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
+ "summary": "Create chat completions.",
+ "description": "Create chat completions.\nGenerate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -191,8 +191,8 @@
"tags": [
"Inference"
],
- "summary": "Describe a chat completion by its ID.",
- "description": "Describe a chat completion by its ID.",
+ "summary": "Get chat completion.",
+ "description": "Get chat completion.\nDescribe a chat completion by its ID.",
"parameters": [
{
"name": "completion_id",
@@ -236,8 +236,8 @@
"tags": [
"Inference"
],
- "summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
- "description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
+ "summary": "Create completion.",
+ "description": "Create completion.\nGenerate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -758,8 +758,8 @@
"tags": [
"Inference"
],
- "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
- "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "summary": "Create embeddings.",
+ "description": "Create embeddings.\nGenerate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -803,8 +803,8 @@
"tags": [
"Files"
],
- "summary": "Returns a list of files that belong to the user's organization.",
- "description": "Returns a list of files that belong to the user's organization.",
+ "summary": "List files.",
+ "description": "List files.\nReturns a list of files that belong to the user's organization.",
"parameters": [
{
"name": "after",
@@ -873,8 +873,8 @@
"tags": [
"Files"
],
- "summary": "Upload a file that can be used across various endpoints.",
- "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
+ "summary": "Upload file.",
+ "description": "Upload file.\nUpload a file that can be used across various endpoints.\n\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
@@ -934,8 +934,8 @@
"tags": [
"Files"
],
- "summary": "Returns information about a specific file.",
- "description": "Returns information about a specific file.",
+ "summary": "Retrieve file.",
+ "description": "Retrieve file.\nReturns information about a specific file.",
"parameters": [
{
"name": "file_id",
@@ -977,8 +977,8 @@
"tags": [
"Files"
],
- "summary": "Delete a file.",
- "description": "Delete a file.",
+ "summary": "Delete file.",
+ "description": "Delete file.",
"parameters": [
{
"name": "file_id",
@@ -1022,8 +1022,8 @@
"tags": [
"Files"
],
- "summary": "Returns the contents of the specified file.",
- "description": "Returns the contents of the specified file.",
+ "summary": "Retrieve file content.",
+ "description": "Retrieve file content.\nReturns the contents of the specified file.",
"parameters": [
{
"name": "file_id",
@@ -1067,8 +1067,8 @@
"tags": [
"Inspect"
],
- "summary": "Get the current health status of the service.",
- "description": "Get the current health status of the service.",
+ "summary": "Get health status.",
+ "description": "Get health status.\nGet the current health status of the service.",
"parameters": [],
"deprecated": false
}
@@ -1102,8 +1102,8 @@
"tags": [
"Inspect"
],
- "summary": "List all available API routes with their methods and implementing providers.",
- "description": "List all available API routes with their methods and implementing providers.",
+ "summary": "List routes.",
+ "description": "List routes.\nList all available API routes with their methods and implementing providers.",
"parameters": [],
"deprecated": false
}
@@ -1170,8 +1170,8 @@
"tags": [
"Models"
],
- "summary": "Register a model.",
- "description": "Register a model.",
+ "summary": "Register model.",
+ "description": "Register model.\nRegister a model.",
"parameters": [],
"requestBody": {
"content": {
@@ -1215,8 +1215,8 @@
"tags": [
"Models"
],
- "summary": "Get a model by its identifier.",
- "description": "Get a model by its identifier.",
+ "summary": "Get model.",
+ "description": "Get model.\nGet a model by its identifier.",
"parameters": [
{
"name": "model_id",
@@ -1251,8 +1251,8 @@
"tags": [
"Models"
],
- "summary": "Unregister a model.",
- "description": "Unregister a model.",
+ "summary": "Unregister model.",
+ "description": "Unregister model.\nUnregister a model.",
"parameters": [
{
"name": "model_id",
@@ -1296,8 +1296,8 @@
"tags": [
"Safety"
],
- "summary": "Classifies if text and/or image inputs are potentially harmful.",
- "description": "Classifies if text and/or image inputs are potentially harmful.",
+ "summary": "Create moderation.",
+ "description": "Create moderation.\nClassifies if text and/or image inputs are potentially harmful.",
"parameters": [],
"requestBody": {
"content": {
@@ -1374,8 +1374,8 @@
"tags": [
"Prompts"
],
- "summary": "Create a new prompt.",
- "description": "Create a new prompt.",
+ "summary": "Create prompt.",
+ "description": "Create prompt.\nCreate a new prompt.",
"parameters": [],
"requestBody": {
"content": {
@@ -1419,8 +1419,8 @@
"tags": [
"Prompts"
],
- "summary": "Get a prompt by its identifier and optional version.",
- "description": "Get a prompt by its identifier and optional version.",
+ "summary": "Get prompt.",
+ "description": "Get prompt.\nGet a prompt by its identifier and optional version.",
"parameters": [
{
"name": "prompt_id",
@@ -1471,8 +1471,8 @@
"tags": [
"Prompts"
],
- "summary": "Update an existing prompt (increments version).",
- "description": "Update an existing prompt (increments version).",
+ "summary": "Update prompt.",
+ "description": "Update prompt.\nUpdate an existing prompt (increments version).",
"parameters": [
{
"name": "prompt_id",
@@ -1517,8 +1517,8 @@
"tags": [
"Prompts"
],
- "summary": "Delete a prompt.",
- "description": "Delete a prompt.",
+ "summary": "Delete prompt.",
+ "description": "Delete prompt.\nDelete a prompt.",
"parameters": [
{
"name": "prompt_id",
@@ -1562,8 +1562,8 @@
"tags": [
"Prompts"
],
- "summary": "Set which version of a prompt should be the default in get_prompt (latest).",
- "description": "Set which version of a prompt should be the default in get_prompt (latest).",
+ "summary": "Set prompt version.",
+ "description": "Set prompt version.\nSet which version of a prompt should be the default in get_prompt (latest).",
"parameters": [
{
"name": "prompt_id",
@@ -1617,8 +1617,8 @@
"tags": [
"Prompts"
],
- "summary": "List all versions of a specific prompt.",
- "description": "List all versions of a specific prompt.",
+ "summary": "List prompt versions.",
+ "description": "List prompt versions.\nList all versions of a specific prompt.",
"parameters": [
{
"name": "prompt_id",
@@ -1662,8 +1662,8 @@
"tags": [
"Providers"
],
- "summary": "List all available providers.",
- "description": "List all available providers.",
+ "summary": "List providers.",
+ "description": "List providers.\nList all available providers.",
"parameters": [],
"deprecated": false
}
@@ -1697,8 +1697,8 @@
"tags": [
"Providers"
],
- "summary": "Get detailed information about a specific provider.",
- "description": "Get detailed information about a specific provider.",
+ "summary": "Get provider.",
+ "description": "Get provider.\nGet detailed information about a specific provider.",
"parameters": [
{
"name": "provider_id",
@@ -1742,8 +1742,8 @@
"tags": [
"Agents"
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
+ "summary": "List all responses.",
+ "description": "List all responses.",
"parameters": [
{
"name": "after",
@@ -1817,8 +1817,8 @@
"tags": [
"Agents"
],
- "summary": "Create a new OpenAI response.",
- "description": "Create a new OpenAI response.",
+ "summary": "Create a model response.",
+ "description": "Create a model response.",
"parameters": [],
"requestBody": {
"content": {
@@ -1882,8 +1882,8 @@
"tags": [
"Agents"
],
- "summary": "Retrieve an OpenAI response by its ID.",
- "description": "Retrieve an OpenAI response by its ID.",
+ "summary": "Get a model response.",
+ "description": "Get a model response.",
"parameters": [
{
"name": "response_id",
@@ -1925,8 +1925,8 @@
"tags": [
"Agents"
],
- "summary": "Delete an OpenAI response by its ID.",
- "description": "Delete an OpenAI response by its ID.",
+ "summary": "Delete a response.",
+ "description": "Delete a response.",
"parameters": [
{
"name": "response_id",
@@ -1970,8 +1970,8 @@
"tags": [
"Agents"
],
- "summary": "List input items for a given OpenAI response.",
- "description": "List input items for a given OpenAI response.",
+ "summary": "List input items.",
+ "description": "List input items.",
"parameters": [
{
"name": "response_id",
@@ -2063,8 +2063,8 @@
"tags": [
"Safety"
],
- "summary": "Run a shield.",
- "description": "Run a shield.",
+ "summary": "Run shield.",
+ "description": "Run shield.\nRun a shield.",
"parameters": [],
"requestBody": {
"content": {
@@ -4196,8 +4196,8 @@
"tags": [
"Inspect"
],
- "summary": "Get the version of the service.",
- "description": "Get the version of the service.",
+ "summary": "Get version.",
+ "description": "Get version.\nGet the version of the service.",
"parameters": [],
"deprecated": false
}
@@ -12914,16 +12914,18 @@
},
{
"name": "Files",
- "description": ""
+ "description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
+ "x-displayName": "Files"
},
{
"name": "Inference",
- "description": "This API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
- "x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
+ "description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n\nThis API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
+ "x-displayName": "Inference"
},
{
"name": "Inspect",
- "description": ""
+ "description": "APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.",
+ "x-displayName": "Inspect"
},
{
"name": "Models",
@@ -12931,17 +12933,18 @@
},
{
"name": "Prompts",
- "description": "",
- "x-displayName": "Protocol for prompt management operations."
+ "description": "Protocol for prompt management operations.",
+ "x-displayName": "Prompts"
},
{
"name": "Providers",
- "description": "",
- "x-displayName": "Providers API for inspecting, listing, and modifying providers and their configurations."
+ "description": "Providers API for inspecting, listing, and modifying providers and their configurations.",
+ "x-displayName": "Providers"
},
{
"name": "Safety",
- "description": ""
+ "description": "OpenAI-compatible Moderations API.",
+ "x-displayName": "Safety"
},
{
"name": "Scoring",
diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml
index 3e1431b22..66ce8e38a 100644
--- a/docs/static/llama-stack-spec.yaml
+++ b/docs/static/llama-stack-spec.yaml
@@ -33,8 +33,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: List all chat completions.
- description: List all chat completions.
+ summary: List chat completions.
+ description: List chat completions.
parameters:
- name: after
in: query
@@ -87,10 +87,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate an OpenAI-compatible chat completion for the given messages using
- the specified model.
+ summary: Create chat completions.
description: >-
+ Create chat completions.
+
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
parameters: []
@@ -122,8 +122,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: Describe a chat completion by its ID.
- description: Describe a chat completion by its ID.
+ summary: Get chat completion.
+ description: >-
+ Get chat completion.
+
+ Describe a chat completion by its ID.
parameters:
- name: completion_id
in: path
@@ -153,10 +156,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate an OpenAI-compatible completion for the given prompt using the specified
- model.
+ summary: Create completion.
description: >-
+ Create completion.
+
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
parameters: []
@@ -603,10 +606,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
+ summary: Create embeddings.
description: >-
+ Create embeddings.
+
Generate OpenAI-compatible embeddings for the given input using the specified
model.
parameters: []
@@ -639,9 +642,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns a list of files that belong to the user's organization.
+ summary: List files.
description: >-
+ List files.
+
Returns a list of files that belong to the user's organization.
parameters:
- name: after
@@ -699,11 +703,13 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Upload a file that can be used across various endpoints.
+ summary: Upload file.
description: >-
+ Upload file.
+
Upload a file that can be used across various endpoints.
+
The file upload should be a multipart form request with:
- file: The File object (not file name) to be uploaded.
@@ -752,9 +758,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns information about a specific file.
+ summary: Retrieve file.
description: >-
+ Retrieve file.
+
Returns information about a specific file.
parameters:
- name: file_id
@@ -786,8 +793,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: Delete a file.
- description: Delete a file.
+ summary: Delete file.
+ description: Delete file.
parameters:
- name: file_id
in: path
@@ -819,9 +826,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns the contents of the specified file.
+ summary: Retrieve file content.
description: >-
+ Retrieve file content.
+
Returns the contents of the specified file.
parameters:
- name: file_id
@@ -854,9 +862,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
- summary: >-
- Get the current health status of the service.
+ summary: Get health status.
description: >-
+ Get health status.
+
Get the current health status of the service.
parameters: []
deprecated: false
@@ -882,9 +891,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
- summary: >-
- List all available API routes with their methods and implementing providers.
+ summary: List routes.
description: >-
+ List routes.
+
List all available API routes with their methods and implementing providers.
parameters: []
deprecated: false
@@ -933,8 +943,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
- summary: Register a model.
- description: Register a model.
+ summary: Register model.
+ description: >-
+ Register model.
+
+ Register a model.
parameters: []
requestBody:
content:
@@ -964,8 +977,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
- summary: Get a model by its identifier.
- description: Get a model by its identifier.
+ summary: Get model.
+ description: >-
+ Get model.
+
+ Get a model by its identifier.
parameters:
- name: model_id
in: path
@@ -990,8 +1006,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
- summary: Unregister a model.
- description: Unregister a model.
+ summary: Unregister model.
+ description: >-
+ Unregister model.
+
+ Unregister a model.
parameters:
- name: model_id
in: path
@@ -1022,9 +1041,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
- summary: >-
- Classifies if text and/or image inputs are potentially harmful.
+ summary: Create moderation.
description: >-
+ Create moderation.
+
Classifies if text and/or image inputs are potentially harmful.
parameters: []
requestBody:
@@ -1080,8 +1100,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: Create a new prompt.
- description: Create a new prompt.
+ summary: Create prompt.
+ description: >-
+ Create prompt.
+
+ Create a new prompt.
parameters: []
requestBody:
content:
@@ -1111,9 +1134,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: >-
- Get a prompt by its identifier and optional version.
+ summary: Get prompt.
description: >-
+ Get prompt.
+
Get a prompt by its identifier and optional version.
parameters:
- name: prompt_id
@@ -1151,9 +1175,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: >-
- Update an existing prompt (increments version).
+ summary: Update prompt.
description: >-
+ Update prompt.
+
Update an existing prompt (increments version).
parameters:
- name: prompt_id
@@ -1185,8 +1210,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: Delete a prompt.
- description: Delete a prompt.
+ summary: Delete prompt.
+ description: >-
+ Delete prompt.
+
+ Delete a prompt.
parameters:
- name: prompt_id
in: path
@@ -1217,9 +1245,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: >-
- Set which version of a prompt should be the default in get_prompt (latest).
+ summary: Set prompt version.
description: >-
+ Set prompt version.
+
Set which version of a prompt should be the default in get_prompt (latest).
parameters:
- name: prompt_id
@@ -1257,8 +1286,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: List all versions of a specific prompt.
- description: List all versions of a specific prompt.
+ summary: List prompt versions.
+ description: >-
+ List prompt versions.
+
+ List all versions of a specific prompt.
parameters:
- name: prompt_id
in: path
@@ -1290,8 +1322,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
- summary: List all available providers.
- description: List all available providers.
+ summary: List providers.
+ description: >-
+ List providers.
+
+ List all available providers.
parameters: []
deprecated: false
/v1/providers/{provider_id}:
@@ -1316,9 +1351,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
- summary: >-
- Get detailed information about a specific provider.
+ summary: Get provider.
description: >-
+ Get provider.
+
Get detailed information about a specific provider.
parameters:
- name: provider_id
@@ -1349,8 +1385,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: List all OpenAI responses.
- description: List all OpenAI responses.
+ summary: List all responses.
+ description: List all responses.
parameters:
- name: after
in: query
@@ -1401,8 +1437,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Create a new OpenAI response.
- description: Create a new OpenAI response.
+ summary: Create a model response.
+ description: Create a model response.
parameters: []
requestBody:
content:
@@ -1444,8 +1480,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Retrieve an OpenAI response by its ID.
- description: Retrieve an OpenAI response by its ID.
+ summary: Get a model response.
+ description: Get a model response.
parameters:
- name: response_id
in: path
@@ -1475,8 +1511,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Delete an OpenAI response by its ID.
- description: Delete an OpenAI response by its ID.
+ summary: Delete a response.
+ description: Delete a response.
parameters:
- name: response_id
in: path
@@ -1506,10 +1542,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: >-
- List input items for a given OpenAI response.
- description: >-
- List input items for a given OpenAI response.
+ summary: List input items.
+ description: List input items.
parameters:
- name: response_id
in: path
@@ -1578,8 +1612,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
- summary: Run a shield.
- description: Run a shield.
+ summary: Run shield.
+ description: >-
+ Run shield.
+
+ Run a shield.
parameters: []
requestBody:
content:
@@ -3135,8 +3172,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
- summary: Get the version of the service.
- description: Get the version of the service.
+ summary: Get version.
+ description: >-
+ Get version.
+
+ Get the version of the service.
parameters: []
deprecated: false
jsonSchemaDialect: >-
@@ -9749,9 +9789,16 @@ tags:
x-displayName: >-
Protocol for conversation management operations.
- name: Files
- description: ''
+ description: >-
+ This API is used to upload documents that can be used with other Llama Stack
+ APIs.
+ x-displayName: Files
- name: Inference
description: >-
+ Llama Stack Inference API for generating completions, chat completions, and
+ embeddings.
+
+
This API provides the raw interface to the underlying models. Two kinds of models
are supported:
@@ -9759,23 +9806,25 @@ tags:
- Embedding models: these models generate embeddings to be used for semantic
search.
- x-displayName: >-
- Llama Stack Inference API for generating completions, chat completions, and
- embeddings.
+ x-displayName: Inference
- name: Inspect
- description: ''
+ description: >-
+    APIs for inspecting the Llama Stack service, including health status, available
+    API routes with their methods and implementing providers.
+ x-displayName: Inspect
- name: Models
description: ''
- name: Prompts
- description: ''
- x-displayName: >-
+ description: >-
Protocol for prompt management operations.
+ x-displayName: Prompts
- name: Providers
- description: ''
- x-displayName: >-
+ description: >-
Providers API for inspecting, listing, and modifying providers and their configurations.
+ x-displayName: Providers
- name: Safety
- description: ''
+ description: OpenAI-compatible Moderations API.
+ x-displayName: Safety
- name: Scoring
description: ''
- name: ScoringFunctions
diff --git a/docs/static/stainless-llama-stack-spec.html b/docs/static/stainless-llama-stack-spec.html
index 167a4aa3c..3478d3338 100644
--- a/docs/static/stainless-llama-stack-spec.html
+++ b/docs/static/stainless-llama-stack-spec.html
@@ -69,8 +69,8 @@
"tags": [
"Inference"
],
- "summary": "List all chat completions.",
- "description": "List all chat completions.",
+ "summary": "List chat completions.",
+ "description": "List chat completions.",
"parameters": [
{
"name": "after",
@@ -146,8 +146,8 @@
"tags": [
"Inference"
],
- "summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
- "description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.",
+ "summary": "Create chat completions.",
+ "description": "Create chat completions.\nGenerate an OpenAI-compatible chat completion for the given messages using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -191,8 +191,8 @@
"tags": [
"Inference"
],
- "summary": "Describe a chat completion by its ID.",
- "description": "Describe a chat completion by its ID.",
+ "summary": "Get chat completion.",
+ "description": "Get chat completion.\nDescribe a chat completion by its ID.",
"parameters": [
{
"name": "completion_id",
@@ -236,8 +236,8 @@
"tags": [
"Inference"
],
- "summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
- "description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.",
+ "summary": "Create completion.",
+ "description": "Create completion.\nGenerate an OpenAI-compatible completion for the given prompt using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -758,8 +758,8 @@
"tags": [
"Inference"
],
- "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
- "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.",
+ "summary": "Create embeddings.",
+ "description": "Create embeddings.\nGenerate OpenAI-compatible embeddings for the given input using the specified model.",
"parameters": [],
"requestBody": {
"content": {
@@ -803,8 +803,8 @@
"tags": [
"Files"
],
- "summary": "Returns a list of files that belong to the user's organization.",
- "description": "Returns a list of files that belong to the user's organization.",
+ "summary": "List files.",
+ "description": "List files.\nReturns a list of files that belong to the user's organization.",
"parameters": [
{
"name": "after",
@@ -873,8 +873,8 @@
"tags": [
"Files"
],
- "summary": "Upload a file that can be used across various endpoints.",
- "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
+ "summary": "Upload file.",
+ "description": "Upload file.\nUpload a file that can be used across various endpoints.\n\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.",
"parameters": [],
"requestBody": {
"content": {
@@ -934,8 +934,8 @@
"tags": [
"Files"
],
- "summary": "Returns information about a specific file.",
- "description": "Returns information about a specific file.",
+ "summary": "Retrieve file.",
+ "description": "Retrieve file.\nReturns information about a specific file.",
"parameters": [
{
"name": "file_id",
@@ -977,8 +977,8 @@
"tags": [
"Files"
],
- "summary": "Delete a file.",
- "description": "Delete a file.",
+ "summary": "Delete file.",
+ "description": "Delete file.",
"parameters": [
{
"name": "file_id",
@@ -1022,8 +1022,8 @@
"tags": [
"Files"
],
- "summary": "Returns the contents of the specified file.",
- "description": "Returns the contents of the specified file.",
+ "summary": "Retrieve file content.",
+ "description": "Retrieve file content.\nReturns the contents of the specified file.",
"parameters": [
{
"name": "file_id",
@@ -1067,8 +1067,8 @@
"tags": [
"Inspect"
],
- "summary": "Get the current health status of the service.",
- "description": "Get the current health status of the service.",
+ "summary": "Get health status.",
+ "description": "Get health status.\nGet the current health status of the service.",
"parameters": [],
"deprecated": false
}
@@ -1102,8 +1102,8 @@
"tags": [
"Inspect"
],
- "summary": "List all available API routes with their methods and implementing providers.",
- "description": "List all available API routes with their methods and implementing providers.",
+ "summary": "List routes.",
+ "description": "List routes.\nList all available API routes with their methods and implementing providers.",
"parameters": [],
"deprecated": false
}
@@ -1170,8 +1170,8 @@
"tags": [
"Models"
],
- "summary": "Register a model.",
- "description": "Register a model.",
+ "summary": "Register model.",
+ "description": "Register model.\nRegister a model.",
"parameters": [],
"requestBody": {
"content": {
@@ -1215,8 +1215,8 @@
"tags": [
"Models"
],
- "summary": "Get a model by its identifier.",
- "description": "Get a model by its identifier.",
+ "summary": "Get model.",
+ "description": "Get model.\nGet a model by its identifier.",
"parameters": [
{
"name": "model_id",
@@ -1251,8 +1251,8 @@
"tags": [
"Models"
],
- "summary": "Unregister a model.",
- "description": "Unregister a model.",
+ "summary": "Unregister model.",
+ "description": "Unregister model.\nUnregister a model.",
"parameters": [
{
"name": "model_id",
@@ -1296,8 +1296,8 @@
"tags": [
"Safety"
],
- "summary": "Classifies if text and/or image inputs are potentially harmful.",
- "description": "Classifies if text and/or image inputs are potentially harmful.",
+ "summary": "Create moderation.",
+ "description": "Create moderation.\nClassifies if text and/or image inputs are potentially harmful.",
"parameters": [],
"requestBody": {
"content": {
@@ -1374,8 +1374,8 @@
"tags": [
"Prompts"
],
- "summary": "Create a new prompt.",
- "description": "Create a new prompt.",
+ "summary": "Create prompt.",
+ "description": "Create prompt.\nCreate a new prompt.",
"parameters": [],
"requestBody": {
"content": {
@@ -1419,8 +1419,8 @@
"tags": [
"Prompts"
],
- "summary": "Get a prompt by its identifier and optional version.",
- "description": "Get a prompt by its identifier and optional version.",
+ "summary": "Get prompt.",
+ "description": "Get prompt.\nGet a prompt by its identifier and optional version.",
"parameters": [
{
"name": "prompt_id",
@@ -1471,8 +1471,8 @@
"tags": [
"Prompts"
],
- "summary": "Update an existing prompt (increments version).",
- "description": "Update an existing prompt (increments version).",
+ "summary": "Update prompt.",
+ "description": "Update prompt.\nUpdate an existing prompt (increments version).",
"parameters": [
{
"name": "prompt_id",
@@ -1517,8 +1517,8 @@
"tags": [
"Prompts"
],
- "summary": "Delete a prompt.",
- "description": "Delete a prompt.",
+ "summary": "Delete prompt.",
+ "description": "Delete prompt.\nDelete a prompt.",
"parameters": [
{
"name": "prompt_id",
@@ -1562,8 +1562,8 @@
"tags": [
"Prompts"
],
- "summary": "Set which version of a prompt should be the default in get_prompt (latest).",
- "description": "Set which version of a prompt should be the default in get_prompt (latest).",
+ "summary": "Set prompt version.",
+ "description": "Set prompt version.\nSet which version of a prompt should be the default in get_prompt (latest).",
"parameters": [
{
"name": "prompt_id",
@@ -1617,8 +1617,8 @@
"tags": [
"Prompts"
],
- "summary": "List all versions of a specific prompt.",
- "description": "List all versions of a specific prompt.",
+ "summary": "List prompt versions.",
+ "description": "List prompt versions.\nList all versions of a specific prompt.",
"parameters": [
{
"name": "prompt_id",
@@ -1662,8 +1662,8 @@
"tags": [
"Providers"
],
- "summary": "List all available providers.",
- "description": "List all available providers.",
+ "summary": "List providers.",
+ "description": "List providers.\nList all available providers.",
"parameters": [],
"deprecated": false
}
@@ -1697,8 +1697,8 @@
"tags": [
"Providers"
],
- "summary": "Get detailed information about a specific provider.",
- "description": "Get detailed information about a specific provider.",
+ "summary": "Get provider.",
+ "description": "Get provider.\nGet detailed information about a specific provider.",
"parameters": [
{
"name": "provider_id",
@@ -1742,8 +1742,8 @@
"tags": [
"Agents"
],
- "summary": "List all OpenAI responses.",
- "description": "List all OpenAI responses.",
+ "summary": "List all responses.",
+ "description": "List all responses.",
"parameters": [
{
"name": "after",
@@ -1817,8 +1817,8 @@
"tags": [
"Agents"
],
- "summary": "Create a new OpenAI response.",
- "description": "Create a new OpenAI response.",
+ "summary": "Create a model response.",
+ "description": "Create a model response.",
"parameters": [],
"requestBody": {
"content": {
@@ -1882,8 +1882,8 @@
"tags": [
"Agents"
],
- "summary": "Retrieve an OpenAI response by its ID.",
- "description": "Retrieve an OpenAI response by its ID.",
+ "summary": "Get a model response.",
+ "description": "Get a model response.",
"parameters": [
{
"name": "response_id",
@@ -1925,8 +1925,8 @@
"tags": [
"Agents"
],
- "summary": "Delete an OpenAI response by its ID.",
- "description": "Delete an OpenAI response by its ID.",
+ "summary": "Delete a response.",
+ "description": "Delete a response.",
"parameters": [
{
"name": "response_id",
@@ -1970,8 +1970,8 @@
"tags": [
"Agents"
],
- "summary": "List input items for a given OpenAI response.",
- "description": "List input items for a given OpenAI response.",
+ "summary": "List input items.",
+ "description": "List input items.",
"parameters": [
{
"name": "response_id",
@@ -2063,8 +2063,8 @@
"tags": [
"Safety"
],
- "summary": "Run a shield.",
- "description": "Run a shield.",
+ "summary": "Run shield.",
+ "description": "Run shield.\nRun a shield.",
"parameters": [],
"requestBody": {
"content": {
@@ -4196,8 +4196,8 @@
"tags": [
"Inspect"
],
- "summary": "Get the version of the service.",
- "description": "Get the version of the service.",
+ "summary": "Get version.",
+ "description": "Get version.\nGet the version of the service.",
"parameters": [],
"deprecated": false
}
@@ -18487,16 +18487,18 @@
},
{
"name": "Files",
- "description": ""
+ "description": "This API is used to upload documents that can be used with other Llama Stack APIs.",
+ "x-displayName": "Files"
},
{
"name": "Inference",
- "description": "This API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
- "x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings."
+ "description": "Llama Stack Inference API for generating completions, chat completions, and embeddings.\n\nThis API provides the raw interface to the underlying models. Two kinds of models are supported:\n- LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.\n- Embedding models: these models generate embeddings to be used for semantic search.",
+ "x-displayName": "Inference"
},
{
"name": "Inspect",
- "description": ""
+      "description": "APIs for inspecting the Llama Stack service, including health status, available API routes with their methods and implementing providers.",
+ "x-displayName": "Inspect"
},
{
"name": "Models",
@@ -18508,17 +18510,18 @@
},
{
"name": "Prompts",
- "description": "",
- "x-displayName": "Protocol for prompt management operations."
+ "description": "Protocol for prompt management operations.",
+ "x-displayName": "Prompts"
},
{
"name": "Providers",
- "description": "",
- "x-displayName": "Providers API for inspecting, listing, and modifying providers and their configurations."
+ "description": "Providers API for inspecting, listing, and modifying providers and their configurations.",
+ "x-displayName": "Providers"
},
{
"name": "Safety",
- "description": ""
+ "description": "OpenAI-compatible Moderations API.",
+ "x-displayName": "Safety"
},
{
"name": "Scoring",
diff --git a/docs/static/stainless-llama-stack-spec.yaml b/docs/static/stainless-llama-stack-spec.yaml
index 6dc1041f1..6c04542bf 100644
--- a/docs/static/stainless-llama-stack-spec.yaml
+++ b/docs/static/stainless-llama-stack-spec.yaml
@@ -36,8 +36,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: List all chat completions.
- description: List all chat completions.
+ summary: List chat completions.
+ description: List chat completions.
parameters:
- name: after
in: query
@@ -90,10 +90,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate an OpenAI-compatible chat completion for the given messages using
- the specified model.
+ summary: Create chat completions.
description: >-
+ Create chat completions.
+
Generate an OpenAI-compatible chat completion for the given messages using
the specified model.
parameters: []
@@ -125,8 +125,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: Describe a chat completion by its ID.
- description: Describe a chat completion by its ID.
+ summary: Get chat completion.
+ description: >-
+ Get chat completion.
+
+ Describe a chat completion by its ID.
parameters:
- name: completion_id
in: path
@@ -156,10 +159,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate an OpenAI-compatible completion for the given prompt using the specified
- model.
+ summary: Create completion.
description: >-
+ Create completion.
+
Generate an OpenAI-compatible completion for the given prompt using the specified
model.
parameters: []
@@ -606,10 +609,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inference
- summary: >-
- Generate OpenAI-compatible embeddings for the given input using the specified
- model.
+ summary: Create embeddings.
description: >-
+ Create embeddings.
+
Generate OpenAI-compatible embeddings for the given input using the specified
model.
parameters: []
@@ -642,9 +645,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns a list of files that belong to the user's organization.
+ summary: List files.
description: >-
+ List files.
+
Returns a list of files that belong to the user's organization.
parameters:
- name: after
@@ -702,11 +706,13 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Upload a file that can be used across various endpoints.
+ summary: Upload file.
description: >-
+ Upload file.
+
Upload a file that can be used across various endpoints.
+
The file upload should be a multipart form request with:
- file: The File object (not file name) to be uploaded.
@@ -755,9 +761,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns information about a specific file.
+ summary: Retrieve file.
description: >-
+ Retrieve file.
+
Returns information about a specific file.
parameters:
- name: file_id
@@ -789,8 +796,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: Delete a file.
- description: Delete a file.
+ summary: Delete file.
+ description: Delete file.
parameters:
- name: file_id
in: path
@@ -822,9 +829,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Files
- summary: >-
- Returns the contents of the specified file.
+ summary: Retrieve file content.
description: >-
+ Retrieve file content.
+
Returns the contents of the specified file.
parameters:
- name: file_id
@@ -857,9 +865,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
- summary: >-
- Get the current health status of the service.
+ summary: Get health status.
description: >-
+ Get health status.
+
Get the current health status of the service.
parameters: []
deprecated: false
@@ -885,9 +894,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
- summary: >-
- List all available API routes with their methods and implementing providers.
+ summary: List routes.
description: >-
+ List routes.
+
List all available API routes with their methods and implementing providers.
parameters: []
deprecated: false
@@ -936,8 +946,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
- summary: Register a model.
- description: Register a model.
+ summary: Register model.
+ description: >-
+ Register model.
+
+ Register a model.
parameters: []
requestBody:
content:
@@ -967,8 +980,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
- summary: Get a model by its identifier.
- description: Get a model by its identifier.
+ summary: Get model.
+ description: >-
+ Get model.
+
+ Get a model by its identifier.
parameters:
- name: model_id
in: path
@@ -993,8 +1009,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Models
- summary: Unregister a model.
- description: Unregister a model.
+ summary: Unregister model.
+ description: >-
+ Unregister model.
+
+ Unregister a model.
parameters:
- name: model_id
in: path
@@ -1025,9 +1044,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
- summary: >-
- Classifies if text and/or image inputs are potentially harmful.
+ summary: Create moderation.
description: >-
+ Create moderation.
+
Classifies if text and/or image inputs are potentially harmful.
parameters: []
requestBody:
@@ -1083,8 +1103,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: Create a new prompt.
- description: Create a new prompt.
+ summary: Create prompt.
+ description: >-
+ Create prompt.
+
+ Create a new prompt.
parameters: []
requestBody:
content:
@@ -1114,9 +1137,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: >-
- Get a prompt by its identifier and optional version.
+ summary: Get prompt.
description: >-
+ Get prompt.
+
Get a prompt by its identifier and optional version.
parameters:
- name: prompt_id
@@ -1154,9 +1178,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: >-
- Update an existing prompt (increments version).
+ summary: Update prompt.
description: >-
+ Update prompt.
+
Update an existing prompt (increments version).
parameters:
- name: prompt_id
@@ -1188,8 +1213,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: Delete a prompt.
- description: Delete a prompt.
+ summary: Delete prompt.
+ description: >-
+ Delete prompt.
+
+ Delete a prompt.
parameters:
- name: prompt_id
in: path
@@ -1220,9 +1248,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: >-
- Set which version of a prompt should be the default in get_prompt (latest).
+ summary: Set prompt version.
description: >-
+ Set prompt version.
+
Set which version of a prompt should be the default in get_prompt (latest).
parameters:
- name: prompt_id
@@ -1260,8 +1289,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Prompts
- summary: List all versions of a specific prompt.
- description: List all versions of a specific prompt.
+ summary: List prompt versions.
+ description: >-
+ List prompt versions.
+
+ List all versions of a specific prompt.
parameters:
- name: prompt_id
in: path
@@ -1293,8 +1325,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
- summary: List all available providers.
- description: List all available providers.
+ summary: List providers.
+ description: >-
+ List providers.
+
+ List all available providers.
parameters: []
deprecated: false
/v1/providers/{provider_id}:
@@ -1319,9 +1354,10 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Providers
- summary: >-
- Get detailed information about a specific provider.
+ summary: Get provider.
description: >-
+ Get provider.
+
Get detailed information about a specific provider.
parameters:
- name: provider_id
@@ -1352,8 +1388,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: List all OpenAI responses.
- description: List all OpenAI responses.
+ summary: List all responses.
+ description: List all responses.
parameters:
- name: after
in: query
@@ -1404,8 +1440,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Create a new OpenAI response.
- description: Create a new OpenAI response.
+ summary: Create a model response.
+ description: Create a model response.
parameters: []
requestBody:
content:
@@ -1447,8 +1483,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Retrieve an OpenAI response by its ID.
- description: Retrieve an OpenAI response by its ID.
+ summary: Get a model response.
+ description: Get a model response.
parameters:
- name: response_id
in: path
@@ -1478,8 +1514,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: Delete an OpenAI response by its ID.
- description: Delete an OpenAI response by its ID.
+ summary: Delete a response.
+ description: Delete a response.
parameters:
- name: response_id
in: path
@@ -1509,10 +1545,8 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Agents
- summary: >-
- List input items for a given OpenAI response.
- description: >-
- List input items for a given OpenAI response.
+ summary: List input items.
+ description: List input items.
parameters:
- name: response_id
in: path
@@ -1581,8 +1615,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Safety
- summary: Run a shield.
- description: Run a shield.
+ summary: Run shield.
+ description: >-
+ Run shield.
+
+ Run a shield.
parameters: []
requestBody:
content:
@@ -3138,8 +3175,11 @@ paths:
$ref: '#/components/responses/DefaultError'
tags:
- Inspect
- summary: Get the version of the service.
- description: Get the version of the service.
+ summary: Get version.
+ description: >-
+ Get version.
+
+ Get the version of the service.
parameters: []
deprecated: false
/v1beta/datasetio/append-rows/{dataset_id}:
@@ -13795,9 +13835,16 @@ tags:
x-displayName: >-
Llama Stack Evaluation API for running evaluations on model and agent candidates.
- name: Files
- description: ''
+ description: >-
+ This API is used to upload documents that can be used with other Llama Stack
+ APIs.
+ x-displayName: Files
- name: Inference
description: >-
+ Llama Stack Inference API for generating completions, chat completions, and
+ embeddings.
+
+
This API provides the raw interface to the underlying models. Two kinds of models
are supported:
@@ -13805,25 +13852,27 @@ tags:
- Embedding models: these models generate embeddings to be used for semantic
search.
- x-displayName: >-
- Llama Stack Inference API for generating completions, chat completions, and
- embeddings.
+ x-displayName: Inference
- name: Inspect
- description: ''
+ description: >-
+ APIs for inspecting the Llama Stack service, including health status, available
+ API routes with methods and implementing providers.
+ x-displayName: Inspect
- name: Models
description: ''
- name: PostTraining (Coming Soon)
description: ''
- name: Prompts
- description: ''
- x-displayName: >-
+ description: >-
Protocol for prompt management operations.
+ x-displayName: Prompts
- name: Providers
- description: ''
- x-displayName: >-
+ description: >-
Providers API for inspecting, listing, and modifying providers and their configurations.
+ x-displayName: Providers
- name: Safety
- description: ''
+ description: OpenAI-compatible Moderations API.
+ x-displayName: Safety
- name: Scoring
description: ''
- name: ScoringFunctions
diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py
index cdf47308e..5983b5c45 100644
--- a/llama_stack/apis/agents/agents.py
+++ b/llama_stack/apis/agents/agents.py
@@ -797,7 +797,7 @@ class Agents(Protocol):
self,
response_id: str,
) -> OpenAIResponseObject:
- """Retrieve an OpenAI response by its ID.
+ """Get a model response.
:param response_id: The ID of the OpenAI response to retrieve.
:returns: An OpenAIResponseObject.
@@ -826,7 +826,7 @@ class Agents(Protocol):
),
] = None,
) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
- """Create a new OpenAI response.
+ """Create a model response.
:param input: Input message(s) to create the response.
:param model: The underlying LLM used for completions.
@@ -846,7 +846,7 @@ class Agents(Protocol):
model: str | None = None,
order: Order | None = Order.desc,
) -> ListOpenAIResponseObject:
- """List all OpenAI responses.
+ """List all responses.
:param after: The ID of the last response to return.
:param limit: The number of responses to return.
@@ -869,7 +869,7 @@ class Agents(Protocol):
limit: int | None = 20,
order: Order | None = Order.desc,
) -> ListOpenAIResponseInputItem:
- """List input items for a given OpenAI response.
+ """List input items.
:param response_id: The ID of the response to retrieve input items for.
:param after: An item ID to list items after, used for pagination.
@@ -884,7 +884,7 @@ class Agents(Protocol):
@webmethod(route="/openai/v1/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1, deprecated=True)
@webmethod(route="/responses/{response_id}", method="DELETE", level=LLAMA_STACK_API_V1)
async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
- """Delete an OpenAI response by its ID.
+ """Delete a response.
:param response_id: The ID of the OpenAI response to delete.
:returns: An OpenAIDeleteResponseObject
diff --git a/llama_stack/apis/files/files.py b/llama_stack/apis/files/files.py
index 13f0e95fa..f1d3764db 100644
--- a/llama_stack/apis/files/files.py
+++ b/llama_stack/apis/files/files.py
@@ -104,6 +104,11 @@ class OpenAIFileDeleteResponse(BaseModel):
@runtime_checkable
@trace_protocol
class Files(Protocol):
+ """Files
+
+ This API is used to upload documents that can be used with other Llama Stack APIs.
+ """
+
# OpenAI Files API Endpoints
@webmethod(route="/openai/v1/files", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
@webmethod(route="/files", method="POST", level=LLAMA_STACK_API_V1)
@@ -113,7 +118,8 @@ class Files(Protocol):
purpose: Annotated[OpenAIFilePurpose, Form()],
expires_after: Annotated[ExpiresAfter | None, Form()] = None,
) -> OpenAIFileObject:
- """
+ """Upload file.
+
Upload a file that can be used across various endpoints.
The file upload should be a multipart form request with:
@@ -137,7 +143,8 @@ class Files(Protocol):
order: Order | None = Order.desc,
purpose: OpenAIFilePurpose | None = None,
) -> ListOpenAIFileResponse:
- """
+ """List files.
+
Returns a list of files that belong to the user's organization.
:param after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
@@ -154,7 +161,8 @@ class Files(Protocol):
self,
file_id: str,
) -> OpenAIFileObject:
- """
+ """Retrieve file.
+
Returns information about a specific file.
:param file_id: The ID of the file to use for this request.
@@ -168,8 +176,7 @@ class Files(Protocol):
self,
file_id: str,
) -> OpenAIFileDeleteResponse:
- """
- Delete a file.
+ """Delete file.
:param file_id: The ID of the file to use for this request.
:returns: An OpenAIFileDeleteResponse indicating successful deletion.
@@ -182,7 +189,8 @@ class Files(Protocol):
self,
file_id: str,
) -> Response:
- """
+ """Retrieve file content.
+
Returns the contents of the specified file.
:param file_id: The ID of the file to use for this request.
diff --git a/llama_stack/apis/inference/inference.py b/llama_stack/apis/inference/inference.py
index e88a16315..62a988ea6 100644
--- a/llama_stack/apis/inference/inference.py
+++ b/llama_stack/apis/inference/inference.py
@@ -1053,7 +1053,9 @@ class InferenceProvider(Protocol):
# for fill-in-the-middle type completion
suffix: str | None = None,
) -> OpenAICompletion:
- """Generate an OpenAI-compatible completion for the given prompt using the specified model.
+ """Create completion.
+
+ Generate an OpenAI-compatible completion for the given prompt using the specified model.
:param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
:param prompt: The prompt to generate a completion for.
@@ -1105,7 +1107,9 @@ class InferenceProvider(Protocol):
top_p: float | None = None,
user: str | None = None,
) -> OpenAIChatCompletion | AsyncIterator[OpenAIChatCompletionChunk]:
- """Generate an OpenAI-compatible chat completion for the given messages using the specified model.
+ """Create chat completions.
+
+ Generate an OpenAI-compatible chat completion for the given messages using the specified model.
:param model: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
:param messages: List of messages in the conversation.
@@ -1144,7 +1148,9 @@ class InferenceProvider(Protocol):
dimensions: int | None = None,
user: str | None = None,
) -> OpenAIEmbeddingsResponse:
- """Generate OpenAI-compatible embeddings for the given input using the specified model.
+ """Create embeddings.
+
+ Generate OpenAI-compatible embeddings for the given input using the specified model.
:param model: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.
:param input: Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings.
@@ -1157,7 +1163,9 @@ class InferenceProvider(Protocol):
class Inference(InferenceProvider):
- """Llama Stack Inference API for generating completions, chat completions, and embeddings.
+ """Inference
+
+ Llama Stack Inference API for generating completions, chat completions, and embeddings.
This API provides the raw interface to the underlying models. Two kinds of models are supported:
- LLM models: these models generate "raw" and "chat" (conversational) completions.
@@ -1173,7 +1181,7 @@ class Inference(InferenceProvider):
model: str | None = None,
order: Order | None = Order.desc,
) -> ListOpenAIChatCompletionResponse:
- """List all chat completions.
+ """List chat completions.
:param after: The ID of the last chat completion to return.
:param limit: The maximum number of chat completions to return.
@@ -1188,7 +1196,9 @@ class Inference(InferenceProvider):
)
@webmethod(route="/chat/completions/{completion_id}", method="GET", level=LLAMA_STACK_API_V1)
async def get_chat_completion(self, completion_id: str) -> OpenAICompletionWithInputMessages:
- """Describe a chat completion by its ID.
+ """Get chat completion.
+
+ Describe a chat completion by its ID.
:param completion_id: ID of the chat completion.
:returns: A OpenAICompletionWithInputMessages.
diff --git a/llama_stack/apis/inspect/inspect.py b/llama_stack/apis/inspect/inspect.py
index e859dbe59..72f203621 100644
--- a/llama_stack/apis/inspect/inspect.py
+++ b/llama_stack/apis/inspect/inspect.py
@@ -58,9 +58,16 @@ class ListRoutesResponse(BaseModel):
@runtime_checkable
class Inspect(Protocol):
+ """Inspect
+
+ APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers.
+ """
+
@webmethod(route="/inspect/routes", method="GET", level=LLAMA_STACK_API_V1)
async def list_routes(self) -> ListRoutesResponse:
- """List all available API routes with their methods and implementing providers.
+ """List routes.
+
+ List all available API routes with their methods and implementing providers.
:returns: Response containing information about all available routes.
"""
@@ -68,7 +75,9 @@ class Inspect(Protocol):
@webmethod(route="/health", method="GET", level=LLAMA_STACK_API_V1)
async def health(self) -> HealthInfo:
- """Get the current health status of the service.
+ """Get health status.
+
+ Get the current health status of the service.
:returns: Health information indicating if the service is operational.
"""
@@ -76,7 +85,9 @@ class Inspect(Protocol):
@webmethod(route="/version", method="GET", level=LLAMA_STACK_API_V1)
async def version(self) -> VersionInfo:
- """Get the version of the service.
+ """Get version.
+
+ Get the version of the service.
:returns: Version information containing the service version number.
"""
diff --git a/llama_stack/apis/models/models.py b/llama_stack/apis/models/models.py
index 210ed9246..10949cb95 100644
--- a/llama_stack/apis/models/models.py
+++ b/llama_stack/apis/models/models.py
@@ -124,7 +124,9 @@ class Models(Protocol):
self,
model_id: str,
) -> Model:
- """Get a model by its identifier.
+ """Get model.
+
+ Get a model by its identifier.
:param model_id: The identifier of the model to get.
:returns: A Model.
@@ -140,7 +142,9 @@ class Models(Protocol):
metadata: dict[str, Any] | None = None,
model_type: ModelType | None = None,
) -> Model:
- """Register a model.
+ """Register model.
+
+ Register a model.
:param model_id: The identifier of the model to register.
:param provider_model_id: The identifier of the model in the provider.
@@ -156,7 +160,9 @@ class Models(Protocol):
self,
model_id: str,
) -> None:
- """Unregister a model.
+ """Unregister model.
+
+ Unregister a model.
:param model_id: The identifier of the model to unregister.
"""
diff --git a/llama_stack/apis/prompts/prompts.py b/llama_stack/apis/prompts/prompts.py
index c56185e25..b39c363c7 100644
--- a/llama_stack/apis/prompts/prompts.py
+++ b/llama_stack/apis/prompts/prompts.py
@@ -94,7 +94,9 @@ class ListPromptsResponse(BaseModel):
@runtime_checkable
@trace_protocol
class Prompts(Protocol):
- """Protocol for prompt management operations."""
+ """Prompts
+
+ Protocol for prompt management operations."""
@webmethod(route="/prompts", method="GET", level=LLAMA_STACK_API_V1)
async def list_prompts(self) -> ListPromptsResponse:
@@ -109,7 +111,9 @@ class Prompts(Protocol):
self,
prompt_id: str,
) -> ListPromptsResponse:
- """List all versions of a specific prompt.
+ """List prompt versions.
+
+ List all versions of a specific prompt.
:param prompt_id: The identifier of the prompt to list versions for.
:returns: A ListPromptsResponse containing all versions of the prompt.
@@ -122,7 +126,9 @@ class Prompts(Protocol):
prompt_id: str,
version: int | None = None,
) -> Prompt:
- """Get a prompt by its identifier and optional version.
+ """Get prompt.
+
+ Get a prompt by its identifier and optional version.
:param prompt_id: The identifier of the prompt to get.
:param version: The version of the prompt to get (defaults to latest).
@@ -136,7 +142,9 @@ class Prompts(Protocol):
prompt: str,
variables: list[str] | None = None,
) -> Prompt:
- """Create a new prompt.
+ """Create prompt.
+
+ Create a new prompt.
:param prompt: The prompt text content with variable placeholders.
:param variables: List of variable names that can be used in the prompt template.
@@ -153,7 +161,9 @@ class Prompts(Protocol):
variables: list[str] | None = None,
set_as_default: bool = True,
) -> Prompt:
- """Update an existing prompt (increments version).
+ """Update prompt.
+
+ Update an existing prompt (increments version).
:param prompt_id: The identifier of the prompt to update.
:param prompt: The updated prompt text content.
@@ -169,7 +179,9 @@ class Prompts(Protocol):
self,
prompt_id: str,
) -> None:
- """Delete a prompt.
+ """Delete prompt.
+
+ Delete a prompt.
:param prompt_id: The identifier of the prompt to delete.
"""
@@ -181,7 +193,9 @@ class Prompts(Protocol):
prompt_id: str,
version: int,
) -> Prompt:
- """Set which version of a prompt should be the default in get_prompt (latest).
+ """Set prompt version.
+
+ Set which version of a prompt should be the default in get_prompt (latest).
:param prompt_id: The identifier of the prompt.
:param version: The version to set as default.
diff --git a/llama_stack/apis/providers/providers.py b/llama_stack/apis/providers/providers.py
index d1cff0f6c..e1872571d 100644
--- a/llama_stack/apis/providers/providers.py
+++ b/llama_stack/apis/providers/providers.py
@@ -42,13 +42,16 @@ class ListProvidersResponse(BaseModel):
@runtime_checkable
class Providers(Protocol):
- """
+ """Providers
+
Providers API for inspecting, listing, and modifying providers and their configurations.
"""
@webmethod(route="/providers", method="GET", level=LLAMA_STACK_API_V1)
async def list_providers(self) -> ListProvidersResponse:
- """List all available providers.
+ """List providers.
+
+ List all available providers.
:returns: A ListProvidersResponse containing information about all providers.
"""
@@ -56,7 +59,9 @@ class Providers(Protocol):
@webmethod(route="/providers/{provider_id}", method="GET", level=LLAMA_STACK_API_V1)
async def inspect_provider(self, provider_id: str) -> ProviderInfo:
- """Get detailed information about a specific provider.
+ """Get provider.
+
+ Get detailed information about a specific provider.
:param provider_id: The ID of the provider to inspect.
:returns: A ProviderInfo object containing the provider's details.
diff --git a/llama_stack/apis/safety/safety.py b/llama_stack/apis/safety/safety.py
index 0fa250d90..2ae74b0a7 100644
--- a/llama_stack/apis/safety/safety.py
+++ b/llama_stack/apis/safety/safety.py
@@ -96,6 +96,11 @@ class ShieldStore(Protocol):
@runtime_checkable
@trace_protocol
class Safety(Protocol):
+ """Safety
+
+ OpenAI-compatible Moderations API.
+ """
+
shield_store: ShieldStore
@webmethod(route="/safety/run-shield", method="POST", level=LLAMA_STACK_API_V1)
@@ -105,7 +110,9 @@ class Safety(Protocol):
messages: list[Message],
params: dict[str, Any],
) -> RunShieldResponse:
- """Run a shield.
+ """Run shield.
+
+ Run a shield.
:param shield_id: The identifier of the shield to run.
:param messages: The messages to run the shield on.
@@ -117,7 +124,9 @@ class Safety(Protocol):
@webmethod(route="/openai/v1/moderations", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
@webmethod(route="/moderations", method="POST", level=LLAMA_STACK_API_V1)
async def run_moderation(self, input: str | list[str], model: str) -> ModerationObject:
- """Classifies if text and/or image inputs are potentially harmful.
+ """Create moderation.
+
+ Classifies if text and/or image inputs are potentially harmful.
:param input: Input (or inputs) to classify.
Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models.
:param model: The content moderation model you would like to use.