chore!: remove SDG API (#4035)

# What does this PR do?

This API hasn't gained any traction and has received close to zero interest from
the community. Let's revisit in the future if things change.
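
For reference, the endpoint being removed is `POST /v1/synthetic-data-generation/generate`. Below is a minimal sketch of the kind of request it accepted, based on the `SyntheticDataGenerateRequest`/`SyntheticDataGenerationResponse` schemas deleted in this diff (the base URL, payload values, and use of `requests` are illustrative assumptions, not part of this PR):

```python
# Hypothetical client call against the endpoint this PR removes.
# Field names come from the deleted SyntheticDataGenerateRequest schema;
# the host/port, model id, and dialog text are made-up examples.
import requests

resp = requests.post(
    "http://localhost:8321/v1/synthetic-data-generation/generate",
    json={
        "dialogs": [{"role": "user", "content": "Write three short FAQ entries."}],
        "filtering_function": "top_k",   # one of: none, random, top_k, top_p, top_k_top_p, sigmoid
        "model": "my-registered-model",  # optional; must be registered via the /models endpoint
    },
    timeout=60,
)
resp.raise_for_status()
body = resp.json()
print(body["synthetic_data"])   # samples that passed the filtering criteria
print(body.get("statistics"))   # optional generation/filtering statistics
```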

Signed-off-by: Sébastien Han <seb@redhat.com>
Co-authored-by: Ashwin Bharambe <ashwin.bharambe@gmail.com>
Sébastien Han 2025-11-04 01:12:06 +01:00 committed by GitHub
parent 44096512b5
commit 4a5ef65286
8 changed files with 395 additions and 1290 deletions


@@ -2010,40 +2010,6 @@ paths:
schema:
type: string
deprecated: false
/v1/synthetic-data-generation/generate:
post:
responses:
'200':
description: >-
Response containing filtered synthetic data samples and optional statistics
content:
application/json:
schema:
$ref: '#/components/schemas/SyntheticDataGenerationResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
$ref: >-
#/components/responses/TooManyRequests429
'500':
$ref: >-
#/components/responses/InternalServerError500
default:
$ref: '#/components/responses/DefaultError'
tags:
- SyntheticDataGeneration (Coming Soon)
summary: >-
Generate synthetic data based on input dialogs and apply filtering.
description: >-
Generate synthetic data based on input dialogs and apply filtering.
parameters: []
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/SyntheticDataGenerateRequest'
required: true
deprecated: false
/v1/tool-runtime/invoke:
post:
responses:
@@ -10020,45 +9986,29 @@ components:
required:
- shield_id
title: RegisterShieldRequest
CompletionMessage:
InvokeToolRequest:
type: object
properties:
role:
tool_name:
type: string
const: assistant
default: assistant
description: The name of the tool to invoke.
kwargs:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
Must be "assistant" to identify this as the model's response
content:
$ref: '#/components/schemas/InterleavedContent'
description: The content of the model's response
stop_reason:
type: string
enum:
- end_of_turn
- end_of_message
- out_of_tokens
description: >-
Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`:
The model finished generating the entire response. - `StopReason.end_of_message`:
The model finished generating but generated a partial response -- usually,
a tool call. The user may call the tool and continue the conversation
with the tool's response. - `StopReason.out_of_tokens`: The model ran
out of token budget.
tool_calls:
type: array
items:
$ref: '#/components/schemas/ToolCall'
description: >-
List of tool calls. Each tool call is a ToolCall object.
A dictionary of arguments to pass to the tool.
additionalProperties: false
required:
- role
- content
- stop_reason
title: CompletionMessage
description: >-
A message containing the model's (assistant) response in a chat conversation.
- tool_name
- kwargs
title: InvokeToolRequest
ImageContentItem:
type: object
properties:
@@ -10105,41 +10055,6 @@ components:
mapping:
image: '#/components/schemas/ImageContentItem'
text: '#/components/schemas/TextContentItem'
Message:
oneOf:
- $ref: '#/components/schemas/UserMessage'
- $ref: '#/components/schemas/SystemMessage'
- $ref: '#/components/schemas/ToolResponseMessage'
- $ref: '#/components/schemas/CompletionMessage'
discriminator:
propertyName: role
mapping:
user: '#/components/schemas/UserMessage'
system: '#/components/schemas/SystemMessage'
tool: '#/components/schemas/ToolResponseMessage'
assistant: '#/components/schemas/CompletionMessage'
SystemMessage:
type: object
properties:
role:
type: string
const: system
default: system
description: >-
Must be "system" to identify this as a system message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the "system prompt". If multiple system messages are provided,
they are concatenated. The underlying Llama Stack code may also add other
system messages (for example, for formatting tool definitions).
additionalProperties: false
required:
- role
- content
title: SystemMessage
description: >-
A system message providing instructions or context to the model.
TextContentItem:
type: object
properties:
@@ -10158,179 +10073,6 @@ components:
- text
title: TextContentItem
description: A text content item
ToolCall:
type: object
properties:
call_id:
type: string
tool_name:
oneOf:
- type: string
enum:
- brave_search
- wolfram_alpha
- photogen
- code_interpreter
title: BuiltinTool
- type: string
arguments:
type: string
additionalProperties: false
required:
- call_id
- tool_name
- arguments
title: ToolCall
ToolResponseMessage:
type: object
properties:
role:
type: string
const: tool
default: tool
description: >-
Must be "tool" to identify this as a tool response
call_id:
type: string
description: >-
Unique identifier for the tool call this response is for
content:
$ref: '#/components/schemas/InterleavedContent'
description: The response content from the tool
additionalProperties: false
required:
- role
- call_id
- content
title: ToolResponseMessage
description: >-
A message representing the result of a tool invocation.
URL:
type: object
properties:
uri:
type: string
description: The URL string pointing to the resource
additionalProperties: false
required:
- uri
title: URL
description: A URL reference to external content.
UserMessage:
type: object
properties:
role:
type: string
const: user
default: user
description: >-
Must be "user" to identify this as a user message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the message, which can include text and other media
context:
$ref: '#/components/schemas/InterleavedContent'
description: >-
(Optional) This field is used internally by Llama Stack to pass RAG context.
This field may be removed in the API in the future.
additionalProperties: false
required:
- role
- content
title: UserMessage
description: >-
A message from the user in a chat conversation.
SyntheticDataGenerateRequest:
type: object
properties:
dialogs:
type: array
items:
$ref: '#/components/schemas/Message'
description: >-
List of conversation messages to use as input for synthetic data generation
filtering_function:
type: string
enum:
- none
- random
- top_k
- top_p
- top_k_top_p
- sigmoid
description: >-
Type of filtering to apply to generated synthetic data samples
model:
type: string
description: >-
(Optional) The identifier of the model to use. The model must be registered
with Llama Stack and available via the /models endpoint
additionalProperties: false
required:
- dialogs
- filtering_function
title: SyntheticDataGenerateRequest
SyntheticDataGenerationResponse:
type: object
properties:
synthetic_data:
type: array
items:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
List of generated synthetic data samples that passed the filtering criteria
statistics:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
(Optional) Statistical information about the generation process and filtering
results
additionalProperties: false
required:
- synthetic_data
title: SyntheticDataGenerationResponse
description: >-
Response from the synthetic data generation. Batch of (prompt, response, score)
tuples that pass the threshold.
InvokeToolRequest:
type: object
properties:
tool_name:
type: string
description: The name of the tool to invoke.
kwargs:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
A dictionary of arguments to pass to the tool.
additionalProperties: false
required:
- tool_name
- kwargs
title: InvokeToolRequest
ToolInvocationResult:
type: object
properties:
@@ -10361,6 +10103,17 @@ components:
additionalProperties: false
title: ToolInvocationResult
description: Result of a tool invocation.
URL:
type: object
properties:
uri:
type: string
description: The URL string pointing to the resource
additionalProperties: false
required:
- uri
title: URL
description: A URL reference to external content.
ToolDef:
type: object
properties:
@@ -12393,6 +12146,45 @@ components:
title: AgentSessionCreateResponse
description: >-
Response returned when creating a new agent session.
CompletionMessage:
type: object
properties:
role:
type: string
const: assistant
default: assistant
description: >-
Must be "assistant" to identify this as the model's response
content:
$ref: '#/components/schemas/InterleavedContent'
description: The content of the model's response
stop_reason:
type: string
enum:
- end_of_turn
- end_of_message
- out_of_tokens
description: >-
Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`:
The model finished generating the entire response. - `StopReason.end_of_message`:
The model finished generating but generated a partial response -- usually,
a tool call. The user may call the tool and continue the conversation
with the tool's response. - `StopReason.out_of_tokens`: The model ran
out of token budget.
tool_calls:
type: array
items:
$ref: '#/components/schemas/ToolCall'
description: >-
List of tool calls. Each tool call is a ToolCall object.
additionalProperties: false
required:
- role
- content
- stop_reason
title: CompletionMessage
description: >-
A message containing the model's (assistant) response in a chat conversation.
InferenceStep:
type: object
properties:
@@ -12545,6 +12337,29 @@ components:
- step_type
title: ShieldCallStep
description: A shield call step in an agent turn.
ToolCall:
type: object
properties:
call_id:
type: string
tool_name:
oneOf:
- type: string
enum:
- brave_search
- wolfram_alpha
- photogen
- code_interpreter
title: BuiltinTool
- type: string
arguments:
type: string
additionalProperties: false
required:
- call_id
- tool_name
- arguments
title: ToolCall
ToolExecutionStep:
type: object
properties:
@@ -12632,6 +12447,30 @@ components:
- content
title: ToolResponse
description: Response from a tool invocation.
ToolResponseMessage:
type: object
properties:
role:
type: string
const: tool
default: tool
description: >-
Must be "tool" to identify this as a tool response
call_id:
type: string
description: >-
Unique identifier for the tool call this response is for
content:
$ref: '#/components/schemas/InterleavedContent'
description: The response content from the tool
additionalProperties: false
required:
- role
- call_id
- content
title: ToolResponseMessage
description: >-
A message representing the result of a tool invocation.
Turn:
type: object
properties:
@@ -12717,6 +12556,31 @@ components:
title: Turn
description: >-
A single turn in an interaction with an Agentic System.
UserMessage:
type: object
properties:
role:
type: string
const: user
default: user
description: >-
Must be "user" to identify this as a user message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the message, which can include text and other media
context:
$ref: '#/components/schemas/InterleavedContent'
description: >-
(Optional) This field is used internally by Llama Stack to pass RAG context.
This field may be removed in the API in the future.
additionalProperties: false
required:
- role
- content
title: UserMessage
description: >-
A message from the user in a chat conversation.
CreateAgentTurnRequest:
type: object
properties:
@@ -13330,6 +13194,28 @@ components:
- sampling_params
title: ModelCandidate
description: A model candidate for evaluation.
SystemMessage:
type: object
properties:
role:
type: string
const: system
default: system
description: >-
Must be "system" to identify this as a system message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the "system prompt". If multiple system messages are provided,
they are concatenated. The underlying Llama Stack code may also add other
system messages (for example, for formatting tool definitions).
additionalProperties: false
required:
- role
- content
title: SystemMessage
description: >-
A system message providing instructions or context to the model.
EvaluateRowsRequest:
type: object
properties:
@@ -14145,8 +14031,6 @@ tags:
description: ''
- name: Shields
description: ''
- name: SyntheticDataGeneration (Coming Soon)
description: ''
- name: ToolGroups
description: ''
- name: ToolRuntime
@@ -14174,7 +14058,6 @@ x-tagGroups:
- Scoring
- ScoringFunctions
- Shields
- SyntheticDataGeneration (Coming Soon)
- ToolGroups
- ToolRuntime
- VectorIO


@@ -23,5 +23,4 @@ A Llama Stack API is described as a collection of REST endpoints. We currently s
We are working on adding a few more APIs to complete the application lifecycle. These will include:
- **Batch Inference**: run inference on a dataset of inputs
- **Batch Agents**: run agents on a dataset of inputs
- **Synthetic Data Generation**: generate synthetic data for model development
- **Batches**: OpenAI-compatible batch management for inference


@@ -2650,51 +2650,6 @@
"deprecated": false
}
},
"/v1/synthetic-data-generation/generate": {
"post": {
"responses": {
"200": {
"description": "Response containing filtered synthetic data samples and optional statistics",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SyntheticDataGenerationResponse"
}
}
}
},
"400": {
"$ref": "#/components/responses/BadRequest400"
},
"429": {
"$ref": "#/components/responses/TooManyRequests429"
},
"500": {
"$ref": "#/components/responses/InternalServerError500"
},
"default": {
"$ref": "#/components/responses/DefaultError"
}
},
"tags": [
"SyntheticDataGeneration (Coming Soon)"
],
"summary": "Generate synthetic data based on input dialogs and apply filtering.",
"description": "Generate synthetic data based on input dialogs and apply filtering.",
"parameters": [],
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/SyntheticDataGenerateRequest"
}
}
},
"required": true
},
"deprecated": false
}
},
"/v1/tool-runtime/invoke": {
"post": {
"responses": {
@@ -11478,44 +11433,46 @@
],
"title": "RegisterShieldRequest"
},
"CompletionMessage": {
"InvokeToolRequest": {
"type": "object",
"properties": {
"role": {
"tool_name": {
"type": "string",
"const": "assistant",
"default": "assistant",
"description": "Must be \"assistant\" to identify this as the model's response"
"description": "The name of the tool to invoke."
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the model's response"
},
"stop_reason": {
"type": "string",
"enum": [
"end_of_turn",
"end_of_message",
"out_of_tokens"
],
"description": "Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: The model finished generating the entire response. - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response. - `StopReason.out_of_tokens`: The model ran out of token budget."
},
"tool_calls": {
"type": "array",
"items": {
"$ref": "#/components/schemas/ToolCall"
"kwargs": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "List of tool calls. Each tool call is a ToolCall object."
"description": "A dictionary of arguments to pass to the tool."
}
},
"additionalProperties": false,
"required": [
"role",
"content",
"stop_reason"
"tool_name",
"kwargs"
],
"title": "CompletionMessage",
"description": "A message containing the model's (assistant) response in a chat conversation."
"title": "InvokeToolRequest"
},
"ImageContentItem": {
"type": "object",
@@ -11584,53 +11541,6 @@
}
}
},
"Message": {
"oneOf": [
{
"$ref": "#/components/schemas/UserMessage"
},
{
"$ref": "#/components/schemas/SystemMessage"
},
{
"$ref": "#/components/schemas/ToolResponseMessage"
},
{
"$ref": "#/components/schemas/CompletionMessage"
}
],
"discriminator": {
"propertyName": "role",
"mapping": {
"user": "#/components/schemas/UserMessage",
"system": "#/components/schemas/SystemMessage",
"tool": "#/components/schemas/ToolResponseMessage",
"assistant": "#/components/schemas/CompletionMessage"
}
}
},
"SystemMessage": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "system",
"default": "system",
"description": "Must be \"system\" to identify this as a system message"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)."
}
},
"additionalProperties": false,
"required": [
"role",
"content"
],
"title": "SystemMessage",
"description": "A system message providing instructions or context to the model."
},
"TextContentItem": {
"type": "object",
"properties": {
@@ -11653,250 +11563,6 @@
"title": "TextContentItem",
"description": "A text content item"
},
"ToolCall": {
"type": "object",
"properties": {
"call_id": {
"type": "string"
},
"tool_name": {
"oneOf": [
{
"type": "string",
"enum": [
"brave_search",
"wolfram_alpha",
"photogen",
"code_interpreter"
],
"title": "BuiltinTool"
},
{
"type": "string"
}
]
},
"arguments": {
"type": "string"
}
},
"additionalProperties": false,
"required": [
"call_id",
"tool_name",
"arguments"
],
"title": "ToolCall"
},
"ToolResponseMessage": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "tool",
"default": "tool",
"description": "Must be \"tool\" to identify this as a tool response"
},
"call_id": {
"type": "string",
"description": "Unique identifier for the tool call this response is for"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The response content from the tool"
}
},
"additionalProperties": false,
"required": [
"role",
"call_id",
"content"
],
"title": "ToolResponseMessage",
"description": "A message representing the result of a tool invocation."
},
"URL": {
"type": "object",
"properties": {
"uri": {
"type": "string",
"description": "The URL string pointing to the resource"
}
},
"additionalProperties": false,
"required": [
"uri"
],
"title": "URL",
"description": "A URL reference to external content."
},
"UserMessage": {
"type": "object",
"properties": {
"role": {
"type": "string",
"const": "user",
"default": "user",
"description": "Must be \"user\" to identify this as a user message"
},
"content": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "The content of the message, which can include text and other media"
},
"context": {
"$ref": "#/components/schemas/InterleavedContent",
"description": "(Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future."
}
},
"additionalProperties": false,
"required": [
"role",
"content"
],
"title": "UserMessage",
"description": "A message from the user in a chat conversation."
},
"SyntheticDataGenerateRequest": {
"type": "object",
"properties": {
"dialogs": {
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
"description": "List of conversation messages to use as input for synthetic data generation"
},
"filtering_function": {
"type": "string",
"enum": [
"none",
"random",
"top_k",
"top_p",
"top_k_top_p",
"sigmoid"
],
"description": "Type of filtering to apply to generated synthetic data samples"
},
"model": {
"type": "string",
"description": "(Optional) The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint"
}
},
"additionalProperties": false,
"required": [
"dialogs",
"filtering_function"
],
"title": "SyntheticDataGenerateRequest"
},
"SyntheticDataGenerationResponse": {
"type": "object",
"properties": {
"synthetic_data": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
}
},
"description": "List of generated synthetic data samples that passed the filtering criteria"
},
"statistics": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "(Optional) Statistical information about the generation process and filtering results"
}
},
"additionalProperties": false,
"required": [
"synthetic_data"
],
"title": "SyntheticDataGenerationResponse",
"description": "Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold."
},
"InvokeToolRequest": {
"type": "object",
"properties": {
"tool_name": {
"type": "string",
"description": "The name of the tool to invoke."
},
"kwargs": {
"type": "object",
"additionalProperties": {
"oneOf": [
{
"type": "null"
},
{
"type": "boolean"
},
{
"type": "number"
},
{
"type": "string"
},
{
"type": "array"
},
{
"type": "object"
}
]
},
"description": "A dictionary of arguments to pass to the tool."
}
},
"additionalProperties": false,
"required": [
"tool_name",
"kwargs"
],
"title": "InvokeToolRequest"
},
"ToolInvocationResult": {
"type": "object",
"properties": {
@@ -11943,6 +11609,21 @@
"title": "ToolInvocationResult",
"description": "Result of a tool invocation."
},
"URL": {
"type": "object",
"properties": {
"uri": {
"type": "string",
"description": "The URL string pointing to the resource"
}
},
"additionalProperties": false,
"required": [
"uri"
],
"title": "URL",
"description": "A URL reference to external content."
},
"ToolDef": {
"type": "object",
"properties": {
@@ -13993,10 +13674,6 @@
"name": "Shields",
"description": ""
},
{
"name": "SyntheticDataGeneration (Coming Soon)",
"description": ""
},
{
"name": "ToolGroups",
"description": ""
@@ -14027,7 +13704,6 @@
"Scoring",
"ScoringFunctions",
"Shields",
"SyntheticDataGeneration (Coming Soon)",
"ToolGroups",
"ToolRuntime",
"VectorIO"


@@ -2007,40 +2007,6 @@ paths:
schema:
type: string
deprecated: false
/v1/synthetic-data-generation/generate:
post:
responses:
'200':
description: >-
Response containing filtered synthetic data samples and optional statistics
content:
application/json:
schema:
$ref: '#/components/schemas/SyntheticDataGenerationResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
$ref: >-
#/components/responses/TooManyRequests429
'500':
$ref: >-
#/components/responses/InternalServerError500
default:
$ref: '#/components/responses/DefaultError'
tags:
- SyntheticDataGeneration (Coming Soon)
summary: >-
Generate synthetic data based on input dialogs and apply filtering.
description: >-
Generate synthetic data based on input dialogs and apply filtering.
parameters: []
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/SyntheticDataGenerateRequest'
required: true
deprecated: false
/v1/tool-runtime/invoke:
post:
responses:
@@ -8807,45 +8773,29 @@ components:
required:
- shield_id
title: RegisterShieldRequest
CompletionMessage:
InvokeToolRequest:
type: object
properties:
role:
tool_name:
type: string
const: assistant
default: assistant
description: The name of the tool to invoke.
kwargs:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
Must be "assistant" to identify this as the model's response
content:
$ref: '#/components/schemas/InterleavedContent'
description: The content of the model's response
stop_reason:
type: string
enum:
- end_of_turn
- end_of_message
- out_of_tokens
description: >-
Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`:
The model finished generating the entire response. - `StopReason.end_of_message`:
The model finished generating but generated a partial response -- usually,
a tool call. The user may call the tool and continue the conversation
with the tool's response. - `StopReason.out_of_tokens`: The model ran
out of token budget.
tool_calls:
type: array
items:
$ref: '#/components/schemas/ToolCall'
description: >-
List of tool calls. Each tool call is a ToolCall object.
A dictionary of arguments to pass to the tool.
additionalProperties: false
required:
- role
- content
- stop_reason
title: CompletionMessage
description: >-
A message containing the model's (assistant) response in a chat conversation.
- tool_name
- kwargs
title: InvokeToolRequest
ImageContentItem:
type: object
properties:
@@ -8892,41 +8842,6 @@ components:
mapping:
image: '#/components/schemas/ImageContentItem'
text: '#/components/schemas/TextContentItem'
Message:
oneOf:
- $ref: '#/components/schemas/UserMessage'
- $ref: '#/components/schemas/SystemMessage'
- $ref: '#/components/schemas/ToolResponseMessage'
- $ref: '#/components/schemas/CompletionMessage'
discriminator:
propertyName: role
mapping:
user: '#/components/schemas/UserMessage'
system: '#/components/schemas/SystemMessage'
tool: '#/components/schemas/ToolResponseMessage'
assistant: '#/components/schemas/CompletionMessage'
SystemMessage:
type: object
properties:
role:
type: string
const: system
default: system
description: >-
Must be "system" to identify this as a system message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the "system prompt". If multiple system messages are provided,
they are concatenated. The underlying Llama Stack code may also add other
system messages (for example, for formatting tool definitions).
additionalProperties: false
required:
- role
- content
title: SystemMessage
description: >-
A system message providing instructions or context to the model.
TextContentItem:
type: object
properties:
@@ -8945,179 +8860,6 @@ components:
- text
title: TextContentItem
description: A text content item
ToolCall:
type: object
properties:
call_id:
type: string
tool_name:
oneOf:
- type: string
enum:
- brave_search
- wolfram_alpha
- photogen
- code_interpreter
title: BuiltinTool
- type: string
arguments:
type: string
additionalProperties: false
required:
- call_id
- tool_name
- arguments
title: ToolCall
ToolResponseMessage:
type: object
properties:
role:
type: string
const: tool
default: tool
description: >-
Must be "tool" to identify this as a tool response
call_id:
type: string
description: >-
Unique identifier for the tool call this response is for
content:
$ref: '#/components/schemas/InterleavedContent'
description: The response content from the tool
additionalProperties: false
required:
- role
- call_id
- content
title: ToolResponseMessage
description: >-
A message representing the result of a tool invocation.
URL:
type: object
properties:
uri:
type: string
description: The URL string pointing to the resource
additionalProperties: false
required:
- uri
title: URL
description: A URL reference to external content.
UserMessage:
type: object
properties:
role:
type: string
const: user
default: user
description: >-
Must be "user" to identify this as a user message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the message, which can include text and other media
context:
$ref: '#/components/schemas/InterleavedContent'
description: >-
(Optional) This field is used internally by Llama Stack to pass RAG context.
This field may be removed in the API in the future.
additionalProperties: false
required:
- role
- content
title: UserMessage
description: >-
A message from the user in a chat conversation.
SyntheticDataGenerateRequest:
type: object
properties:
dialogs:
type: array
items:
$ref: '#/components/schemas/Message'
description: >-
List of conversation messages to use as input for synthetic data generation
filtering_function:
type: string
enum:
- none
- random
- top_k
- top_p
- top_k_top_p
- sigmoid
description: >-
Type of filtering to apply to generated synthetic data samples
model:
type: string
description: >-
(Optional) The identifier of the model to use. The model must be registered
with Llama Stack and available via the /models endpoint
additionalProperties: false
required:
- dialogs
- filtering_function
title: SyntheticDataGenerateRequest
SyntheticDataGenerationResponse:
type: object
properties:
synthetic_data:
type: array
items:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
List of generated synthetic data samples that passed the filtering criteria
statistics:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
(Optional) Statistical information about the generation process and filtering
results
additionalProperties: false
required:
- synthetic_data
title: SyntheticDataGenerationResponse
description: >-
Response from the synthetic data generation. Batch of (prompt, response, score)
tuples that pass the threshold.
InvokeToolRequest:
type: object
properties:
tool_name:
type: string
description: The name of the tool to invoke.
kwargs:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
A dictionary of arguments to pass to the tool.
additionalProperties: false
required:
- tool_name
- kwargs
title: InvokeToolRequest
ToolInvocationResult:
type: object
properties:
@@ -9148,6 +8890,17 @@ components:
additionalProperties: false
title: ToolInvocationResult
description: Result of a tool invocation.
URL:
type: object
properties:
uri:
type: string
description: The URL string pointing to the resource
additionalProperties: false
required:
- uri
title: URL
description: A URL reference to external content.
ToolDef:
type: object
properties:
@@ -10749,8 +10502,6 @@ tags:
description: ''
- name: Shields
description: ''
- name: SyntheticDataGeneration (Coming Soon)
description: ''
- name: ToolGroups
description: ''
- name: ToolRuntime
@@ -10773,7 +10524,6 @@ x-tagGroups:
- Scoring
- ScoringFunctions
- Shields
- SyntheticDataGeneration (Coming Soon)
- ToolGroups
- ToolRuntime
- VectorIO


@@ -2010,40 +2010,6 @@ paths:
schema:
type: string
deprecated: false
/v1/synthetic-data-generation/generate:
post:
responses:
'200':
description: >-
Response containing filtered synthetic data samples and optional statistics
content:
application/json:
schema:
$ref: '#/components/schemas/SyntheticDataGenerationResponse'
'400':
$ref: '#/components/responses/BadRequest400'
'429':
$ref: >-
#/components/responses/TooManyRequests429
'500':
$ref: >-
#/components/responses/InternalServerError500
default:
$ref: '#/components/responses/DefaultError'
tags:
- SyntheticDataGeneration (Coming Soon)
summary: >-
Generate synthetic data based on input dialogs and apply filtering.
description: >-
Generate synthetic data based on input dialogs and apply filtering.
parameters: []
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/SyntheticDataGenerateRequest'
required: true
deprecated: false
/v1/tool-runtime/invoke:
post:
responses:
@@ -10020,45 +9986,29 @@ components:
required:
- shield_id
title: RegisterShieldRequest
CompletionMessage:
InvokeToolRequest:
type: object
properties:
role:
tool_name:
type: string
const: assistant
default: assistant
description: The name of the tool to invoke.
kwargs:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
Must be "assistant" to identify this as the model's response
content:
$ref: '#/components/schemas/InterleavedContent'
description: The content of the model's response
stop_reason:
type: string
enum:
- end_of_turn
- end_of_message
- out_of_tokens
description: >-
Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`:
The model finished generating the entire response. - `StopReason.end_of_message`:
The model finished generating but generated a partial response -- usually,
a tool call. The user may call the tool and continue the conversation
with the tool's response. - `StopReason.out_of_tokens`: The model ran
out of token budget.
tool_calls:
type: array
items:
$ref: '#/components/schemas/ToolCall'
description: >-
List of tool calls. Each tool call is a ToolCall object.
A dictionary of arguments to pass to the tool.
additionalProperties: false
required:
- role
- content
- stop_reason
title: CompletionMessage
description: >-
A message containing the model's (assistant) response in a chat conversation.
- tool_name
- kwargs
title: InvokeToolRequest
ImageContentItem:
type: object
properties:
@@ -10105,41 +10055,6 @@ components:
mapping:
image: '#/components/schemas/ImageContentItem'
text: '#/components/schemas/TextContentItem'
Message:
oneOf:
- $ref: '#/components/schemas/UserMessage'
- $ref: '#/components/schemas/SystemMessage'
- $ref: '#/components/schemas/ToolResponseMessage'
- $ref: '#/components/schemas/CompletionMessage'
discriminator:
propertyName: role
mapping:
user: '#/components/schemas/UserMessage'
system: '#/components/schemas/SystemMessage'
tool: '#/components/schemas/ToolResponseMessage'
assistant: '#/components/schemas/CompletionMessage'
SystemMessage:
type: object
properties:
role:
type: string
const: system
default: system
description: >-
Must be "system" to identify this as a system message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the "system prompt". If multiple system messages are provided,
they are concatenated. The underlying Llama Stack code may also add other
system messages (for example, for formatting tool definitions).
additionalProperties: false
required:
- role
- content
title: SystemMessage
description: >-
A system message providing instructions or context to the model.
TextContentItem:
type: object
properties:
@@ -10158,179 +10073,6 @@ components:
- text
title: TextContentItem
description: A text content item
ToolCall:
type: object
properties:
call_id:
type: string
tool_name:
oneOf:
- type: string
enum:
- brave_search
- wolfram_alpha
- photogen
- code_interpreter
title: BuiltinTool
- type: string
arguments:
type: string
additionalProperties: false
required:
- call_id
- tool_name
- arguments
title: ToolCall
ToolResponseMessage:
type: object
properties:
role:
type: string
const: tool
default: tool
description: >-
Must be "tool" to identify this as a tool response
call_id:
type: string
description: >-
Unique identifier for the tool call this response is for
content:
$ref: '#/components/schemas/InterleavedContent'
description: The response content from the tool
additionalProperties: false
required:
- role
- call_id
- content
title: ToolResponseMessage
description: >-
A message representing the result of a tool invocation.
URL:
type: object
properties:
uri:
type: string
description: The URL string pointing to the resource
additionalProperties: false
required:
- uri
title: URL
description: A URL reference to external content.
UserMessage:
type: object
properties:
role:
type: string
const: user
default: user
description: >-
Must be "user" to identify this as a user message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the message, which can include text and other media
context:
$ref: '#/components/schemas/InterleavedContent'
description: >-
(Optional) This field is used internally by Llama Stack to pass RAG context.
This field may be removed in the API in the future.
additionalProperties: false
required:
- role
- content
title: UserMessage
description: >-
A message from the user in a chat conversation.
SyntheticDataGenerateRequest:
type: object
properties:
dialogs:
type: array
items:
$ref: '#/components/schemas/Message'
description: >-
List of conversation messages to use as input for synthetic data generation
filtering_function:
type: string
enum:
- none
- random
- top_k
- top_p
- top_k_top_p
- sigmoid
description: >-
Type of filtering to apply to generated synthetic data samples
model:
type: string
description: >-
(Optional) The identifier of the model to use. The model must be registered
with Llama Stack and available via the /models endpoint
additionalProperties: false
required:
- dialogs
- filtering_function
title: SyntheticDataGenerateRequest
SyntheticDataGenerationResponse:
type: object
properties:
synthetic_data:
type: array
items:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
List of generated synthetic data samples that passed the filtering criteria
statistics:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
(Optional) Statistical information about the generation process and filtering
results
additionalProperties: false
required:
- synthetic_data
title: SyntheticDataGenerationResponse
description: >-
Response from the synthetic data generation. Batch of (prompt, response, score)
tuples that pass the threshold.
InvokeToolRequest:
type: object
properties:
tool_name:
type: string
description: The name of the tool to invoke.
kwargs:
type: object
additionalProperties:
oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string
- type: array
- type: object
description: >-
A dictionary of arguments to pass to the tool.
additionalProperties: false
required:
- tool_name
- kwargs
title: InvokeToolRequest
ToolInvocationResult:
type: object
properties:
@@ -10361,6 +10103,17 @@ components:
additionalProperties: false
title: ToolInvocationResult
description: Result of a tool invocation.
URL:
type: object
properties:
uri:
type: string
description: The URL string pointing to the resource
additionalProperties: false
required:
- uri
title: URL
description: A URL reference to external content.
ToolDef:
type: object
properties:
@@ -12393,6 +12146,45 @@ components:
title: AgentSessionCreateResponse
description: >-
Response returned when creating a new agent session.
CompletionMessage:
type: object
properties:
role:
type: string
const: assistant
default: assistant
description: >-
Must be "assistant" to identify this as the model's response
content:
$ref: '#/components/schemas/InterleavedContent'
description: The content of the model's response
stop_reason:
type: string
enum:
- end_of_turn
- end_of_message
- out_of_tokens
description: >-
Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`:
The model finished generating the entire response. - `StopReason.end_of_message`:
The model finished generating but generated a partial response -- usually,
a tool call. The user may call the tool and continue the conversation
with the tool's response. - `StopReason.out_of_tokens`: The model ran
out of token budget.
tool_calls:
type: array
items:
$ref: '#/components/schemas/ToolCall'
description: >-
List of tool calls. Each tool call is a ToolCall object.
additionalProperties: false
required:
- role
- content
- stop_reason
title: CompletionMessage
description: >-
A message containing the model's (assistant) response in a chat conversation.
InferenceStep:
type: object
properties:
@@ -12545,6 +12337,29 @@ components:
- step_type
title: ShieldCallStep
description: A shield call step in an agent turn.
ToolCall:
type: object
properties:
call_id:
type: string
tool_name:
oneOf:
- type: string
enum:
- brave_search
- wolfram_alpha
- photogen
- code_interpreter
title: BuiltinTool
- type: string
arguments:
type: string
additionalProperties: false
required:
- call_id
- tool_name
- arguments
title: ToolCall
ToolExecutionStep:
type: object
properties:
@@ -12632,6 +12447,30 @@ components:
- content
title: ToolResponse
description: Response from a tool invocation.
ToolResponseMessage:
type: object
properties:
role:
type: string
const: tool
default: tool
description: >-
Must be "tool" to identify this as a tool response
call_id:
type: string
description: >-
Unique identifier for the tool call this response is for
content:
$ref: '#/components/schemas/InterleavedContent'
description: The response content from the tool
additionalProperties: false
required:
- role
- call_id
- content
title: ToolResponseMessage
description: >-
A message representing the result of a tool invocation.
Turn:
type: object
properties:
@@ -12717,6 +12556,31 @@ components:
title: Turn
description: >-
A single turn in an interaction with an Agentic System.
UserMessage:
type: object
properties:
role:
type: string
const: user
default: user
description: >-
Must be "user" to identify this as a user message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the message, which can include text and other media
context:
$ref: '#/components/schemas/InterleavedContent'
description: >-
(Optional) This field is used internally by Llama Stack to pass RAG context.
This field may be removed in the API in the future.
additionalProperties: false
required:
- role
- content
title: UserMessage
description: >-
A message from the user in a chat conversation.
CreateAgentTurnRequest:
type: object
properties:
@@ -13330,6 +13194,28 @@ components:
- sampling_params
title: ModelCandidate
description: A model candidate for evaluation.
SystemMessage:
type: object
properties:
role:
type: string
const: system
default: system
description: >-
Must be "system" to identify this as a system message
content:
$ref: '#/components/schemas/InterleavedContent'
description: >-
The content of the "system prompt". If multiple system messages are provided,
they are concatenated. The underlying Llama Stack code may also add other
system messages (for example, for formatting tool definitions).
additionalProperties: false
required:
- role
- content
title: SystemMessage
description: >-
A system message providing instructions or context to the model.
EvaluateRowsRequest:
type: object
properties:
@@ -14145,8 +14031,6 @@ tags:
description: ''
- name: Shields
description: ''
- name: SyntheticDataGeneration (Coming Soon)
description: ''
- name: ToolGroups
description: ''
- name: ToolRuntime
@@ -14174,7 +14058,6 @@ x-tagGroups:
- Scoring
- ScoringFunctions
- Shields
- SyntheticDataGeneration (Coming Soon)
- ToolGroups
- ToolRuntime
- VectorIO


@@ -1,7 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .synthetic_data_generation import *


@@ -1,77 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import Enum
from typing import Any, Protocol

from pydantic import BaseModel

from llama_stack.apis.inference import Message
from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.schema_utils import json_schema_type, webmethod


class FilteringFunction(Enum):
    """The type of filtering function.

    :cvar none: No filtering applied, accept all generated synthetic data
    :cvar random: Random sampling of generated data points
    :cvar top_k: Keep only the top-k highest scoring synthetic data samples
    :cvar top_p: Nucleus-style filtering, keep samples exceeding cumulative score threshold
    :cvar top_k_top_p: Combined top-k and top-p filtering strategy
    :cvar sigmoid: Apply sigmoid function for probability-based filtering
    """

    none = "none"
    random = "random"
    top_k = "top_k"
    top_p = "top_p"
    top_k_top_p = "top_k_top_p"
    sigmoid = "sigmoid"


@json_schema_type
class SyntheticDataGenerationRequest(BaseModel):
    """Request to generate synthetic data. A small batch of prompts and a filtering function

    :param dialogs: List of conversation messages to use as input for synthetic data generation
    :param filtering_function: Type of filtering to apply to generated synthetic data samples
    :param model: (Optional) The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint
    """

    dialogs: list[Message]
    filtering_function: FilteringFunction = FilteringFunction.none
    model: str | None = None


@json_schema_type
class SyntheticDataGenerationResponse(BaseModel):
    """Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold.

    :param synthetic_data: List of generated synthetic data samples that passed the filtering criteria
    :param statistics: (Optional) Statistical information about the generation process and filtering results
    """

    synthetic_data: list[dict[str, Any]]
    statistics: dict[str, Any] | None = None


class SyntheticDataGeneration(Protocol):
    @webmethod(route="/synthetic-data-generation/generate", level=LLAMA_STACK_API_V1)
    def synthetic_data_generate(
        self,
        dialogs: list[Message],
        filtering_function: FilteringFunction = FilteringFunction.none,
        model: str | None = None,
    ) -> SyntheticDataGenerationResponse:
        """Generate synthetic data based on input dialogs and apply filtering.

        :param dialogs: List of conversation messages to use as input for synthetic data generation
        :param filtering_function: Type of filtering to apply to generated synthetic data samples
        :param model: (Optional) The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint
        :returns: Response containing filtered synthetic data samples and optional statistics
        """
        ...
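
For context, the `SyntheticDataGeneration` protocol removed above was the extension point a provider would have implemented. A minimal sketch of what such an implementation could have looked like before this removal (the class name and its echo logic are illustrative assumptions; the imports only resolve against the tree prior to this PR):

```python
# Illustrative only: a toy provider satisfying the now-removed protocol.
from typing import Any

from llama_stack.apis.inference import Message
from llama_stack.apis.synthetic_data_generation import (  # deleted by this PR
    FilteringFunction,
    SyntheticDataGenerationResponse,
)


class EchoSyntheticDataGenerator:
    """Echoes each input dialog message back as a single 'synthetic' sample."""

    def synthetic_data_generate(
        self,
        dialogs: list[Message],
        filtering_function: FilteringFunction = FilteringFunction.none,
        model: str | None = None,
    ) -> SyntheticDataGenerationResponse:
        # No real generation or filtering: one passthrough sample per message.
        samples: list[dict[str, Any]] = [
            {"prompt": str(message.content), "response": "", "score": 1.0}
            for message in dialogs
        ]
        return SyntheticDataGenerationResponse(
            synthetic_data=samples,
            statistics={"num_samples": len(samples)},
        )
```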


@@ -31,7 +31,6 @@ from llama_stack.apis.safety import Safety
from llama_stack.apis.scoring import Scoring
from llama_stack.apis.scoring_functions import ScoringFunctions
from llama_stack.apis.shields import Shields
from llama_stack.apis.synthetic_data_generation import SyntheticDataGeneration
from llama_stack.apis.tools import RAGToolRuntime, ToolGroups, ToolRuntime
from llama_stack.apis.vector_io import VectorIO
from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl
@@ -66,7 +65,6 @@ class LlamaStack(
Agents,
Batches,
Safety,
SyntheticDataGeneration,
Datasets,
PostTraining,
VectorIO,