feat(openapi): generate stainless config "more" programmatically (#4164)

Generate the Stainless client config directly from code so we can
validate the config before we ever write the YAML.

This change enforces allowed HTTP verbs/paths, detects duplicate routes
across resources, and ensures README example endpoints exist and match
the OpenAPI spec. The generator now fails fast when config entries
drift from the spec, keeping the published config in sync. More
validation can be added later, but this is a good start.
Ashwin Bharambe · 2025-11-17 12:48:03 -08:00 · committed by GitHub
parent 5fe6098350
commit 7d3db6b22c
6 changed files with 990 additions and 193 deletions

client-sdks/stainless/README.md

@@ -5,4 +5,7 @@ These are the source-of-truth configuration files used to generate the Stainless
 A small side note: notice the `.yml` suffixes since Stainless uses that suffix typically for its configuration files.
-These files go hand-in-hand. As of now, only the `openapi.yml` file is automatically generated using the `scripts/run_openapi_generator.sh` script.
+These files go hand-in-hand. Both `openapi.yml` and `config.yml` are generated by `scripts/run_openapi_generator.sh`:
+- `openapi.yml` comes from the FastAPI-based generator.
+- `config.yml` is rendered from `scripts/openapi_generator/stainless_config/config_data.py` so the Stainless config stays in lock-step with the spec.

client-sdks/stainless/config.yml

@@ -1,20 +1,16 @@
 # yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json
 organization:
-  # Name of your organization or company, used to determine the name of the client
-  # and headings.
   name: llama-stack-client
   docs: https://llama-stack.readthedocs.io/en/latest/
   contact: llamastack@meta.com
 security:
   - {}
   - BearerAuth: []
 security_schemes:
   BearerAuth:
     type: http
     scheme: bearer
-# `targets` define the output targets and their customization options, such as
-# whether to emit the Node SDK and what it's package name should be.
 targets:
   node:
     package_name: llama-stack-client
@@ -40,71 +36,123 @@ targets:
     options:
       enable_v2: true
       back_compat_use_shared_package: false
-# `client_settings` define settings for the API client, such as extra constructor
-# arguments (used for authentication), retry behavior, idempotency, etc.
 client_settings:
   default_env_prefix: LLAMA_STACK_CLIENT
   opts:
     api_key:
       type: string
       read_env: LLAMA_STACK_CLIENT_API_KEY
-      auth: { security_scheme: BearerAuth }
+      auth:
+        security_scheme: BearerAuth
       nullable: true
-# `environments` are a map of the name of the environment (e.g. "sandbox",
-# "production") to the corresponding url to use.
 environments:
   production: http://any-hosted-llama-stack.com
-# `pagination` defines [pagination schemes] which provides a template to match
-# endpoints and generate next-page and auto-pagination helpers in the SDKs.
 pagination:
   - name: datasets_iterrows
     type: offset
     request:
       dataset_id:
         type: string
       start_index:
         type: integer
         x-stainless-pagination-property:
           purpose: offset_count_param
       limit:
         type: integer
     response:
       data:
         type: array
         items:
           type: object
       next_index:
         type: integer
         x-stainless-pagination-property:
           purpose: offset_count_start_field
   - name: openai_cursor_page
     type: cursor
     request:
       limit:
         type: integer
       after:
         type: string
         x-stainless-pagination-property:
           purpose: next_cursor_param
     response:
       data:
         type: array
         items: {}
       has_more:
         type: boolean
       last_id:
         type: string
         x-stainless-pagination-property:
           purpose: next_cursor_field
+settings:
+  license: MIT
+  unwrap_response_fields:
+    - data
+  file_header: 'Copyright (c) Meta Platforms, Inc. and affiliates.
+
+    All rights reserved.
+
+
+    This source code is licensed under the terms described in the LICENSE file in
+
+    the root directory of this source tree.
+
+    '
+openapi:
+  transformations:
+    - command: mergeObject
+      reason: Better return_type using enum
+      args:
+        target:
+          - $.components.schemas
+        object:
+          ReturnType:
+            additionalProperties: false
+            properties:
+              type:
+                enum:
+                  - string
+                  - number
+                  - boolean
+                  - array
+                  - object
+                  - json
+                  - union
+                  - chat_completion_input
+                  - completion_input
+                  - agent_turn_input
+            required:
+              - type
+            type: object
+    - command: replaceProperties
+      reason: Replace return type properties with better model (see above)
+      args:
+        filter:
+          only:
+            - $.components.schemas.ScoringFn.properties.return_type
+            - $.components.schemas.RegisterScoringFunctionRequest.properties.return_type
+        value:
+          $ref: '#/components/schemas/ReturnType'
+    - command: oneOfToAnyOf
+      reason: Prism (mock server) doesn't like one of our requests as it technically
+        matches multiple variants
+readme:
+  example_requests:
+    default:
+      type: request
+      endpoint: post /v1/chat/completions
+      params: {}
+    headline:
+      type: request
+      endpoint: get /v1/models
+      params: {}
+    pagination:
+      type: request
+      endpoint: post /v1/chat/completions
+      params: {}
-# `resources` define the structure and organziation for your API, such as how
-# methods and models are grouped together and accessed. See the [configuration
-# guide] for more information.
-#
-# [configuration guide]:
-# https://app.stainlessapi.com/docs/guides/configure#resources
 resources:
   $shared:
     models:
@@ -128,19 +176,17 @@ resources:
     methods:
       get: get /v1/tools/{tool_name}
       list:
-        endpoint: get /v1/tools
         paginated: false
+        endpoint: get /v1/tools
-
   tool_runtime:
     models:
       tool_def: ToolDef
       tool_invocation_result: ToolInvocationResult
     methods:
       list_tools:
-        endpoint: get /v1/tool-runtime/list-tools
         paginated: false
+        endpoint: get /v1/tool-runtime/list-tools
       invoke_tool: post /v1/tool-runtime/invoke
-
   responses:
     models:
       response_object_stream: OpenAIResponseObjectStream
@@ -148,10 +194,10 @@ resources:
     methods:
       create:
         type: http
-        endpoint: post /v1/responses
         streaming:
           stream_event_model: responses.response_object_stream
           param_discriminator: stream
+        endpoint: post /v1/responses
       retrieve: get /v1/responses/{response_id}
       list:
         type: http
@@ -164,9 +210,8 @@ resources:
         methods:
           list:
             type: http
-            endpoint: get /v1/responses/{response_id}/input_items
             paginated: false
+            endpoint: get /v1/responses/{response_id}/input_items
-
   prompts:
     models:
       prompt: Prompt
@@ -174,8 +219,8 @@ resources:
     methods:
       create: post /v1/prompts
       list:
-        endpoint: get /v1/prompts
         paginated: false
+        endpoint: get /v1/prompts
       retrieve: get /v1/prompts/{prompt_id}
       update: post /v1/prompts/{prompt_id}
       delete: delete /v1/prompts/{prompt_id}
@@ -184,9 +229,8 @@ resources:
       versions:
         methods:
           list:
-            endpoint: get /v1/prompts/{prompt_id}/versions
             paginated: false
+            endpoint: get /v1/prompts/{prompt_id}/versions
-
   conversations:
     models:
       conversation_object: Conversation
@@ -216,7 +260,6 @@ resources:
           delete:
             type: http
            endpoint: delete /v1/conversations/{conversation_id}/items/{item_id}
-
   inspect:
     models:
       healthInfo: HealthInfo
@@ -226,13 +269,11 @@ resources:
     methods:
       health: get /v1/health
       version: get /v1/version
-
   embeddings:
     models:
       create_embeddings_response: OpenAIEmbeddingsResponse
     methods:
       create: post /v1/embeddings
-
   chat:
     models:
       chat_completion_chunk: OpenAIChatCompletionChunk
@@ -241,14 +282,14 @@ resources:
       methods:
         create:
           type: http
-          endpoint: post /v1/chat/completions
           streaming:
             stream_event_model: chat.chat_completion_chunk
             param_discriminator: stream
+          endpoint: post /v1/chat/completions
         list:
           type: http
-          endpoint: get /v1/chat/completions
           paginated: false
+          endpoint: get /v1/chat/completions
         retrieve:
           type: http
           endpoint: get /v1/chat/completions/{completion_id}
@@ -256,17 +297,15 @@ resources:
     methods:
       create:
         type: http
-        endpoint: post /v1/completions
         streaming:
           param_discriminator: stream
+        endpoint: post /v1/completions
-
   vector_io:
     models:
       queryChunksResponse: QueryChunksResponse
     methods:
       insert: post /v1/vector-io/insert
       query: post /v1/vector-io/query
-
   vector_stores:
     models:
       vector_store: VectorStoreObject
@@ -275,8 +314,7 @@ resources:
       vector_store_search_response: VectorStoreSearchResponsePage
     methods:
       create: post /v1/vector_stores
-      list:
-        endpoint: get /v1/vector_stores
+      list: get /v1/vector_stores
       retrieve: get /v1/vector_stores/{vector_store_id}
       update: post /v1/vector_stores/{vector_store_id}
       delete: delete /v1/vector_stores/{vector_store_id}
@@ -301,15 +339,14 @@ resources:
           retrieve: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}
           list_files: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files
           cancel: post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel
-
   models:
     models:
       model: OpenAIModel
      list_models_response: OpenAIListModelsResponse
     methods:
       list:
-        endpoint: get /v1/models
         paginated: false
+        endpoint: get /v1/models
       retrieve: get /v1/models/{model_id}
       register: post /v1/models
       unregister: delete /v1/models/{model_id}
@@ -317,38 +354,33 @@ resources:
       openai:
         methods:
           list:
-            endpoint: get /v1/models
             paginated: false
+            endpoint: get /v1/models
-
   providers:
     models:
       list_providers_response: ListProvidersResponse
     methods:
       list:
-        endpoint: get /v1/providers
         paginated: false
+        endpoint: get /v1/providers
       retrieve: get /v1/providers/{provider_id}
-
   routes:
     models:
       list_routes_response: ListRoutesResponse
     methods:
       list:
-        endpoint: get /v1/inspect/routes
         paginated: false
+        endpoint: get /v1/inspect/routes
-
   moderations:
     models:
       create_response: ModerationObject
     methods:
       create: post /v1/moderations
-
   safety:
     models:
       run_shield_response: RunShieldResponse
     methods:
       run_shield: post /v1/safety/run-shield
-
   shields:
     models:
       shield: Shield
@@ -356,53 +388,48 @@ resources:
     methods:
       retrieve: get /v1/shields/{identifier}
       list:
-        endpoint: get /v1/shields
         paginated: false
+        endpoint: get /v1/shields
       register: post /v1/shields
       delete: delete /v1/shields/{identifier}
-
   scoring:
     methods:
       score: post /v1/scoring/score
       score_batch: post /v1/scoring/score-batch
-
   scoring_functions:
-    methods:
-      retrieve: get /v1/scoring-functions/{scoring_fn_id}
-      list:
-        endpoint: get /v1/scoring-functions
-        paginated: false
-      register: post /v1/scoring-functions
-      unregister: delete /v1/scoring-functions/{scoring_fn_id}
     models:
       scoring_fn: ScoringFn
       scoring_fn_params: ScoringFnParams
       list_scoring_functions_response: ListScoringFunctionsResponse
+    methods:
+      retrieve: get /v1/scoring-functions/{scoring_fn_id}
+      list:
+        paginated: false
+        endpoint: get /v1/scoring-functions
+      register: post /v1/scoring-functions
+      unregister: delete /v1/scoring-functions/{scoring_fn_id}
-
   files:
+    models:
+      file: OpenAIFileObject
+      list_files_response: ListOpenAIFileResponse
+      delete_file_response: OpenAIFileDeleteResponse
     methods:
       create: post /v1/files
       list: get /v1/files
       retrieve: get /v1/files/{file_id}
       delete: delete /v1/files/{file_id}
       content: get /v1/files/{file_id}/content
-    models:
-      file: OpenAIFileObject
-      list_files_response: ListOpenAIFileResponse
-      delete_file_response: OpenAIFileDeleteResponse
-
   batches:
     methods:
       create: post /v1/batches
       list: get /v1/batches
       retrieve: get /v1/batches/{batch_id}
       cancel: post /v1/batches/{batch_id}/cancel
-
   alpha:
     subresources:
       inference:
         methods:
           rerank: post /v1alpha/inference/rerank
       post_training:
         models:
           algorithm_config: AlgorithmConfig
@@ -418,39 +445,35 @@ resources:
           cancel: post /v1alpha/post-training/job/cancel
           status: get /v1alpha/post-training/job/status
           list:
-            endpoint: get /v1alpha/post-training/jobs
             paginated: false
+            endpoint: get /v1alpha/post-training/jobs
-
       benchmarks:
-        methods:
-          retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
-          list:
-            endpoint: get /v1alpha/eval/benchmarks
-            paginated: false
-          register: post /v1alpha/eval/benchmarks
-          unregister: delete /v1alpha/eval/benchmarks/{benchmark_id}
         models:
           benchmark: Benchmark
           list_benchmarks_response: ListBenchmarksResponse
+        methods:
+          retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
+          list:
+            paginated: false
+            endpoint: get /v1alpha/eval/benchmarks
+          register: post /v1alpha/eval/benchmarks
+          unregister: delete /v1alpha/eval/benchmarks/{benchmark_id}
-
       eval:
+        models:
+          evaluate_response: EvaluateResponse
+          benchmark_config: BenchmarkConfig
+          job: Job
         methods:
           evaluate_rows: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
          run_eval: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
           evaluate_rows_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
           run_eval_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
         subresources:
           jobs:
             methods:
               cancel: delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
               status: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
               retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result
-        models:
-          evaluate_response: EvaluateResponse
-          benchmark_config: BenchmarkConfig
-          job: Job
@@ -460,74 +483,8 @@ resources:
         register: post /v1beta/datasets
         retrieve: get /v1beta/datasets/{dataset_id}
         list:
-          endpoint: get /v1beta/datasets
           paginated: false
+          endpoint: get /v1beta/datasets
         unregister: delete /v1beta/datasets/{dataset_id}
         iterrows: get /v1beta/datasetio/iterrows/{dataset_id}
         appendrows: post /v1beta/datasetio/append-rows/{dataset_id}
-
-settings:
-  license: MIT
-  unwrap_response_fields: [data]
-  file_header: |
-    Copyright (c) Meta Platforms, Inc. and affiliates.
-    All rights reserved.
-
-    This source code is licensed under the terms described in the LICENSE file in
-    the root directory of this source tree.
-
-openapi:
-  transformations:
-    - command: mergeObject
-      reason: Better return_type using enum
-      args:
-        target:
-          - "$.components.schemas"
-        object:
-          ReturnType:
-            additionalProperties: false
-            properties:
-              type:
-                enum:
-                  - string
-                  - number
-                  - boolean
-                  - array
-                  - object
-                  - json
-                  - union
-                  - chat_completion_input
-                  - completion_input
-                  - agent_turn_input
-            required:
-              - type
-            type: object
-    - command: replaceProperties
-      reason: Replace return type properties with better model (see above)
-      args:
-        filter:
-          only:
-            - "$.components.schemas.ScoringFn.properties.return_type"
-            - "$.components.schemas.RegisterScoringFunctionRequest.properties.return_type"
-        value:
-          $ref: "#/components/schemas/ReturnType"
-    - command: oneOfToAnyOf
-      reason: Prism (mock server) doesn't like one of our requests as it technically matches multiple variants
-
-# `readme` is used to configure the code snippets that will be rendered in the
-# README.md of various SDKs. In particular, you can change the `headline`
-# snippet's endpoint and the arguments to call it with.
-readme:
-  example_requests:
-    default:
-      type: request
-      endpoint: post /v1/chat/completions
-      params: &ref_0 {}
-    headline:
-      type: request
-      endpoint: get /v1/models
-      params: *ref_0
-    pagination:
-      type: request
-      endpoint: post /v1/chat/completions
-      params: {}

scripts/openapi_generator/__init__.py

@@ -11,6 +11,13 @@ This module provides functionality to generate OpenAPI specifications
 from FastAPI applications.
 """
-from .main import generate_openapi_spec, main
 __all__ = ["generate_openapi_spec", "main"]
+
+
+def __getattr__(name: str):
+    if name in {"generate_openapi_spec", "main"}:
+        from .main import generate_openapi_spec as _gos
+        from .main import main as _main
+
+        return {"generate_openapi_spec": _gos, "main": _main}[name]
+    raise AttributeError(name)
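The module-level `__getattr__` (PEP 562) replaces the eager import, presumably so that `import scripts.openapi_generator` stays cheap and the FastAPI-based `.main` module is only loaded on first use. A small sketch of the resulting behavior:

    import scripts.openapi_generator as gen  # does not import .main yet

    fn = gen.generate_openapi_spec  # first attribute access triggers the .main import
    gen.other_name                  # raises AttributeError: other_name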

scripts/openapi_generator/stainless_config/__init__.py

@@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Package marker for Stainless config generation.

scripts/openapi_generator/stainless_config/generate_config.py

@@ -0,0 +1,821 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from __future__ import annotations

from collections.abc import Iterator
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

import yaml

HEADER = "# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json\n\n"

SECTION_ORDER = [
"organization",
"security",
"security_schemes",
"targets",
"client_settings",
"environments",
"pagination",
"settings",
"openapi",
"readme",
"resources",
]

ORGANIZATION = {
"name": "llama-stack-client",
"docs": "https://llama-stack.readthedocs.io/en/latest/",
"contact": "llamastack@meta.com",
}

SECURITY = [{}, {"BearerAuth": []}]

SECURITY_SCHEMES = {"BearerAuth": {"type": "http", "scheme": "bearer"}}

TARGETS = {
"node": {
"package_name": "llama-stack-client",
"production_repo": "llamastack/llama-stack-client-typescript",
"publish": {"npm": False},
},
"python": {
"package_name": "llama_stack_client",
"production_repo": "llamastack/llama-stack-client-python",
"options": {"use_uv": True},
"publish": {"pypi": True},
"project_name": "llama_stack_client",
},
"kotlin": {
"reverse_domain": "com.llama_stack_client.api",
"production_repo": None,
"publish": {"maven": False},
},
"go": {
"package_name": "llama-stack-client",
"production_repo": "llamastack/llama-stack-client-go",
"options": {"enable_v2": True, "back_compat_use_shared_package": False},
},
}

CLIENT_SETTINGS = {
"default_env_prefix": "LLAMA_STACK_CLIENT",
"opts": {
"api_key": {
"type": "string",
"read_env": "LLAMA_STACK_CLIENT_API_KEY",
"auth": {"security_scheme": "BearerAuth"},
"nullable": True,
}
},
}

ENVIRONMENTS = {"production": "http://any-hosted-llama-stack.com"}

PAGINATION = [
{
"name": "datasets_iterrows",
"type": "offset",
"request": {
"dataset_id": {"type": "string"},
"start_index": {
"type": "integer",
"x-stainless-pagination-property": {"purpose": "offset_count_param"},
},
"limit": {"type": "integer"},
},
"response": {
"data": {"type": "array", "items": {"type": "object"}},
"next_index": {
"type": "integer",
"x-stainless-pagination-property": {"purpose": "offset_count_start_field"},
},
},
},
{
"name": "openai_cursor_page",
"type": "cursor",
"request": {
"limit": {"type": "integer"},
"after": {
"type": "string",
"x-stainless-pagination-property": {"purpose": "next_cursor_param"},
},
},
"response": {
"data": {"type": "array", "items": {}},
"has_more": {"type": "boolean"},
"last_id": {
"type": "string",
"x-stainless-pagination-property": {"purpose": "next_cursor_field"},
},
},
},
]

SETTINGS = {
"license": "MIT",
"unwrap_response_fields": ["data"],
"file_header": "Copyright (c) Meta Platforms, Inc. and affiliates.\n"
"All rights reserved.\n"
"\n"
"This source code is licensed under the terms described in the "
"LICENSE file in\n"
"the root directory of this source tree.\n",
}

OPENAPI = {
"transformations": [
{
"command": "mergeObject",
"reason": "Better return_type using enum",
"args": {
"target": ["$.components.schemas"],
"object": {
"ReturnType": {
"additionalProperties": False,
"properties": {
"type": {
"enum": [
"string",
"number",
"boolean",
"array",
"object",
"json",
"union",
"chat_completion_input",
"completion_input",
"agent_turn_input",
]
}
},
"required": ["type"],
"type": "object",
}
},
},
},
{
"command": "replaceProperties",
"reason": "Replace return type properties with better model (see above)",
"args": {
"filter": {
"only": [
"$.components.schemas.ScoringFn.properties.return_type",
"$.components.schemas.RegisterScoringFunctionRequest.properties.return_type",
]
},
"value": {"$ref": "#/components/schemas/ReturnType"},
},
},
{
"command": "oneOfToAnyOf",
"reason": "Prism (mock server) doesn't like one of our "
"requests as it technically matches multiple "
"variants",
},
]
}

README = {
"example_requests": {
"default": {
"type": "request",
"endpoint": "post /v1/chat/completions",
"params": {},
},
"headline": {"type": "request", "endpoint": "get /v1/models", "params": {}},
"pagination": {
"type": "request",
"endpoint": "post /v1/chat/completions",
"params": {},
},
}
}

ALL_RESOURCES = {
"$shared": {
"models": {
"interleaved_content_item": "InterleavedContentItem",
"interleaved_content": "InterleavedContent",
"param_type": "ParamType",
"safety_violation": "SafetyViolation",
"sampling_params": "SamplingParams",
"scoring_result": "ScoringResult",
"system_message": "SystemMessage",
}
},
"toolgroups": {
"models": {
"tool_group": "ToolGroup",
"list_tool_groups_response": "ListToolGroupsResponse",
},
"methods": {
"register": "post /v1/toolgroups",
"get": "get /v1/toolgroups/{toolgroup_id}",
"list": "get /v1/toolgroups",
"unregister": "delete /v1/toolgroups/{toolgroup_id}",
},
},
"tools": {
"methods": {
"get": "get /v1/tools/{tool_name}",
"list": {"paginated": False, "endpoint": "get /v1/tools"},
}
},
"tool_runtime": {
"models": {
"tool_def": "ToolDef",
"tool_invocation_result": "ToolInvocationResult",
},
"methods": {
"list_tools": {
"paginated": False,
"endpoint": "get /v1/tool-runtime/list-tools",
},
"invoke_tool": "post /v1/tool-runtime/invoke",
},
},
"responses": {
"models": {
"response_object_stream": "OpenAIResponseObjectStream",
"response_object": "OpenAIResponseObject",
},
"methods": {
"create": {
"type": "http",
"streaming": {
"stream_event_model": "responses.response_object_stream",
"param_discriminator": "stream",
},
"endpoint": "post /v1/responses",
},
"retrieve": "get /v1/responses/{response_id}",
"list": {"type": "http", "endpoint": "get /v1/responses"},
"delete": {
"type": "http",
"endpoint": "delete /v1/responses/{response_id}",
},
},
"subresources": {
"input_items": {
"methods": {
"list": {
"type": "http",
"paginated": False,
"endpoint": "get /v1/responses/{response_id}/input_items",
}
}
}
},
},
"prompts": {
"models": {"prompt": "Prompt", "list_prompts_response": "ListPromptsResponse"},
"methods": {
"create": "post /v1/prompts",
"list": {"paginated": False, "endpoint": "get /v1/prompts"},
"retrieve": "get /v1/prompts/{prompt_id}",
"update": "post /v1/prompts/{prompt_id}",
"delete": "delete /v1/prompts/{prompt_id}",
"set_default_version": "post /v1/prompts/{prompt_id}/set-default-version",
},
"subresources": {
"versions": {
"methods": {
"list": {
"paginated": False,
"endpoint": "get /v1/prompts/{prompt_id}/versions",
}
}
}
},
},
"conversations": {
"models": {"conversation_object": "Conversation"},
"methods": {
"create": {"type": "http", "endpoint": "post /v1/conversations"},
"retrieve": "get /v1/conversations/{conversation_id}",
"update": {
"type": "http",
"endpoint": "post /v1/conversations/{conversation_id}",
},
"delete": {
"type": "http",
"endpoint": "delete /v1/conversations/{conversation_id}",
},
},
"subresources": {
"items": {
"methods": {
"get": {
"type": "http",
"endpoint": "get /v1/conversations/{conversation_id}/items/{item_id}",
},
"list": {
"type": "http",
"endpoint": "get /v1/conversations/{conversation_id}/items",
},
"create": {
"type": "http",
"endpoint": "post /v1/conversations/{conversation_id}/items",
},
"delete": {
"type": "http",
"endpoint": "delete /v1/conversations/{conversation_id}/items/{item_id}",
},
}
}
},
},
"inspect": {
"models": {
"healthInfo": "HealthInfo",
"providerInfo": "ProviderInfo",
"routeInfo": "RouteInfo",
"versionInfo": "VersionInfo",
},
"methods": {"health": "get /v1/health", "version": "get /v1/version"},
},
"embeddings": {
"models": {"create_embeddings_response": "OpenAIEmbeddingsResponse"},
"methods": {"create": "post /v1/embeddings"},
},
"chat": {
"models": {"chat_completion_chunk": "OpenAIChatCompletionChunk"},
"subresources": {
"completions": {
"methods": {
"create": {
"type": "http",
"streaming": {
"stream_event_model": "chat.chat_completion_chunk",
"param_discriminator": "stream",
},
"endpoint": "post /v1/chat/completions",
},
"list": {
"type": "http",
"paginated": False,
"endpoint": "get /v1/chat/completions",
},
"retrieve": {
"type": "http",
"endpoint": "get /v1/chat/completions/{completion_id}",
},
}
}
},
},
"completions": {
"methods": {
"create": {
"type": "http",
"streaming": {"param_discriminator": "stream"},
"endpoint": "post /v1/completions",
}
}
},
"vector_io": {
"models": {"queryChunksResponse": "QueryChunksResponse"},
"methods": {
"insert": "post /v1/vector-io/insert",
"query": "post /v1/vector-io/query",
},
},
"vector_stores": {
"models": {
"vector_store": "VectorStoreObject",
"list_vector_stores_response": "VectorStoreListResponse",
"vector_store_delete_response": "VectorStoreDeleteResponse",
"vector_store_search_response": "VectorStoreSearchResponsePage",
},
"methods": {
"create": "post /v1/vector_stores",
"list": "get /v1/vector_stores",
"retrieve": "get /v1/vector_stores/{vector_store_id}",
"update": "post /v1/vector_stores/{vector_store_id}",
"delete": "delete /v1/vector_stores/{vector_store_id}",
"search": "post /v1/vector_stores/{vector_store_id}/search",
},
"subresources": {
"files": {
"models": {"vector_store_file": "VectorStoreFileObject"},
"methods": {
"list": "get /v1/vector_stores/{vector_store_id}/files",
"retrieve": "get /v1/vector_stores/{vector_store_id}/files/{file_id}",
"update": "post /v1/vector_stores/{vector_store_id}/files/{file_id}",
"delete": "delete /v1/vector_stores/{vector_store_id}/files/{file_id}",
"create": "post /v1/vector_stores/{vector_store_id}/files",
"content": "get /v1/vector_stores/{vector_store_id}/files/{file_id}/content",
},
},
"file_batches": {
"models": {
"vector_store_file_batches": "VectorStoreFileBatchObject",
"list_vector_store_files_in_batch_response": "VectorStoreFilesListInBatchResponse",
},
"methods": {
"create": "post /v1/vector_stores/{vector_store_id}/file_batches",
"retrieve": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
"list_files": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
"cancel": "post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
},
},
},
},
"models": {
"models": {
"model": "OpenAIModel",
"list_models_response": "OpenAIListModelsResponse",
},
"methods": {
"list": {"paginated": False, "endpoint": "get /v1/models"},
"retrieve": "get /v1/models/{model_id}",
"register": "post /v1/models",
"unregister": "delete /v1/models/{model_id}",
},
"subresources": {"openai": {"methods": {"list": {"paginated": False, "endpoint": "get /v1/models"}}}},
},
"providers": {
"models": {"list_providers_response": "ListProvidersResponse"},
"methods": {
"list": {"paginated": False, "endpoint": "get /v1/providers"},
"retrieve": "get /v1/providers/{provider_id}",
},
},
"routes": {
"models": {"list_routes_response": "ListRoutesResponse"},
"methods": {"list": {"paginated": False, "endpoint": "get /v1/inspect/routes"}},
},
"moderations": {
"models": {"create_response": "ModerationObject"},
"methods": {"create": "post /v1/moderations"},
},
"safety": {
"models": {"run_shield_response": "RunShieldResponse"},
"methods": {"run_shield": "post /v1/safety/run-shield"},
},
"shields": {
"models": {"shield": "Shield", "list_shields_response": "ListShieldsResponse"},
"methods": {
"retrieve": "get /v1/shields/{identifier}",
"list": {"paginated": False, "endpoint": "get /v1/shields"},
"register": "post /v1/shields",
"delete": "delete /v1/shields/{identifier}",
},
},
"scoring": {
"methods": {
"score": "post /v1/scoring/score",
"score_batch": "post /v1/scoring/score-batch",
}
},
"scoring_functions": {
"models": {
"scoring_fn": "ScoringFn",
"scoring_fn_params": "ScoringFnParams",
"list_scoring_functions_response": "ListScoringFunctionsResponse",
},
"methods": {
"retrieve": "get /v1/scoring-functions/{scoring_fn_id}",
"list": {"paginated": False, "endpoint": "get /v1/scoring-functions"},
"register": "post /v1/scoring-functions",
"unregister": "delete /v1/scoring-functions/{scoring_fn_id}",
},
},
"files": {
"models": {
"file": "OpenAIFileObject",
"list_files_response": "ListOpenAIFileResponse",
"delete_file_response": "OpenAIFileDeleteResponse",
},
"methods": {
"create": "post /v1/files",
"list": "get /v1/files",
"retrieve": "get /v1/files/{file_id}",
"delete": "delete /v1/files/{file_id}",
"content": "get /v1/files/{file_id}/content",
},
},
"batches": {
"methods": {
"create": "post /v1/batches",
"list": "get /v1/batches",
"retrieve": "get /v1/batches/{batch_id}",
"cancel": "post /v1/batches/{batch_id}/cancel",
}
},
"alpha": {
"subresources": {
"inference": {"methods": {"rerank": "post /v1alpha/inference/rerank"}},
"post_training": {
"models": {
"algorithm_config": "AlgorithmConfig",
"post_training_job": "PostTrainingJob",
"list_post_training_jobs_response": "ListPostTrainingJobsResponse",
},
"methods": {
"preference_optimize": "post /v1alpha/post-training/preference-optimize",
"supervised_fine_tune": "post /v1alpha/post-training/supervised-fine-tune",
},
"subresources": {
"job": {
"methods": {
"artifacts": "get /v1alpha/post-training/job/artifacts",
"cancel": "post /v1alpha/post-training/job/cancel",
"status": "get /v1alpha/post-training/job/status",
"list": {
"paginated": False,
"endpoint": "get /v1alpha/post-training/jobs",
},
}
}
},
},
"benchmarks": {
"models": {
"benchmark": "Benchmark",
"list_benchmarks_response": "ListBenchmarksResponse",
},
"methods": {
"retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}",
"list": {
"paginated": False,
"endpoint": "get /v1alpha/eval/benchmarks",
},
"register": "post /v1alpha/eval/benchmarks",
"unregister": "delete /v1alpha/eval/benchmarks/{benchmark_id}",
},
},
"eval": {
"models": {
"evaluate_response": "EvaluateResponse",
"benchmark_config": "BenchmarkConfig",
"job": "Job",
},
"methods": {
"evaluate_rows": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
"run_eval": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
"evaluate_rows_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
"run_eval_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
},
"subresources": {
"jobs": {
"methods": {
"cancel": "delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
"status": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
"retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
}
}
},
},
}
},
"beta": {
"subresources": {
"datasets": {
"models": {"list_datasets_response": "ListDatasetsResponse"},
"methods": {
"register": "post /v1beta/datasets",
"retrieve": "get /v1beta/datasets/{dataset_id}",
"list": {"paginated": False, "endpoint": "get /v1beta/datasets"},
"unregister": "delete /v1beta/datasets/{dataset_id}",
"iterrows": "get /v1beta/datasetio/iterrows/{dataset_id}",
"appendrows": "post /v1beta/datasetio/append-rows/{dataset_id}",
},
}
}
},
}

HTTP_METHODS = {"get", "post", "put", "patch", "delete", "options", "head"}


@dataclass
class Endpoint:
    method: str
    path: str
    extra: dict[str, Any] = field(default_factory=dict)

    @classmethod
def from_config(cls, value: Any) -> Endpoint:
if isinstance(value, str):
method, _, path = value.partition(" ")
return cls._from_parts(method, path)
if isinstance(value, dict) and "endpoint" in value:
method, _, path = value["endpoint"].partition(" ")
extra = {k: v for k, v in value.items() if k != "endpoint"}
endpoint = cls._from_parts(method, path)
endpoint.extra.update(extra)
return endpoint
raise ValueError(f"Unsupported endpoint value: {value!r}")
@classmethod
def _from_parts(cls, method: str, path: str) -> Endpoint:
method = method.strip().lower()
path = path.strip()
if method not in HTTP_METHODS:
raise ValueError(f"Unsupported HTTP method for Stainless config: {method!r}")
if not path.startswith("/"):
raise ValueError(f"Endpoint path must start with '/': {path!r}")
        return cls(method=method, path=path)

    def to_config(self) -> Any:
if not self.extra:
return f"{self.method} {self.path}"
data = dict(self.extra)
data["endpoint"] = f"{self.method} {self.path}"
        return data

    def route_key(self) -> str:
        return f"{self.method} {self.path}"


@dataclass
class Resource:
    models: dict[str, str] | None = None
    methods: dict[str, Endpoint] = field(default_factory=dict)
    subresources: dict[str, Resource] = field(default_factory=dict)

    @classmethod
def from_dict(cls, data: dict[str, Any]) -> Resource:
models = data.get("models")
methods = {name: Endpoint.from_config(value) for name, value in data.get("methods", {}).items()}
subresources = {name: cls.from_dict(value) for name, value in data.get("subresources", {}).items()}
        return cls(models=models, methods=methods, subresources=subresources)

    def to_config(self) -> dict[str, Any]:
result: dict[str, Any] = {}
if self.models:
result["models"] = self.models
if self.methods:
result["methods"] = {name: endpoint.to_config() for name, endpoint in self.methods.items()}
if self.subresources:
result["subresources"] = {name: resource.to_config() for name, resource in self.subresources.items()}
        return result

    def collect_endpoint_paths(self) -> set[str]:
paths = {endpoint.route_key() for endpoint in self.methods.values()}
for subresource in self.subresources.values():
paths.update(subresource.collect_endpoint_paths())
        return paths

    def iter_endpoints(self, prefix: str) -> Iterator[tuple[str, str]]:
for method_name, endpoint in self.methods.items():
label = f"{prefix}.{method_name}" if prefix else method_name
yield endpoint.route_key(), label
for sub_name, subresource in self.subresources.items():
sub_prefix = f"{prefix}.{sub_name}" if prefix else sub_name
            yield from subresource.iter_endpoints(sub_prefix)


_RESOURCES = {name: Resource.from_dict(data) for name, data in ALL_RESOURCES.items()}


def _load_openapi_paths(openapi_path: Path) -> set[str]:
spec = yaml.safe_load(openapi_path.read_text()) or {}
paths: set[str] = set()
for path, path_item in (spec.get("paths") or {}).items():
if not isinstance(path_item, dict):
continue
for method, operation in path_item.items():
if not isinstance(operation, dict):
continue
paths.add(f"{str(method).lower()} {path}")
    return paths


@dataclass(frozen=True)
class StainlessConfig:
    organization: dict[str, Any]
    security: list[Any]
    security_schemes: dict[str, Any]
    targets: dict[str, Any]
    client_settings: dict[str, Any]
    environments: dict[str, Any]
    pagination: list[dict[str, Any]]
    settings: dict[str, Any]
    openapi: dict[str, Any]
    readme: dict[str, Any]
    resources: dict[str, Resource]

    @classmethod
def make(cls) -> StainlessConfig:
return cls(
organization=ORGANIZATION,
security=SECURITY,
security_schemes=SECURITY_SCHEMES,
targets=TARGETS,
client_settings=CLIENT_SETTINGS,
environments=ENVIRONMENTS,
pagination=PAGINATION,
settings=SETTINGS,
openapi=OPENAPI,
readme=README,
resources=dict(_RESOURCES),
        )

    def referenced_paths(self) -> set[str]:
paths: set[str] = set()
for resource in self.resources.values():
paths.update(resource.collect_endpoint_paths())
paths.update(self.readme_endpoint_paths())
        return paths

    def readme_endpoint_paths(self) -> set[str]:
example_requests = self.readme.get("example_requests", {}) if self.readme else {}
paths: set[str] = set()
for entry in example_requests.values():
endpoint = entry.get("endpoint") if isinstance(entry, dict) else None
if isinstance(endpoint, str):
method, _, route = endpoint.partition(" ")
method = method.strip().lower()
route = route.strip()
if method and route:
paths.add(f"{method} {route}")
        return paths

    def endpoint_map(self) -> dict[str, list[str]]:
mapping: dict[str, list[str]] = {}
for resource_name, resource in self.resources.items():
for route, label in resource.iter_endpoints(resource_name):
mapping.setdefault(route, []).append(label)
        return mapping

    def validate_unique_endpoints(self) -> None:
duplicates: dict[str, list[str]] = {}
for route, labels in self.endpoint_map().items():
top_levels = {label.split(".", 1)[0] for label in labels}
if len(top_levels) > 1:
duplicates[route] = labels
if duplicates:
formatted = "\n".join(
f" - {route} defined in: {', '.join(sorted(labels))}" for route, labels in sorted(duplicates.items())
)
raise ValueError("Duplicate endpoints found across resources:\n" + formatted)
def validate_readme_endpoints(self) -> None:
resource_paths: set[str] = set()
for resource in self.resources.values():
resource_paths.update(resource.collect_endpoint_paths())
missing = sorted(path for path in self.readme_endpoint_paths() if path not in resource_paths)
if missing:
formatted = "\n".join(f" - {path}" for path in missing)
raise ValueError("README example endpoints are not present in Stainless resources:\n" + formatted)
def to_dict(self) -> dict[str, Any]:
cfg: dict[str, Any] = {}
for section in SECTION_ORDER:
if section == "resources":
cfg[section] = {name: resource.to_config() for name, resource in self.resources.items()}
continue
cfg[section] = getattr(self, section)
        return cfg

    def validate_against_openapi(self, openapi_path: Path) -> None:
if not openapi_path.exists():
raise FileNotFoundError(f"OpenAPI spec not found at {openapi_path}")
spec_paths = _load_openapi_paths(openapi_path)
config_paths = self.referenced_paths()
missing = sorted(path for path in config_paths if path not in spec_paths)
if missing:
formatted = "\n".join(f" - {path}" for path in missing)
raise ValueError("Stainless config references missing endpoints:\n" + formatted)
def validate(self, openapi_path: Path | None = None) -> None:
self.validate_unique_endpoints()
self.validate_readme_endpoints()
if openapi_path is not None:
            self.validate_against_openapi(openapi_path)


def build_config() -> dict[str, Any]:
    return StainlessConfig.make().to_dict()


def write_config(repo_root: Path, openapi_path: Path | None = None) -> Path:
stainless_config = StainlessConfig.make()
spec_path = (openapi_path or (repo_root / "client-sdks" / "stainless" / "openapi.yml")).resolve()
stainless_config.validate(spec_path)
yaml_text = yaml.safe_dump(stainless_config.to_dict(), sort_keys=False)
output = repo_root / "client-sdks" / "stainless" / "config.yml"
output.write_text(HEADER + yaml_text)
    return output


def main() -> None:
repo_root = Path(__file__).resolve().parents[3]
output = write_config(repo_root)
print(f"Wrote Stainless config: {output}")
if __name__ == "__main__":
main()
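A quick illustration of the endpoint normalization these dataclasses perform, using method entries from ALL_RESOURCES above:

    Endpoint.from_config("get /v1/models").to_config()
    # -> "get /v1/models"
    Endpoint.from_config({"paginated": False, "endpoint": "get /v1/tools"}).route_key()
    # -> "get /v1/tools"
    Endpoint.from_config("fetch /v1/models")
    # -> ValueError: Unsupported HTTP method for Stainless config: 'fetch'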

scripts/run_openapi_generator.sh

@@ -17,3 +17,5 @@ PYTHONPATH=$PYTHONPATH:$stack_dir \
 python3 -m scripts.openapi_generator "$stack_dir"/docs/static
 cp "$stack_dir"/docs/static/stainless-llama-stack-spec.yaml "$stack_dir"/client-sdks/stainless/openapi.yml
+PYTHONPATH=$PYTHONPATH:$stack_dir \
+python3 -m scripts.openapi_generator.stainless_config.generate_config