Merge branch 'main' into routeur

This commit is contained in:
Sébastien Han 2025-11-24 14:58:43 +01:00 committed by GitHub
commit 3770963130
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
255 changed files with 18366 additions and 1909 deletions

View file

@ -11,6 +11,13 @@ This module provides functionality to generate OpenAPI specifications
from FastAPI applications.
"""
from .main import generate_openapi_spec, main
__all__ = ["generate_openapi_spec", "main"]
def __getattr__(name: str):
    """Lazily resolve the module's public callables on first attribute access."""
    if name not in {"generate_openapi_spec", "main"}:
        raise AttributeError(name)
    from .main import generate_openapi_spec, main
    return main if name == "main" else generate_openapi_spec

View file

@ -15,6 +15,7 @@ import typing
from typing import Annotated, Any, get_args, get_origin
from fastapi import FastAPI
from fastapi.params import Body as FastAPIBody
from pydantic import Field, create_model
from llama_stack.log import get_logger
@ -26,6 +27,8 @@ from .state import _extra_body_fields, register_dynamic_model
logger = get_logger(name=__name__, category="core")
type QueryParameter = tuple[str, type, Any, bool]
def _to_pascal_case(segment: str) -> str:
tokens = re.findall(r"[A-Za-z]+|\d+", segment)
@ -75,12 +78,12 @@ def _create_endpoint_with_request_model(
return endpoint
def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_any: bool = False) -> dict[str, tuple]:
def _build_field_definitions(query_parameters: list[QueryParameter], use_any: bool = False) -> dict[str, tuple]:
"""Build field definitions for a Pydantic model from query parameters."""
from typing import Any
field_definitions = {}
for param_name, param_type, default_value in query_parameters:
for param_name, param_type, default_value, _ in query_parameters:
if use_any:
field_definitions[param_name] = (Any, ... if default_value is inspect.Parameter.empty else default_value)
continue
@ -108,10 +111,10 @@ def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_
field_definitions[param_name] = (Any, ... if default_value is inspect.Parameter.empty else default_value)
# Ensure all parameters are included
expected_params = {name for name, _, _ in query_parameters}
expected_params = {name for name, _, _, _ in query_parameters}
missing = expected_params - set(field_definitions.keys())
if missing:
for param_name, _, default_value in query_parameters:
for param_name, _, default_value, _ in query_parameters:
if param_name in missing:
field_definitions[param_name] = (
Any,
@ -126,7 +129,7 @@ def _create_dynamic_request_model(
webmethod,
method_name: str,
http_method: str,
query_parameters: list[tuple[str, type, Any]],
query_parameters: list[QueryParameter],
use_any: bool = False,
variant_suffix: str | None = None,
) -> type | None:
@ -143,12 +146,12 @@ def _create_dynamic_request_model(
def _build_signature_params(
query_parameters: list[tuple[str, type, Any]],
query_parameters: list[QueryParameter],
) -> tuple[list[inspect.Parameter], dict[str, type]]:
"""Build signature parameters and annotations from query parameters."""
signature_params = []
param_annotations = {}
for param_name, param_type, default_value in query_parameters:
for param_name, param_type, default_value, _ in query_parameters:
param_annotations[param_name] = param_type
signature_params.append(
inspect.Parameter(
@ -219,6 +222,19 @@ def _is_extra_body_field(metadata_item: Any) -> bool:
return isinstance(metadata_item, ExtraBodyField)
def _should_embed_parameter(param_type: Any) -> bool:
"""Determine whether a parameter should be embedded (wrapped) in the request body."""
if get_origin(param_type) is Annotated:
args = get_args(param_type)
metadata = args[1:] if len(args) > 1 else []
for metadata_item in metadata:
if isinstance(metadata_item, FastAPIBody):
# FastAPI treats embed=None as False, so default to False when unset.
return bool(metadata_item.embed)
# Unannotated parameters default to embed=True through create_dynamic_typed_route.
return True
def _is_async_iterator_type(type_obj: Any) -> bool:
"""Check if a type is AsyncIterator or AsyncIterable."""
from collections.abc import AsyncIterable, AsyncIterator
@ -282,7 +298,7 @@ def _find_models_for_endpoint(
Returns:
tuple: (request_model, response_model, query_parameters, file_form_params, streaming_response_model, response_schema_name)
where query_parameters is a list of (name, type, default_value) tuples
where query_parameters is a list of (name, type, default_value, should_embed) tuples
and file_form_params is a list of inspect.Parameter objects for File()/Form() params
and streaming_response_model is the model for streaming responses (AsyncIterator content)
"""
@ -299,7 +315,7 @@ def _find_models_for_endpoint(
# Find request model and collect all body parameters
request_model = None
query_parameters = []
query_parameters: list[QueryParameter] = []
file_form_params = []
path_params = set()
extra_body_params = []
@ -325,6 +341,7 @@ def _find_models_for_endpoint(
# Check if it's a File() or Form() parameter - these need special handling
param_type = param.annotation
param_should_embed = _should_embed_parameter(param_type)
if _is_file_or_form_param(param_type):
# File() and Form() parameters must be in the function signature directly
# They cannot be part of a Pydantic model
@ -350,30 +367,14 @@ def _find_models_for_endpoint(
# Store as extra body parameter - exclude from request model
extra_body_params.append((param_name, base_type, extra_body_description))
continue
param_type = base_type
# Check if it's a Pydantic model (for POST/PUT requests)
if hasattr(param_type, "model_json_schema"):
# Collect all body parameters including Pydantic models
# We'll decide later whether to use a single model or create a combined one
query_parameters.append((param_name, param_type, param.default))
elif get_origin(param_type) is Annotated:
# Handle Annotated types - get the base type
args = get_args(param_type)
if args and hasattr(args[0], "model_json_schema"):
# Collect Pydantic models from Annotated types
query_parameters.append((param_name, args[0], param.default))
else:
# Regular annotated parameter (but not File/Form, already handled above)
query_parameters.append((param_name, param_type, param.default))
query_parameters.append((param_name, param_type, param.default, param_should_embed))
else:
# This is likely a body parameter for POST/PUT or query parameter for GET
# Store the parameter info for later use
# Preserve inspect.Parameter.empty to distinguish "no default" from "default=None"
default_value = param.default
# Extract the base type from union types (e.g., str | None -> str)
# Also make it safe for FastAPI to avoid forward reference issues
query_parameters.append((param_name, param_type, default_value))
# Regular annotated parameter (but not File/Form, already handled above)
query_parameters.append((param_name, param_type, param.default, param_should_embed))
# Store extra body fields for later use in post-processing
# We'll store them when the endpoint is created, as we need the full path
@ -385,8 +386,8 @@ def _find_models_for_endpoint(
# Otherwise, we'll create a combined request model from all parameters
# BUT: For GET requests, never create a request body - all parameters should be query parameters
if is_post_put and len(query_parameters) == 1:
param_name, param_type, default_value = query_parameters[0]
if hasattr(param_type, "model_json_schema"):
param_name, param_type, default_value, should_embed = query_parameters[0]
if hasattr(param_type, "model_json_schema") and not should_embed:
request_model = param_type
query_parameters = [] # Clear query_parameters so we use the single model
@ -495,7 +496,7 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
if file_form_params and is_post_put:
signature_params = list(file_form_params)
param_annotations = {param.name: param.annotation for param in file_form_params}
for param_name, param_type, default_value in query_parameters:
for param_name, param_type, default_value, _ in query_parameters:
signature_params.append(
inspect.Parameter(
param_name,

View file

@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Package marker for Stainless config generation.

View file

@ -0,0 +1,821 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from __future__ import annotations
from collections.abc import Iterator
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
import yaml
HEADER = "# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json\n\n"
SECTION_ORDER = [
"organization",
"security",
"security_schemes",
"targets",
"client_settings",
"environments",
"pagination",
"settings",
"openapi",
"readme",
"resources",
]
ORGANIZATION = {
"name": "llama-stack-client",
"docs": "https://llama-stack.readthedocs.io/en/latest/",
"contact": "llamastack@meta.com",
}
SECURITY = [{}, {"BearerAuth": []}]
SECURITY_SCHEMES = {"BearerAuth": {"type": "http", "scheme": "bearer"}}
TARGETS = {
"node": {
"package_name": "llama-stack-client",
"production_repo": "llamastack/llama-stack-client-typescript",
"publish": {"npm": False},
},
"python": {
"package_name": "llama_stack_client",
"production_repo": "llamastack/llama-stack-client-python",
"options": {"use_uv": True},
"publish": {"pypi": True},
"project_name": "llama_stack_client",
},
"kotlin": {
"reverse_domain": "com.llama_stack_client.api",
"production_repo": None,
"publish": {"maven": False},
},
"go": {
"package_name": "llama-stack-client",
"production_repo": "llamastack/llama-stack-client-go",
"options": {"enable_v2": True, "back_compat_use_shared_package": False},
},
}
CLIENT_SETTINGS = {
"default_env_prefix": "LLAMA_STACK_CLIENT",
"opts": {
"api_key": {
"type": "string",
"read_env": "LLAMA_STACK_CLIENT_API_KEY",
"auth": {"security_scheme": "BearerAuth"},
"nullable": True,
}
},
}
ENVIRONMENTS = {"production": "http://any-hosted-llama-stack.com"}
PAGINATION = [
{
"name": "datasets_iterrows",
"type": "offset",
"request": {
"dataset_id": {"type": "string"},
"start_index": {
"type": "integer",
"x-stainless-pagination-property": {"purpose": "offset_count_param"},
},
"limit": {"type": "integer"},
},
"response": {
"data": {"type": "array", "items": {"type": "object"}},
"next_index": {
"type": "integer",
"x-stainless-pagination-property": {"purpose": "offset_count_start_field"},
},
},
},
{
"name": "openai_cursor_page",
"type": "cursor",
"request": {
"limit": {"type": "integer"},
"after": {
"type": "string",
"x-stainless-pagination-property": {"purpose": "next_cursor_param"},
},
},
"response": {
"data": {"type": "array", "items": {}},
"has_more": {"type": "boolean"},
"last_id": {
"type": "string",
"x-stainless-pagination-property": {"purpose": "next_cursor_field"},
},
},
},
]
SETTINGS = {
"license": "MIT",
"unwrap_response_fields": ["data"],
"file_header": "Copyright (c) Meta Platforms, Inc. and affiliates.\n"
"All rights reserved.\n"
"\n"
"This source code is licensed under the terms described in the "
"LICENSE file in\n"
"the root directory of this source tree.\n",
}
OPENAPI = {
"transformations": [
{
"command": "mergeObject",
"reason": "Better return_type using enum",
"args": {
"target": ["$.components.schemas"],
"object": {
"ReturnType": {
"additionalProperties": False,
"properties": {
"type": {
"enum": [
"string",
"number",
"boolean",
"array",
"object",
"json",
"union",
"chat_completion_input",
"completion_input",
"agent_turn_input",
]
}
},
"required": ["type"],
"type": "object",
}
},
},
},
{
"command": "replaceProperties",
"reason": "Replace return type properties with better model (see above)",
"args": {
"filter": {
"only": [
"$.components.schemas.ScoringFn.properties.return_type",
"$.components.schemas.RegisterScoringFunctionRequest.properties.return_type",
]
},
"value": {"$ref": "#/components/schemas/ReturnType"},
},
},
{
"command": "oneOfToAnyOf",
"reason": "Prism (mock server) doesn't like one of our "
"requests as it technically matches multiple "
"variants",
},
]
}
README = {
"example_requests": {
"default": {
"type": "request",
"endpoint": "post /v1/chat/completions",
"params": {},
},
"headline": {"type": "request", "endpoint": "get /v1/models", "params": {}},
"pagination": {
"type": "request",
"endpoint": "post /v1/chat/completions",
"params": {},
},
}
}
ALL_RESOURCES = {
"$shared": {
"models": {
"interleaved_content_item": "InterleavedContentItem",
"interleaved_content": "InterleavedContent",
"param_type": "ParamType",
"safety_violation": "SafetyViolation",
"sampling_params": "SamplingParams",
"scoring_result": "ScoringResult",
"system_message": "SystemMessage",
}
},
"toolgroups": {
"models": {
"tool_group": "ToolGroup",
"list_tool_groups_response": "ListToolGroupsResponse",
},
"methods": {
"register": "post /v1/toolgroups",
"get": "get /v1/toolgroups/{toolgroup_id}",
"list": "get /v1/toolgroups",
"unregister": "delete /v1/toolgroups/{toolgroup_id}",
},
},
"tools": {
"methods": {
"get": "get /v1/tools/{tool_name}",
"list": {"paginated": False, "endpoint": "get /v1/tools"},
}
},
"tool_runtime": {
"models": {
"tool_def": "ToolDef",
"tool_invocation_result": "ToolInvocationResult",
},
"methods": {
"list_tools": {
"paginated": False,
"endpoint": "get /v1/tool-runtime/list-tools",
},
"invoke_tool": "post /v1/tool-runtime/invoke",
},
},
"responses": {
"models": {
"response_object_stream": "OpenAIResponseObjectStream",
"response_object": "OpenAIResponseObject",
},
"methods": {
"create": {
"type": "http",
"streaming": {
"stream_event_model": "responses.response_object_stream",
"param_discriminator": "stream",
},
"endpoint": "post /v1/responses",
},
"retrieve": "get /v1/responses/{response_id}",
"list": {"type": "http", "endpoint": "get /v1/responses"},
"delete": {
"type": "http",
"endpoint": "delete /v1/responses/{response_id}",
},
},
"subresources": {
"input_items": {
"methods": {
"list": {
"type": "http",
"paginated": False,
"endpoint": "get /v1/responses/{response_id}/input_items",
}
}
}
},
},
"prompts": {
"models": {"prompt": "Prompt", "list_prompts_response": "ListPromptsResponse"},
"methods": {
"create": "post /v1/prompts",
"list": {"paginated": False, "endpoint": "get /v1/prompts"},
"retrieve": "get /v1/prompts/{prompt_id}",
"update": "post /v1/prompts/{prompt_id}",
"delete": "delete /v1/prompts/{prompt_id}",
"set_default_version": "post /v1/prompts/{prompt_id}/set-default-version",
},
"subresources": {
"versions": {
"methods": {
"list": {
"paginated": False,
"endpoint": "get /v1/prompts/{prompt_id}/versions",
}
}
}
},
},
"conversations": {
"models": {"conversation_object": "Conversation"},
"methods": {
"create": {"type": "http", "endpoint": "post /v1/conversations"},
"retrieve": "get /v1/conversations/{conversation_id}",
"update": {
"type": "http",
"endpoint": "post /v1/conversations/{conversation_id}",
},
"delete": {
"type": "http",
"endpoint": "delete /v1/conversations/{conversation_id}",
},
},
"subresources": {
"items": {
"methods": {
"get": {
"type": "http",
"endpoint": "get /v1/conversations/{conversation_id}/items/{item_id}",
},
"list": {
"type": "http",
"endpoint": "get /v1/conversations/{conversation_id}/items",
},
"create": {
"type": "http",
"endpoint": "post /v1/conversations/{conversation_id}/items",
},
"delete": {
"type": "http",
"endpoint": "delete /v1/conversations/{conversation_id}/items/{item_id}",
},
}
}
},
},
"inspect": {
"models": {
"healthInfo": "HealthInfo",
"providerInfo": "ProviderInfo",
"routeInfo": "RouteInfo",
"versionInfo": "VersionInfo",
},
"methods": {"health": "get /v1/health", "version": "get /v1/version"},
},
"embeddings": {
"models": {"create_embeddings_response": "OpenAIEmbeddingsResponse"},
"methods": {"create": "post /v1/embeddings"},
},
"chat": {
"models": {"chat_completion_chunk": "OpenAIChatCompletionChunk"},
"subresources": {
"completions": {
"methods": {
"create": {
"type": "http",
"streaming": {
"stream_event_model": "chat.chat_completion_chunk",
"param_discriminator": "stream",
},
"endpoint": "post /v1/chat/completions",
},
"list": {
"type": "http",
"paginated": False,
"endpoint": "get /v1/chat/completions",
},
"retrieve": {
"type": "http",
"endpoint": "get /v1/chat/completions/{completion_id}",
},
}
}
},
},
"completions": {
"methods": {
"create": {
"type": "http",
"streaming": {"param_discriminator": "stream"},
"endpoint": "post /v1/completions",
}
}
},
"vector_io": {
"models": {"queryChunksResponse": "QueryChunksResponse"},
"methods": {
"insert": "post /v1/vector-io/insert",
"query": "post /v1/vector-io/query",
},
},
"vector_stores": {
"models": {
"vector_store": "VectorStoreObject",
"list_vector_stores_response": "VectorStoreListResponse",
"vector_store_delete_response": "VectorStoreDeleteResponse",
"vector_store_search_response": "VectorStoreSearchResponsePage",
},
"methods": {
"create": "post /v1/vector_stores",
"list": "get /v1/vector_stores",
"retrieve": "get /v1/vector_stores/{vector_store_id}",
"update": "post /v1/vector_stores/{vector_store_id}",
"delete": "delete /v1/vector_stores/{vector_store_id}",
"search": "post /v1/vector_stores/{vector_store_id}/search",
},
"subresources": {
"files": {
"models": {"vector_store_file": "VectorStoreFileObject"},
"methods": {
"list": "get /v1/vector_stores/{vector_store_id}/files",
"retrieve": "get /v1/vector_stores/{vector_store_id}/files/{file_id}",
"update": "post /v1/vector_stores/{vector_store_id}/files/{file_id}",
"delete": "delete /v1/vector_stores/{vector_store_id}/files/{file_id}",
"create": "post /v1/vector_stores/{vector_store_id}/files",
"content": "get /v1/vector_stores/{vector_store_id}/files/{file_id}/content",
},
},
"file_batches": {
"models": {
"vector_store_file_batches": "VectorStoreFileBatchObject",
"list_vector_store_files_in_batch_response": "VectorStoreFilesListInBatchResponse",
},
"methods": {
"create": "post /v1/vector_stores/{vector_store_id}/file_batches",
"retrieve": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
"list_files": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
"cancel": "post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
},
},
},
},
"models": {
"models": {
"model": "OpenAIModel",
"list_models_response": "OpenAIListModelsResponse",
},
"methods": {
"list": {"paginated": False, "endpoint": "get /v1/models"},
"retrieve": "get /v1/models/{model_id}",
"register": "post /v1/models",
"unregister": "delete /v1/models/{model_id}",
},
"subresources": {"openai": {"methods": {"list": {"paginated": False, "endpoint": "get /v1/models"}}}},
},
"providers": {
"models": {"list_providers_response": "ListProvidersResponse"},
"methods": {
"list": {"paginated": False, "endpoint": "get /v1/providers"},
"retrieve": "get /v1/providers/{provider_id}",
},
},
"routes": {
"models": {"list_routes_response": "ListRoutesResponse"},
"methods": {"list": {"paginated": False, "endpoint": "get /v1/inspect/routes"}},
},
"moderations": {
"models": {"create_response": "ModerationObject"},
"methods": {"create": "post /v1/moderations"},
},
"safety": {
"models": {"run_shield_response": "RunShieldResponse"},
"methods": {"run_shield": "post /v1/safety/run-shield"},
},
"shields": {
"models": {"shield": "Shield", "list_shields_response": "ListShieldsResponse"},
"methods": {
"retrieve": "get /v1/shields/{identifier}",
"list": {"paginated": False, "endpoint": "get /v1/shields"},
"register": "post /v1/shields",
"delete": "delete /v1/shields/{identifier}",
},
},
"scoring": {
"methods": {
"score": "post /v1/scoring/score",
"score_batch": "post /v1/scoring/score-batch",
}
},
"scoring_functions": {
"models": {
"scoring_fn": "ScoringFn",
"scoring_fn_params": "ScoringFnParams",
"list_scoring_functions_response": "ListScoringFunctionsResponse",
},
"methods": {
"retrieve": "get /v1/scoring-functions/{scoring_fn_id}",
"list": {"paginated": False, "endpoint": "get /v1/scoring-functions"},
"register": "post /v1/scoring-functions",
"unregister": "delete /v1/scoring-functions/{scoring_fn_id}",
},
},
"files": {
"models": {
"file": "OpenAIFileObject",
"list_files_response": "ListOpenAIFileResponse",
"delete_file_response": "OpenAIFileDeleteResponse",
},
"methods": {
"create": "post /v1/files",
"list": "get /v1/files",
"retrieve": "get /v1/files/{file_id}",
"delete": "delete /v1/files/{file_id}",
"content": "get /v1/files/{file_id}/content",
},
},
"batches": {
"methods": {
"create": "post /v1/batches",
"list": "get /v1/batches",
"retrieve": "get /v1/batches/{batch_id}",
"cancel": "post /v1/batches/{batch_id}/cancel",
}
},
"alpha": {
"subresources": {
"inference": {"methods": {"rerank": "post /v1alpha/inference/rerank"}},
"post_training": {
"models": {
"algorithm_config": "AlgorithmConfig",
"post_training_job": "PostTrainingJob",
"list_post_training_jobs_response": "ListPostTrainingJobsResponse",
},
"methods": {
"preference_optimize": "post /v1alpha/post-training/preference-optimize",
"supervised_fine_tune": "post /v1alpha/post-training/supervised-fine-tune",
},
"subresources": {
"job": {
"methods": {
"artifacts": "get /v1alpha/post-training/job/artifacts",
"cancel": "post /v1alpha/post-training/job/cancel",
"status": "get /v1alpha/post-training/job/status",
"list": {
"paginated": False,
"endpoint": "get /v1alpha/post-training/jobs",
},
}
}
},
},
"benchmarks": {
"models": {
"benchmark": "Benchmark",
"list_benchmarks_response": "ListBenchmarksResponse",
},
"methods": {
"retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}",
"list": {
"paginated": False,
"endpoint": "get /v1alpha/eval/benchmarks",
},
"register": "post /v1alpha/eval/benchmarks",
"unregister": "delete /v1alpha/eval/benchmarks/{benchmark_id}",
},
},
"eval": {
"models": {
"evaluate_response": "EvaluateResponse",
"benchmark_config": "BenchmarkConfig",
"job": "Job",
},
"methods": {
"evaluate_rows": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
"run_eval": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
"evaluate_rows_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
"run_eval_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
},
"subresources": {
"jobs": {
"methods": {
"cancel": "delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
"status": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
"retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
}
}
},
},
}
},
"beta": {
"subresources": {
"datasets": {
"models": {"list_datasets_response": "ListDatasetsResponse"},
"methods": {
"register": "post /v1beta/datasets",
"retrieve": "get /v1beta/datasets/{dataset_id}",
"list": {"paginated": False, "endpoint": "get /v1beta/datasets"},
"unregister": "delete /v1beta/datasets/{dataset_id}",
"iterrows": "get /v1beta/datasetio/iterrows/{dataset_id}",
"appendrows": "post /v1beta/datasetio/append-rows/{dataset_id}",
},
}
}
},
}
HTTP_METHODS = {"get", "post", "put", "patch", "delete", "options", "head"}
@dataclass
class Endpoint:
    """One HTTP operation ("method /path") referenced by the Stainless config.

    `extra` carries any additional per-method options (e.g. `paginated`,
    `streaming`) taken from the dict form of a config entry.
    """

    method: str
    path: str
    extra: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_config(cls, value: Any) -> "Endpoint":
        """Build an Endpoint from either the string shorthand or the dict form."""
        if isinstance(value, str):
            verb, _, route = value.partition(" ")
            return cls._from_parts(verb, route)
        if isinstance(value, dict) and "endpoint" in value:
            verb, _, route = value["endpoint"].partition(" ")
            parsed = cls._from_parts(verb, route)
            # Every key other than the endpoint string is carried through verbatim.
            parsed.extra.update({key: val for key, val in value.items() if key != "endpoint"})
            return parsed
        raise ValueError(f"Unsupported endpoint value: {value!r}")

    @classmethod
    def _from_parts(cls, method: str, path: str) -> "Endpoint":
        """Normalize and validate a raw method/path pair."""
        verb = method.strip().lower()
        route = path.strip()
        if verb not in HTTP_METHODS:
            raise ValueError(f"Unsupported HTTP method for Stainless config: {verb!r}")
        if not route.startswith("/"):
            raise ValueError(f"Endpoint path must start with '/': {route!r}")
        return cls(method=verb, path=route)

    def to_config(self) -> Any:
        """Render back to config form: the shorthand string, or a dict when extras exist."""
        shorthand = f"{self.method} {self.path}"
        if not self.extra:
            return shorthand
        return {**self.extra, "endpoint": shorthand}

    def route_key(self) -> str:
        """Canonical "method /path" key used for cross-referencing routes."""
        return f"{self.method} {self.path}"
@dataclass
class Resource:
    """A Stainless resource: model renames, methods, and nested subresources."""

    models: dict[str, str] | None = None
    methods: dict[str, Endpoint] = field(default_factory=dict)
    subresources: dict[str, Resource] = field(default_factory=dict)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> Resource:
        """Parse the declarative dict form into Resource/Endpoint objects."""
        return cls(
            models=data.get("models"),
            methods={key: Endpoint.from_config(raw) for key, raw in data.get("methods", {}).items()},
            subresources={key: cls.from_dict(raw) for key, raw in data.get("subresources", {}).items()},
        )

    def to_config(self) -> dict[str, Any]:
        """Render back to the dict form, omitting empty sections."""
        rendered: dict[str, Any] = {}
        if self.models:
            rendered["models"] = self.models
        if self.methods:
            rendered["methods"] = {key: ep.to_config() for key, ep in self.methods.items()}
        if self.subresources:
            rendered["subresources"] = {key: sub.to_config() for key, sub in self.subresources.items()}
        return rendered

    def collect_endpoint_paths(self) -> set[str]:
        """Gather every "method /path" key in this resource and its descendants."""
        routes = {ep.route_key() for ep in self.methods.values()}
        for child in self.subresources.values():
            routes |= child.collect_endpoint_paths()
        return routes

    def iter_endpoints(self, prefix: str) -> Iterator[tuple[str, str]]:
        """Yield (route key, dotted label) pairs for this resource subtree."""
        for method_name, ep in self.methods.items():
            yield ep.route_key(), f"{prefix}.{method_name}" if prefix else method_name
        for child_name, child in self.subresources.items():
            yield from child.iter_endpoints(f"{prefix}.{child_name}" if prefix else child_name)
_RESOURCES = {name: Resource.from_dict(data) for name, data in ALL_RESOURCES.items()}
def _load_openapi_paths(openapi_path: Path) -> set[str]:
    """Collect "method /path" route keys for every operation in the OpenAPI spec."""
    document = yaml.safe_load(openapi_path.read_text()) or {}
    routes: set[str] = set()
    for route, operations in (document.get("paths") or {}).items():
        # Skip malformed path items; only dict-valued entries describe operations.
        if not isinstance(operations, dict):
            continue
        routes.update(
            f"{str(verb).lower()} {route}" for verb, op in operations.items() if isinstance(op, dict)
        )
    return routes
@dataclass(frozen=True)
class StainlessConfig:
    """Immutable, in-memory representation of the full Stainless config file."""

    organization: dict[str, Any]
    security: list[Any]
    security_schemes: dict[str, Any]
    targets: dict[str, Any]
    client_settings: dict[str, Any]
    environments: dict[str, Any]
    pagination: list[dict[str, Any]]
    settings: dict[str, Any]
    openapi: dict[str, Any]
    readme: dict[str, Any]
    resources: dict[str, Resource]

    @classmethod
    def make(cls) -> StainlessConfig:
        """Assemble the canonical config from the module-level section constants."""
        return cls(
            organization=ORGANIZATION,
            security=SECURITY,
            security_schemes=SECURITY_SCHEMES,
            targets=TARGETS,
            client_settings=CLIENT_SETTINGS,
            environments=ENVIRONMENTS,
            pagination=PAGINATION,
            settings=SETTINGS,
            openapi=OPENAPI,
            readme=README,
            resources=dict(_RESOURCES),
        )

    def referenced_paths(self) -> set[str]:
        """All route keys referenced anywhere: resources plus README examples."""
        routes: set[str] = set()
        for res in self.resources.values():
            routes |= res.collect_endpoint_paths()
        return routes | self.readme_endpoint_paths()

    def readme_endpoint_paths(self) -> set[str]:
        """Route keys referenced by README example requests, normalized."""
        examples = self.readme.get("example_requests", {}) if self.readme else {}
        routes: set[str] = set()
        for example in examples.values():
            if not isinstance(example, dict):
                continue
            target = example.get("endpoint")
            if not isinstance(target, str):
                continue
            verb, _, route = target.partition(" ")
            verb, route = verb.strip().lower(), route.strip()
            if verb and route:
                routes.add(f"{verb} {route}")
        return routes

    def endpoint_map(self) -> dict[str, list[str]]:
        """Map each route key to the dotted resource labels that declare it."""
        labels_by_route: dict[str, list[str]] = {}
        for resource_name, res in self.resources.items():
            for route, label in res.iter_endpoints(resource_name):
                labels_by_route.setdefault(route, []).append(label)
        return labels_by_route

    def validate_unique_endpoints(self) -> None:
        """Raise if the same route is declared under more than one top-level resource."""
        clashes = {
            route: labels
            for route, labels in self.endpoint_map().items()
            if len({label.split(".", 1)[0] for label in labels}) > 1
        }
        if clashes:
            formatted = "\n".join(
                f" - {route} defined in: {', '.join(sorted(labels))}" for route, labels in sorted(clashes.items())
            )
            raise ValueError("Duplicate endpoints found across resources:\n" + formatted)

    def validate_readme_endpoints(self) -> None:
        """Raise if a README example references a route no resource declares."""
        known: set[str] = set()
        for res in self.resources.values():
            known |= res.collect_endpoint_paths()
        missing = sorted(route for route in self.readme_endpoint_paths() if route not in known)
        if missing:
            formatted = "\n".join(f" - {path}" for path in missing)
            raise ValueError("README example endpoints are not present in Stainless resources:\n" + formatted)

    def to_dict(self) -> dict[str, Any]:
        """Render the config as a plain dict, sections ordered per SECTION_ORDER."""
        rendered: dict[str, Any] = {}
        for section in SECTION_ORDER:
            if section == "resources":
                rendered[section] = {name: res.to_config() for name, res in self.resources.items()}
            else:
                rendered[section] = getattr(self, section)
        return rendered

    def validate_against_openapi(self, openapi_path: Path) -> None:
        """Raise if any referenced route is absent from the OpenAPI spec on disk."""
        if not openapi_path.exists():
            raise FileNotFoundError(f"OpenAPI spec not found at {openapi_path}")
        spec_paths = _load_openapi_paths(openapi_path)
        missing = sorted(path for path in self.referenced_paths() if path not in spec_paths)
        if missing:
            formatted = "\n".join(f" - {path}" for path in missing)
            raise ValueError("Stainless config references missing endpoints:\n" + formatted)

    def validate(self, openapi_path: Path | None = None) -> None:
        """Run all consistency checks; include the OpenAPI check when a path is given."""
        self.validate_unique_endpoints()
        self.validate_readme_endpoints()
        if openapi_path is not None:
            self.validate_against_openapi(openapi_path)
def build_config() -> dict[str, Any]:
    """Return the fully rendered Stainless config as a plain dict."""
    config = StainlessConfig.make()
    return config.to_dict()
def write_config(repo_root: Path, openapi_path: Path | None = None) -> Path:
    """Validate and render the Stainless config, writing it under client-sdks/stainless.

    Returns the path of the written config.yml. Raises if validation fails
    (duplicate endpoints, dangling README examples, or routes missing from
    the OpenAPI spec).
    """
    config = StainlessConfig.make()
    default_spec = repo_root / "client-sdks" / "stainless" / "openapi.yml"
    spec_path = (openapi_path or default_spec).resolve()
    config.validate(spec_path)
    rendered = yaml.safe_dump(config.to_dict(), sort_keys=False)
    destination = repo_root / "client-sdks" / "stainless" / "config.yml"
    destination.write_text(HEADER + rendered)
    return destination
def main() -> None:
    """CLI entry point: regenerate the Stainless config for this repository."""
    # This file lives three package levels below the repo root.
    repo_root = Path(__file__).resolve().parents[3]
    destination = write_config(repo_root)
    print(f"Wrote Stainless config: {destination}")


if __name__ == "__main__":
    main()