diff --git a/client-sdks/stainless/README.md b/client-sdks/stainless/README.md index 73e7082d4..54ff3d3d1 100644 --- a/client-sdks/stainless/README.md +++ b/client-sdks/stainless/README.md @@ -5,4 +5,7 @@ These are the source-of-truth configuration files used to generate the Stainless A small side note: notice the `.yml` suffixes since Stainless uses that suffix typically for its configuration files. -These files go hand-in-hand. As of now, only the `openapi.yml` file is automatically generated using the `scripts/run_openapi_generator.sh` script. +These files go hand-in-hand. Both `openapi.yml` and `config.yml` are generated by `scripts/run_openapi_generator.sh`: + +- `openapi.yml` comes from the FastAPI-based generator. +- `config.yml` is rendered from `scripts/openapi_generator/stainless_config/config_data.py` so the Stainless config stays in lock-step with the spec. diff --git a/client-sdks/stainless/config.yml b/client-sdks/stainless/config.yml index 9b26114fe..212b2b54a 100644 --- a/client-sdks/stainless/config.yml +++ b/client-sdks/stainless/config.yml @@ -1,20 +1,16 @@ # yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json organization: - # Name of your organization or company, used to determine the name of the client - # and headings. name: llama-stack-client docs: https://llama-stack.readthedocs.io/en/latest/ contact: llamastack@meta.com security: - - {} - - BearerAuth: [] +- {} +- BearerAuth: [] security_schemes: BearerAuth: type: http scheme: bearer -# `targets` define the output targets and their customization options, such as -# whether to emit the Node SDK and what it's package name should be. targets: node: package_name: llama-stack-client @@ -40,71 +36,123 @@ targets: options: enable_v2: true back_compat_use_shared_package: false - -# `client_settings` define settings for the API client, such as extra constructor -# arguments (used for authentication), retry behavior, idempotency, etc. 
client_settings: default_env_prefix: LLAMA_STACK_CLIENT opts: api_key: type: string read_env: LLAMA_STACK_CLIENT_API_KEY - auth: { security_scheme: BearerAuth } + auth: + security_scheme: BearerAuth nullable: true - -# `environments` are a map of the name of the environment (e.g. "sandbox", -# "production") to the corresponding url to use. environments: production: http://any-hosted-llama-stack.com - -# `pagination` defines [pagination schemes] which provides a template to match -# endpoints and generate next-page and auto-pagination helpers in the SDKs. pagination: - - name: datasets_iterrows - type: offset - request: - dataset_id: - type: string - start_index: - type: integer - x-stainless-pagination-property: - purpose: offset_count_param - limit: - type: integer - response: - data: - type: array - items: +- name: datasets_iterrows + type: offset + request: + dataset_id: + type: string + start_index: + type: integer + x-stainless-pagination-property: + purpose: offset_count_param + limit: + type: integer + response: + data: + type: array + items: + type: object + next_index: + type: integer + x-stainless-pagination-property: + purpose: offset_count_start_field +- name: openai_cursor_page + type: cursor + request: + limit: + type: integer + after: + type: string + x-stainless-pagination-property: + purpose: next_cursor_param + response: + data: + type: array + items: {} + has_more: + type: boolean + last_id: + type: string + x-stainless-pagination-property: + purpose: next_cursor_field +settings: + license: MIT + unwrap_response_fields: + - data + file_header: 'Copyright (c) Meta Platforms, Inc. and affiliates. + + All rights reserved. + + + This source code is licensed under the terms described in the LICENSE file in + + the root directory of this source tree. 
+ + ' +openapi: + transformations: + - command: mergeObject + reason: Better return_type using enum + args: + target: + - $.components.schemas + object: + ReturnType: + additionalProperties: false + properties: + type: + enum: + - string + - number + - boolean + - array + - object + - json + - union + - chat_completion_input + - completion_input + - agent_turn_input + required: + - type type: object - next_index: - type: integer - x-stainless-pagination-property: - purpose: offset_count_start_field - - name: openai_cursor_page - type: cursor - request: - limit: - type: integer - after: - type: string - x-stainless-pagination-property: - purpose: next_cursor_param - response: - data: - type: array - items: {} - has_more: - type: boolean - last_id: - type: string - x-stainless-pagination-property: - purpose: next_cursor_field -# `resources` define the structure and organziation for your API, such as how -# methods and models are grouped together and accessed. See the [configuration -# guide] for more information. 
-# -# [configuration guide]: -# https://app.stainlessapi.com/docs/guides/configure#resources + - command: replaceProperties + reason: Replace return type properties with better model (see above) + args: + filter: + only: + - $.components.schemas.ScoringFn.properties.return_type + - $.components.schemas.RegisterScoringFunctionRequest.properties.return_type + value: + $ref: '#/components/schemas/ReturnType' + - command: oneOfToAnyOf + reason: Prism (mock server) doesn't like one of our requests as it technically + matches multiple variants +readme: + example_requests: + default: + type: request + endpoint: post /v1/chat/completions + params: {} + headline: + type: request + endpoint: get /v1/models + params: {} + pagination: + type: request + endpoint: post /v1/chat/completions + params: {} resources: $shared: models: @@ -128,19 +176,17 @@ resources: methods: get: get /v1/tools/{tool_name} list: - endpoint: get /v1/tools paginated: false - + endpoint: get /v1/tools tool_runtime: models: tool_def: ToolDef tool_invocation_result: ToolInvocationResult methods: list_tools: - endpoint: get /v1/tool-runtime/list-tools paginated: false + endpoint: get /v1/tool-runtime/list-tools invoke_tool: post /v1/tool-runtime/invoke - responses: models: response_object_stream: OpenAIResponseObjectStream @@ -148,10 +194,10 @@ resources: methods: create: type: http - endpoint: post /v1/responses streaming: stream_event_model: responses.response_object_stream param_discriminator: stream + endpoint: post /v1/responses retrieve: get /v1/responses/{response_id} list: type: http @@ -164,9 +210,8 @@ resources: methods: list: type: http - endpoint: get /v1/responses/{response_id}/input_items paginated: false - + endpoint: get /v1/responses/{response_id}/input_items prompts: models: prompt: Prompt @@ -174,8 +219,8 @@ resources: methods: create: post /v1/prompts list: - endpoint: get /v1/prompts paginated: false + endpoint: get /v1/prompts retrieve: get /v1/prompts/{prompt_id} update: post 
/v1/prompts/{prompt_id} delete: delete /v1/prompts/{prompt_id} @@ -184,9 +229,8 @@ resources: versions: methods: list: - endpoint: get /v1/prompts/{prompt_id}/versions paginated: false - + endpoint: get /v1/prompts/{prompt_id}/versions conversations: models: conversation_object: Conversation @@ -216,7 +260,6 @@ resources: delete: type: http endpoint: delete /v1/conversations/{conversation_id}/items/{item_id} - inspect: models: healthInfo: HealthInfo @@ -226,13 +269,11 @@ resources: methods: health: get /v1/health version: get /v1/version - embeddings: models: create_embeddings_response: OpenAIEmbeddingsResponse methods: create: post /v1/embeddings - chat: models: chat_completion_chunk: OpenAIChatCompletionChunk @@ -241,14 +282,14 @@ resources: methods: create: type: http - endpoint: post /v1/chat/completions streaming: stream_event_model: chat.chat_completion_chunk param_discriminator: stream + endpoint: post /v1/chat/completions list: type: http - endpoint: get /v1/chat/completions paginated: false + endpoint: get /v1/chat/completions retrieve: type: http endpoint: get /v1/chat/completions/{completion_id} @@ -256,17 +297,15 @@ resources: methods: create: type: http - endpoint: post /v1/completions streaming: param_discriminator: stream - + endpoint: post /v1/completions vector_io: models: queryChunksResponse: QueryChunksResponse methods: insert: post /v1/vector-io/insert query: post /v1/vector-io/query - vector_stores: models: vector_store: VectorStoreObject @@ -275,8 +314,7 @@ resources: vector_store_search_response: VectorStoreSearchResponsePage methods: create: post /v1/vector_stores - list: - endpoint: get /v1/vector_stores + list: get /v1/vector_stores retrieve: get /v1/vector_stores/{vector_store_id} update: post /v1/vector_stores/{vector_store_id} delete: delete /v1/vector_stores/{vector_store_id} @@ -301,15 +339,14 @@ resources: retrieve: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id} list_files: get 
/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files cancel: post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel - models: models: model: OpenAIModel list_models_response: OpenAIListModelsResponse methods: list: - endpoint: get /v1/models paginated: false + endpoint: get /v1/models retrieve: get /v1/models/{model_id} register: post /v1/models unregister: delete /v1/models/{model_id} @@ -317,38 +354,33 @@ resources: openai: methods: list: - endpoint: get /v1/models paginated: false - + endpoint: get /v1/models providers: models: list_providers_response: ListProvidersResponse methods: list: - endpoint: get /v1/providers paginated: false + endpoint: get /v1/providers retrieve: get /v1/providers/{provider_id} - routes: models: list_routes_response: ListRoutesResponse methods: list: - endpoint: get /v1/inspect/routes paginated: false - + endpoint: get /v1/inspect/routes moderations: models: create_response: ModerationObject methods: create: post /v1/moderations - safety: models: run_shield_response: RunShieldResponse methods: run_shield: post /v1/safety/run-shield - shields: models: shield: Shield @@ -356,53 +388,48 @@ resources: methods: retrieve: get /v1/shields/{identifier} list: - endpoint: get /v1/shields paginated: false + endpoint: get /v1/shields register: post /v1/shields delete: delete /v1/shields/{identifier} - scoring: methods: score: post /v1/scoring/score score_batch: post /v1/scoring/score-batch scoring_functions: - methods: - retrieve: get /v1/scoring-functions/{scoring_fn_id} - list: - endpoint: get /v1/scoring-functions - paginated: false - register: post /v1/scoring-functions - unregister: delete /v1/scoring-functions/{scoring_fn_id} models: scoring_fn: ScoringFn scoring_fn_params: ScoringFnParams list_scoring_functions_response: ListScoringFunctionsResponse - + methods: + retrieve: get /v1/scoring-functions/{scoring_fn_id} + list: + paginated: false + endpoint: get /v1/scoring-functions + register: post 
/v1/scoring-functions + unregister: delete /v1/scoring-functions/{scoring_fn_id} files: + models: + file: OpenAIFileObject + list_files_response: ListOpenAIFileResponse + delete_file_response: OpenAIFileDeleteResponse methods: create: post /v1/files list: get /v1/files retrieve: get /v1/files/{file_id} delete: delete /v1/files/{file_id} content: get /v1/files/{file_id}/content - models: - file: OpenAIFileObject - list_files_response: ListOpenAIFileResponse - delete_file_response: OpenAIFileDeleteResponse - batches: methods: create: post /v1/batches list: get /v1/batches retrieve: get /v1/batches/{batch_id} cancel: post /v1/batches/{batch_id}/cancel - alpha: subresources: inference: methods: rerank: post /v1alpha/inference/rerank - post_training: models: algorithm_config: AlgorithmConfig @@ -418,39 +445,35 @@ resources: cancel: post /v1alpha/post-training/job/cancel status: get /v1alpha/post-training/job/status list: - endpoint: get /v1alpha/post-training/jobs paginated: false - + endpoint: get /v1alpha/post-training/jobs benchmarks: - methods: - retrieve: get /v1alpha/eval/benchmarks/{benchmark_id} - list: - endpoint: get /v1alpha/eval/benchmarks - paginated: false - register: post /v1alpha/eval/benchmarks - unregister: delete /v1alpha/eval/benchmarks/{benchmark_id} models: benchmark: Benchmark list_benchmarks_response: ListBenchmarksResponse - + methods: + retrieve: get /v1alpha/eval/benchmarks/{benchmark_id} + list: + paginated: false + endpoint: get /v1alpha/eval/benchmarks + register: post /v1alpha/eval/benchmarks + unregister: delete /v1alpha/eval/benchmarks/{benchmark_id} eval: + models: + evaluate_response: EvaluateResponse + benchmark_config: BenchmarkConfig + job: Job methods: evaluate_rows: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations run_eval: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs evaluate_rows_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations run_eval_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs - 
subresources: jobs: methods: cancel: delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id} status: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id} retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result - models: - evaluate_response: EvaluateResponse - benchmark_config: BenchmarkConfig - job: Job - beta: subresources: datasets: @@ -460,74 +483,8 @@ resources: register: post /v1beta/datasets retrieve: get /v1beta/datasets/{dataset_id} list: - endpoint: get /v1beta/datasets paginated: false + endpoint: get /v1beta/datasets unregister: delete /v1beta/datasets/{dataset_id} iterrows: get /v1beta/datasetio/iterrows/{dataset_id} appendrows: post /v1beta/datasetio/append-rows/{dataset_id} - -settings: - license: MIT - unwrap_response_fields: [data] - file_header: | - Copyright (c) Meta Platforms, Inc. and affiliates. - All rights reserved. - - This source code is licensed under the terms described in the LICENSE file in - the root directory of this source tree. - -openapi: - transformations: - - command: mergeObject - reason: Better return_type using enum - args: - target: - - "$.components.schemas" - object: - ReturnType: - additionalProperties: false - properties: - type: - enum: - - string - - number - - boolean - - array - - object - - json - - union - - chat_completion_input - - completion_input - - agent_turn_input - required: - - type - type: object - - command: replaceProperties - reason: Replace return type properties with better model (see above) - args: - filter: - only: - - "$.components.schemas.ScoringFn.properties.return_type" - - "$.components.schemas.RegisterScoringFunctionRequest.properties.return_type" - value: - $ref: "#/components/schemas/ReturnType" - - command: oneOfToAnyOf - reason: Prism (mock server) doesn't like one of our requests as it technically matches multiple variants - -# `readme` is used to configure the code snippets that will be rendered in the -# README.md of various SDKs. 
In particular, you can change the `headline` -# snippet's endpoint and the arguments to call it with. -readme: - example_requests: - default: - type: request - endpoint: post /v1/chat/completions - params: &ref_0 {} - headline: - type: request - endpoint: get /v1/models - params: *ref_0 - pagination: - type: request - endpoint: post /v1/chat/completions - params: {} diff --git a/scripts/openapi_generator/__init__.py b/scripts/openapi_generator/__init__.py index 7f6aaa1d1..834836f76 100644 --- a/scripts/openapi_generator/__init__.py +++ b/scripts/openapi_generator/__init__.py @@ -11,6 +11,13 @@ This module provides functionality to generate OpenAPI specifications from FastAPI applications. """ -from .main import generate_openapi_spec, main - __all__ = ["generate_openapi_spec", "main"] + + +def __getattr__(name: str): + if name in {"generate_openapi_spec", "main"}: + from .main import generate_openapi_spec as _gos + from .main import main as _main + + return {"generate_openapi_spec": _gos, "main": _main}[name] + raise AttributeError(name) diff --git a/scripts/openapi_generator/stainless_config/__init__.py b/scripts/openapi_generator/stainless_config/__init__.py new file mode 100644 index 000000000..fcc1077b4 --- /dev/null +++ b/scripts/openapi_generator/stainless_config/__init__.py @@ -0,0 +1,2 @@ +# Package marker for Stainless config generation. 
+ diff --git a/scripts/openapi_generator/stainless_config/generate_config.py b/scripts/openapi_generator/stainless_config/generate_config.py new file mode 100755 index 000000000..cf55536d0 --- /dev/null +++ b/scripts/openapi_generator/stainless_config/generate_config.py @@ -0,0 +1,754 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import argparse +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +import yaml + +HEADER = "# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json\n\n" + +SECTION_ORDER = [ + "organization", + "security", + "security_schemes", + "targets", + "client_settings", + "environments", + "pagination", + "settings", + "openapi", + "readme", + "resources", +] + +ORGANIZATION = { + "name": "llama-stack-client", + "docs": "https://llama-stack.readthedocs.io/en/latest/", + "contact": "llamastack@meta.com", +} + +SECURITY = [{}, {"BearerAuth": []}] + +SECURITY_SCHEMES = {"BearerAuth": {"type": "http", "scheme": "bearer"}} + +TARGETS = { + "node": { + "package_name": "llama-stack-client", + "production_repo": "llamastack/llama-stack-client-typescript", + "publish": {"npm": False}, + }, + "python": { + "package_name": "llama_stack_client", + "production_repo": "llamastack/llama-stack-client-python", + "options": {"use_uv": True}, + "publish": {"pypi": True}, + "project_name": "llama_stack_client", + }, + "kotlin": { + "reverse_domain": "com.llama_stack_client.api", + "production_repo": None, + "publish": {"maven": False}, + }, + "go": { + "package_name": "llama-stack-client", + "production_repo": "llamastack/llama-stack-client-go", + "options": {"enable_v2": True, "back_compat_use_shared_package": False}, + }, +} + +CLIENT_SETTINGS = { + "default_env_prefix": "LLAMA_STACK_CLIENT", + "opts": { + "api_key": { + "type": "string", + "read_env": "LLAMA_STACK_CLIENT_API_KEY", + "auth": {"security_scheme": "BearerAuth"}, + "nullable": True, + } + }, +} + 
+ENVIRONMENTS = {"production": "http://any-hosted-llama-stack.com"} + +PAGINATION = [ + { + "name": "datasets_iterrows", + "type": "offset", + "request": { + "dataset_id": {"type": "string"}, + "start_index": { + "type": "integer", + "x-stainless-pagination-property": {"purpose": "offset_count_param"}, + }, + "limit": {"type": "integer"}, + }, + "response": { + "data": {"type": "array", "items": {"type": "object"}}, + "next_index": { + "type": "integer", + "x-stainless-pagination-property": {"purpose": "offset_count_start_field"}, + }, + }, + }, + { + "name": "openai_cursor_page", + "type": "cursor", + "request": { + "limit": {"type": "integer"}, + "after": { + "type": "string", + "x-stainless-pagination-property": {"purpose": "next_cursor_param"}, + }, + }, + "response": { + "data": {"type": "array", "items": {}}, + "has_more": {"type": "boolean"}, + "last_id": { + "type": "string", + "x-stainless-pagination-property": {"purpose": "next_cursor_field"}, + }, + }, + }, +] + +SETTINGS = { + "license": "MIT", + "unwrap_response_fields": ["data"], + "file_header": "Copyright (c) Meta Platforms, Inc. 
and affiliates.\n" + "All rights reserved.\n" + "\n" + "This source code is licensed under the terms described in the " + "LICENSE file in\n" + "the root directory of this source tree.\n", +} + +OPENAPI = { + "transformations": [ + { + "command": "mergeObject", + "reason": "Better return_type using enum", + "args": { + "target": ["$.components.schemas"], + "object": { + "ReturnType": { + "additionalProperties": False, + "properties": { + "type": { + "enum": [ + "string", + "number", + "boolean", + "array", + "object", + "json", + "union", + "chat_completion_input", + "completion_input", + "agent_turn_input", + ] + } + }, + "required": ["type"], + "type": "object", + } + }, + }, + }, + { + "command": "replaceProperties", + "reason": "Replace return type properties with better model (see above)", + "args": { + "filter": { + "only": [ + "$.components.schemas.ScoringFn.properties.return_type", + "$.components.schemas.RegisterScoringFunctionRequest.properties.return_type", + ] + }, + "value": {"$ref": "#/components/schemas/ReturnType"}, + }, + }, + { + "command": "oneOfToAnyOf", + "reason": "Prism (mock server) doesn't like one of our " + "requests as it technically matches multiple " + "variants", + }, + ] +} + +README = { + "example_requests": { + "default": { + "type": "request", + "endpoint": "post /v1/chat/completions", + "params": {}, + }, + "headline": {"type": "request", "endpoint": "get /v1/models", "params": {}}, + "pagination": { + "type": "request", + "endpoint": "post /v1/chat/completions", + "params": {}, + }, + } +} + +ALL_RESOURCES = { + "$shared": { + "models": { + "interleaved_content_item": "InterleavedContentItem", + "interleaved_content": "InterleavedContent", + "param_type": "ParamType", + "safety_violation": "SafetyViolation", + "sampling_params": "SamplingParams", + "scoring_result": "ScoringResult", + "system_message": "SystemMessage", + } + }, + "toolgroups": { + "models": { + "tool_group": "ToolGroup", + "list_tool_groups_response": 
"ListToolGroupsResponse", + }, + "methods": { + "register": "post /v1/toolgroups", + "get": "get /v1/toolgroups/{toolgroup_id}", + "list": "get /v1/toolgroups", + "unregister": "delete /v1/toolgroups/{toolgroup_id}", + }, + }, + "tools": { + "methods": { + "get": "get /v1/tools/{tool_name}", + "list": {"paginated": False, "endpoint": "get /v1/tools"}, + } + }, + "tool_runtime": { + "models": { + "tool_def": "ToolDef", + "tool_invocation_result": "ToolInvocationResult", + }, + "methods": { + "list_tools": { + "paginated": False, + "endpoint": "get /v1/tool-runtime/list-tools", + }, + "invoke_tool": "post /v1/tool-runtime/invoke", + }, + }, + "responses": { + "models": { + "response_object_stream": "OpenAIResponseObjectStream", + "response_object": "OpenAIResponseObject", + }, + "methods": { + "create": { + "type": "http", + "streaming": { + "stream_event_model": "responses.response_object_stream", + "param_discriminator": "stream", + }, + "endpoint": "post /v1/responses", + }, + "retrieve": "get /v1/responses/{response_id}", + "list": {"type": "http", "endpoint": "get /v1/responses"}, + "delete": { + "type": "http", + "endpoint": "delete /v1/responses/{response_id}", + }, + }, + "subresources": { + "input_items": { + "methods": { + "list": { + "type": "http", + "paginated": False, + "endpoint": "get /v1/responses/{response_id}/input_items", + } + } + } + }, + }, + "prompts": { + "models": {"prompt": "Prompt", "list_prompts_response": "ListPromptsResponse"}, + "methods": { + "create": "post /v1/prompts", + "list": {"paginated": False, "endpoint": "get /v1/prompts"}, + "retrieve": "get /v1/prompts/{prompt_id}", + "update": "post /v1/prompts/{prompt_id}", + "delete": "delete /v1/prompts/{prompt_id}", + "set_default_version": "post /v1/prompts/{prompt_id}/set-default-version", + }, + "subresources": { + "versions": { + "methods": { + "list": { + "paginated": False, + "endpoint": "get /v1/prompts/{prompt_id}/versions", + } + } + } + }, + }, + "conversations": { + 
"models": {"conversation_object": "Conversation"}, + "methods": { + "create": {"type": "http", "endpoint": "post /v1/conversations"}, + "retrieve": "get /v1/conversations/{conversation_id}", + "update": { + "type": "http", + "endpoint": "post /v1/conversations/{conversation_id}", + }, + "delete": { + "type": "http", + "endpoint": "delete /v1/conversations/{conversation_id}", + }, + }, + "subresources": { + "items": { + "methods": { + "get": { + "type": "http", + "endpoint": "get /v1/conversations/{conversation_id}/items/{item_id}", + }, + "list": { + "type": "http", + "endpoint": "get /v1/conversations/{conversation_id}/items", + }, + "create": { + "type": "http", + "endpoint": "post /v1/conversations/{conversation_id}/items", + }, + "delete": { + "type": "http", + "endpoint": "delete /v1/conversations/{conversation_id}/items/{item_id}", + }, + } + } + }, + }, + "inspect": { + "models": { + "healthInfo": "HealthInfo", + "providerInfo": "ProviderInfo", + "routeInfo": "RouteInfo", + "versionInfo": "VersionInfo", + }, + "methods": {"health": "get /v1/health", "version": "get /v1/version"}, + }, + "embeddings": { + "models": {"create_embeddings_response": "OpenAIEmbeddingsResponse"}, + "methods": {"create": "post /v1/embeddings"}, + }, + "chat": { + "models": {"chat_completion_chunk": "OpenAIChatCompletionChunk"}, + "subresources": { + "completions": { + "methods": { + "create": { + "type": "http", + "streaming": { + "stream_event_model": "chat.chat_completion_chunk", + "param_discriminator": "stream", + }, + "endpoint": "post /v1/chat/completions", + }, + "list": { + "type": "http", + "paginated": False, + "endpoint": "get /v1/chat/completions", + }, + "retrieve": { + "type": "http", + "endpoint": "get /v1/chat/completions/{completion_id}", + }, + } + } + }, + }, + "completions": { + "methods": { + "create": { + "type": "http", + "streaming": {"param_discriminator": "stream"}, + "endpoint": "post /v1/completions", + } + } + }, + "vector_io": { + "models": 
{"queryChunksResponse": "QueryChunksResponse"}, + "methods": { + "insert": "post /v1/vector-io/insert", + "query": "post /v1/vector-io/query", + }, + }, + "vector_stores": { + "models": { + "vector_store": "VectorStoreObject", + "list_vector_stores_response": "VectorStoreListResponse", + "vector_store_delete_response": "VectorStoreDeleteResponse", + "vector_store_search_response": "VectorStoreSearchResponsePage", + }, + "methods": { + "create": "post /v1/vector_stores", + "list": "get /v1/vector_stores", + "retrieve": "get /v1/vector_stores/{vector_store_id}", + "update": "post /v1/vector_stores/{vector_store_id}", + "delete": "delete /v1/vector_stores/{vector_store_id}", + "search": "post /v1/vector_stores/{vector_store_id}/search", + }, + "subresources": { + "files": { + "models": {"vector_store_file": "VectorStoreFileObject"}, + "methods": { + "list": "get /v1/vector_stores/{vector_store_id}/files", + "retrieve": "get /v1/vector_stores/{vector_store_id}/files/{file_id}", + "update": "post /v1/vector_stores/{vector_store_id}/files/{file_id}", + "delete": "delete /v1/vector_stores/{vector_store_id}/files/{file_id}", + "create": "post /v1/vector_stores/{vector_store_id}/files", + "content": "get /v1/vector_stores/{vector_store_id}/files/{file_id}/content", + }, + }, + "file_batches": { + "models": { + "vector_store_file_batches": "VectorStoreFileBatchObject", + "list_vector_store_files_in_batch_response": "VectorStoreFilesListInBatchResponse", + }, + "methods": { + "create": "post /v1/vector_stores/{vector_store_id}/file_batches", + "retrieve": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}", + "list_files": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files", + "cancel": "post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel", + }, + }, + }, + }, + "models": { + "models": { + "model": "OpenAIModel", + "list_models_response": "OpenAIListModelsResponse", + }, + "methods": { + "list": {"paginated": False, 
"endpoint": "get /v1/models"}, + "retrieve": "get /v1/models/{model_id}", + "register": "post /v1/models", + "unregister": "delete /v1/models/{model_id}", + }, + "subresources": {"openai": {"methods": {"list": {"paginated": False, "endpoint": "get /v1/models"}}}}, + }, + "providers": { + "models": {"list_providers_response": "ListProvidersResponse"}, + "methods": { + "list": {"paginated": False, "endpoint": "get /v1/providers"}, + "retrieve": "get /v1/providers/{provider_id}", + }, + }, + "routes": { + "models": {"list_routes_response": "ListRoutesResponse"}, + "methods": {"list": {"paginated": False, "endpoint": "get /v1/inspect/routes"}}, + }, + "moderations": { + "models": {"create_response": "ModerationObject"}, + "methods": {"create": "post /v1/moderations"}, + }, + "safety": { + "models": {"run_shield_response": "RunShieldResponse"}, + "methods": {"run_shield": "post /v1/safety/run-shield"}, + }, + "shields": { + "models": {"shield": "Shield", "list_shields_response": "ListShieldsResponse"}, + "methods": { + "retrieve": "get /v1/shields/{identifier}", + "list": {"paginated": False, "endpoint": "get /v1/shields"}, + "register": "post /v1/shields", + "delete": "delete /v1/shields/{identifier}", + }, + }, + "scoring": { + "methods": { + "score": "post /v1/scoring/score", + "score_batch": "post /v1/scoring/score-batch", + } + }, + "scoring_functions": { + "models": { + "scoring_fn": "ScoringFn", + "scoring_fn_params": "ScoringFnParams", + "list_scoring_functions_response": "ListScoringFunctionsResponse", + }, + "methods": { + "retrieve": "get /v1/scoring-functions/{scoring_fn_id}", + "list": {"paginated": False, "endpoint": "get /v1/scoring-functions"}, + "register": "post /v1/scoring-functions", + "unregister": "delete /v1/scoring-functions/{scoring_fn_id}", + }, + }, + "files": { + "models": { + "file": "OpenAIFileObject", + "list_files_response": "ListOpenAIFileResponse", + "delete_file_response": "OpenAIFileDeleteResponse", + }, + "methods": { + "create": 
"post /v1/files",
+                "list": "get /v1/files",
+                "retrieve": "get /v1/files/{file_id}",
+                "delete": "delete /v1/files/{file_id}",
+                "content": "get /v1/files/{file_id}/content",
+            },
+        },
+        "batches": {
+            "methods": {
+                "create": "post /v1/batches",
+                "list": "get /v1/batches",
+                "retrieve": "get /v1/batches/{batch_id}",
+                "cancel": "post /v1/batches/{batch_id}/cancel",
+            }
+        },
+        "alpha": {
+            "subresources": {
+                "inference": {"methods": {"rerank": "post /v1alpha/inference/rerank"}},
+                "post_training": {
+                    "models": {
+                        "algorithm_config": "AlgorithmConfig",
+                        "post_training_job": "PostTrainingJob",
+                        "list_post_training_jobs_response": "ListPostTrainingJobsResponse",
+                    },
+                    "methods": {
+                        "preference_optimize": "post /v1alpha/post-training/preference-optimize",
+                        "supervised_fine_tune": "post /v1alpha/post-training/supervised-fine-tune",
+                    },
+                    "subresources": {
+                        "job": {
+                            "methods": {
+                                "artifacts": "get /v1alpha/post-training/job/artifacts",
+                                "cancel": "post /v1alpha/post-training/job/cancel",
+                                "status": "get /v1alpha/post-training/job/status",
+                                "list": {
+                                    "paginated": False,
+                                    "endpoint": "get /v1alpha/post-training/jobs",
+                                },
+                            }
+                        }
+                    },
+                },
+                "benchmarks": {
+                    "models": {
+                        "benchmark": "Benchmark",
+                        "list_benchmarks_response": "ListBenchmarksResponse",
+                    },
+                    "methods": {
+                        "retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}",
+                        "list": {
+                            "paginated": False,
+                            "endpoint": "get /v1alpha/eval/benchmarks",
+                        },
+                        "register": "post /v1alpha/eval/benchmarks",
+                        "unregister": "delete /v1alpha/eval/benchmarks/{benchmark_id}",
+                    },
+                },
+                "eval": {
+                    "models": {
+                        "evaluate_response": "EvaluateResponse",
+                        "benchmark_config": "BenchmarkConfig",
+                        "job": "Job",
+                    },
+                    # NOTE(review): the *_alpha entries map to the same routes as
+                    # evaluate_rows/run_eval — presumably kept for SDK back-compat; confirm.
+                    "methods": {
+                        "evaluate_rows": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+                        "run_eval": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+                        "evaluate_rows_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
+                        "run_eval_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
+                    },
+                    "subresources": {
+                        "jobs": {
+                            "methods": {
+                                "cancel": "delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+                                "status": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
+                                "retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
+                            }
+                        }
+                    },
+                },
+            }
+        },
+        "beta": {
+            "subresources": {
+                "datasets": {
+                    "models": {"list_datasets_response": "ListDatasetsResponse"},
+                    "methods": {
+                        "register": "post /v1beta/datasets",
+                        "retrieve": "get /v1beta/datasets/{dataset_id}",
+                        "list": {"paginated": False, "endpoint": "get /v1beta/datasets"},
+                        "unregister": "delete /v1beta/datasets/{dataset_id}",
+                        "iterrows": "get /v1beta/datasetio/iterrows/{dataset_id}",
+                        "appendrows": "post /v1beta/datasetio/append-rows/{dataset_id}",
+                    },
+                }
+            }
+        },
+}
+
+
+@dataclass
+class Endpoint:
+    # One HTTP operation referenced by the Stainless config: an HTTP method,
+    # a URL path, and any extra per-method options (e.g. {"paginated": False}).
+    method: str
+    path: str
+    extra: dict[str, Any] = field(default_factory=dict)
+
+    @classmethod
+    def from_config(cls, value: Any) -> Endpoint:
+        """Parse a config value: either "get /path" or {"endpoint": "get /path", ...}."""
+        if isinstance(value, str):
+            method, _, path = value.partition(" ")
+            return cls(method, path)
+        if isinstance(value, dict) and "endpoint" in value:
+            method, _, path = value["endpoint"].partition(" ")
+            # Everything except the "endpoint" key is carried through as-is.
+            extra = {k: v for k, v in value.items() if k != "endpoint"}
+            return cls(method, path, extra)
+        raise ValueError(f"Unsupported endpoint value: {value!r}")
+
+    def to_config(self) -> Any:
+        """Inverse of from_config: emit the compact string form when there are no extras."""
+        if not self.extra:
+            return f"{self.method} {self.path}"
+        data = dict(self.extra)
+        data["endpoint"] = f"{self.method} {self.path}"
+        return data
+
+    def route_key(self) -> str:
+        """Normalized "method path" key used to match against the OpenAPI spec."""
+        return f"{self.method.lower()} {self.path}"
+
+
+@dataclass
+class Resource:
+    # One node of the Stainless "resources" tree: optional model renames,
+    # named methods (Endpoint), and nested subresources.
+    models: dict[str, str] | None = None
+    methods: dict[str, Endpoint] = field(default_factory=dict)
+    subresources: dict[str, Resource] = field(default_factory=dict)
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> Resource:
+        """Recursively build a Resource from its ALL_RESOURCES dict entry."""
+        models = data.get("models")
+        methods = {name: Endpoint.from_config(value) for name, value in data.get("methods", {}).items()}
+        subresources = {name: cls.from_dict(value) for name, value in data.get("subresources", {}).items()}
+        return cls(models=models, methods=methods, subresources=subresources)
+
+    def to_config(self) -> dict[str, Any]:
+        """Serialize back to the dict shape Stainless expects; empty sections are omitted."""
+        result: dict[str, Any] = {}
+        if self.models:
+            result["models"] = self.models
+        if self.methods:
+            result["methods"] = {name: endpoint.to_config() for name, endpoint in self.methods.items()}
+        if self.subresources:
+            result["subresources"] = {name: resource.to_config() for name, resource in self.subresources.items()}
+        return result
+
+    def collect_endpoint_paths(self) -> set[str]:
+        """All "method path" route keys in this subtree (for spec validation)."""
+        paths = {endpoint.route_key() for endpoint in self.methods.values()}
+        for subresource in self.subresources.values():
+            paths.update(subresource.collect_endpoint_paths())
+        return paths
+
+
+# Parsed, typed form of the raw ALL_RESOURCES literal above.
+_RESOURCES = {name: Resource.from_dict(data) for name, data in ALL_RESOURCES.items()}
+
+
+def _load_openapi_paths(openapi_path: Path) -> set[str]:
+    """Collect every "method path" pair declared in the OpenAPI spec at *openapi_path*.
+
+    NOTE(review): treats every dict-valued key under a path item as an operation;
+    non-dict keys (summary, parameters lists, ...) are skipped by the isinstance check.
+    """
+    spec = yaml.safe_load(openapi_path.read_text()) or {}
+    paths: set[str] = set()
+    for path, path_item in (spec.get("paths") or {}).items():
+        if not isinstance(path_item, dict):
+            continue
+        for method, operation in path_item.items():
+            if not isinstance(operation, dict):
+                continue
+            paths.add(f"{str(method).lower()} {path}")
+    return paths
+
+
+@dataclass(frozen=True)
+class StainlessConfig:
+    # Immutable aggregate of every top-level section of the Stainless config.
+    # Section values come from module-level constants defined above this chunk.
+    organization: dict[str, Any]
+    security: list[Any]
+    security_schemes: dict[str, Any]
+    targets: dict[str, Any]
+    client_settings: dict[str, Any]
+    environments: dict[str, Any]
+    pagination: list[dict[str, Any]]
+    settings: dict[str, Any]
+    openapi: dict[str, Any]
+    readme: dict[str, Any]
+    resources: dict[str, Resource]
+
+    @classmethod
+    def make(cls) -> StainlessConfig:
+        """Assemble the config from the module-level section constants."""
+        return cls(
+            organization=ORGANIZATION,
+            security=SECURITY,
+            security_schemes=SECURITY_SCHEMES,
+            targets=TARGETS,
+            client_settings=CLIENT_SETTINGS,
+            environments=ENVIRONMENTS,
+            pagination=PAGINATION,
+            settings=SETTINGS,
+            openapi=OPENAPI,
+            readme=README,
+            resources=dict(_RESOURCES),
+        )
+
+    def referenced_paths(self) -> set[str]:
+        """Union of every route key referenced anywhere in the resources tree."""
+        paths: set[str] = set()
+        for resource in self.resources.values():
+            paths.update(resource.collect_endpoint_paths())
+        return paths
+
+    def to_dict(self) -> dict[str, Any]:
+        """Render the config as a plain dict, in SECTION_ORDER, ready for YAML dumping."""
+        cfg: dict[str, Any] = {}
+        for section in SECTION_ORDER:
+            # "resources" is the only section needing serialization; the rest
+            # are passed through as-is.
+            if section == "resources":
+                cfg[section] = {name: resource.to_config() for name, resource in self.resources.items()}
+                continue
+            cfg[section] = getattr(self, section)
+        return cfg
+
+    def validate_against_openapi(self, openapi_path: Path) -> None:
+        """Raise if any config-referenced endpoint is absent from the OpenAPI spec."""
+        if not openapi_path.exists():
+            raise FileNotFoundError(f"OpenAPI spec not found at {openapi_path}")
+        spec_paths = _load_openapi_paths(openapi_path)
+        config_paths = self.referenced_paths()
+        missing = sorted(path for path in config_paths if path not in spec_paths)
+        if missing:
+            formatted = "\n".join(f"  - {path}" for path in missing)
+            raise ValueError("Stainless config references missing endpoints:\n" + formatted)
+
+
+def build_config() -> dict[str, Any]:
+    """Convenience: the full config as a plain dict (no validation, no I/O)."""
+    return StainlessConfig.make().to_dict()
+
+
+def write_config(repo_root: Path, openapi_path: Path | None = None) -> Path:
+    """Validate against the spec, then write client-sdks/stainless/config.yml.
+
+    Returns the path of the written file. HEADER is a module-level constant
+    defined above this chunk, prepended to the YAML body.
+    """
+    stainless_config = StainlessConfig.make()
+    spec_path = (openapi_path or (repo_root / "client-sdks" / "stainless" / "openapi.yml")).resolve()
+    stainless_config.validate_against_openapi(spec_path)
+    # sort_keys=False preserves SECTION_ORDER in the emitted YAML.
+    yaml_text = yaml.safe_dump(stainless_config.to_dict(), sort_keys=False)
+    output = repo_root / "client-sdks" / "stainless" / "config.yml"
+    output.write_text(HEADER + yaml_text)
+    return output
+
+
+def main() -> None:
+    """CLI entry point: generate the config, validating against an OpenAPI spec."""
+    parser = argparse.ArgumentParser(description="Generate stainless config and validate it against OpenAPI spec.")
+    parser.add_argument(
+        "--openapi",
+        type=Path,
+        default=None,
+        help="Path to OpenAPI spec used for validation (defaults to client-sdks/stainless/openapi.yml).",
+    )
+    args = parser.parse_args()
+    # parents[3]: scripts/openapi_generator/stainless_config/<this file> -> repo root.
+    repo_root = Path(__file__).resolve().parents[3]
+    output = write_config(repo_root, args.openapi)
+    print(f"Wrote Stainless config: {output}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/run_openapi_generator.sh b/scripts/run_openapi_generator.sh
index 946b2886f..d4e3b2ec7 100755
--- a/scripts/run_openapi_generator.sh
+++ b/scripts/run_openapi_generator.sh
@@ -17,3 +17,5 @@
 PYTHONPATH=$PYTHONPATH:$stack_dir \
     python3 -m scripts.openapi_generator "$stack_dir"/docs/static
 cp "$stack_dir"/docs/static/stainless-llama-stack-spec.yaml "$stack_dir"/client-sdks/stainless/openapi.yml
+PYTHONPATH=$PYTHONPATH:$stack_dir \
+    python3 -m scripts.openapi_generator.stainless_config.generate_config