Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 18:00:36 +00:00)

Commit 3770963130: Merge branch 'main' into routeur
255 changed files with 18366 additions and 1909 deletions
@@ -287,9 +287,9 @@ start_container() {
     # On macOS/Windows, use host.docker.internal to reach host from container
     # On Linux with --network host, use localhost
     if [[ "$(uname)" == "Darwin" ]] || [[ "$(uname)" == *"MINGW"* ]]; then
-        OLLAMA_URL="${OLLAMA_URL:-http://host.docker.internal:11434}"
+        OLLAMA_URL="${OLLAMA_URL:-http://host.docker.internal:11434/v1}"
    else
-        OLLAMA_URL="${OLLAMA_URL:-http://localhost:11434}"
+        OLLAMA_URL="${OLLAMA_URL:-http://localhost:11434/v1}"
    fi
    DOCKER_ENV_VARS="$DOCKER_ENV_VARS -e OLLAMA_URL=$OLLAMA_URL"

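Context for the /v1 suffix above: it points clients at Ollama's OpenAI-compatible endpoint rather than Ollama's native API. A minimal sketch of a consumer of that URL (illustrative only, not part of this change; the model name is a placeholder and a local Ollama serving it is assumed):

from openai import OpenAI

# Ollama ignores the API key, but the OpenAI client requires one to be set.
client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")
resp = client.chat.completions.create(
    model="llama3.2:3b",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello"}],
)
print(resp.choices[0].message.content)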
@@ -16,16 +16,16 @@ import sys
 from tests.integration.suites import SETUP_DEFINITIONS, SUITE_DEFINITIONS


-def get_setup_env_vars(setup_name, suite_name=None):
+def get_setup_config(setup_name, suite_name=None):
     """
-    Get environment variables for a setup, with optional suite default fallback.
+    Get full configuration (env vars + defaults) for a setup.

     Args:
         setup_name: Name of the setup (e.g., 'ollama', 'gpt')
         suite_name: Optional suite name to get default setup if setup_name is None

     Returns:
-        Dictionary of environment variables
+        Dictionary with 'env' and 'defaults' keys
     """
     # If no setup specified, try to get default from suite
     if not setup_name and suite_name:
@@ -34,7 +34,7 @@ def get_setup_env_vars(setup_name, suite_name=None):
         setup_name = suite.default_setup

     if not setup_name:
-        return {}
+        return {"env": {}, "defaults": {}}

     setup = SETUP_DEFINITIONS.get(setup_name)
     if not setup:
@@ -44,27 +44,31 @@ def get_setup_env_vars(setup_name, suite_name=None):
         )
         sys.exit(1)

-    return setup.env
+    return {"env": setup.env, "defaults": setup.defaults}


 def main():
-    parser = argparse.ArgumentParser(description="Extract environment variables from a test setup")
+    parser = argparse.ArgumentParser(description="Extract environment variables and defaults from a test setup")
     parser.add_argument("--setup", help="Setup name (e.g., ollama, gpt)")
     parser.add_argument("--suite", help="Suite name to get default setup from if --setup not provided")
     parser.add_argument("--format", choices=["bash", "json"], default="bash", help="Output format (default: bash)")

     args = parser.parse_args()

-    env_vars = get_setup_env_vars(args.setup, args.suite)
+    config = get_setup_config(args.setup, args.suite)

     if args.format == "bash":
-        # Output as bash export statements
-        for key, value in env_vars.items():
+        # Output env vars as bash export statements
+        for key, value in config["env"].items():
             print(f"export {key}='{value}'")
+        # Output defaults as bash export statements with LLAMA_STACK_TEST_ prefix
+        for key, value in config["defaults"].items():
+            env_key = f"LLAMA_STACK_TEST_{key.upper()}"
+            print(f"export {env_key}='{value}'")
     elif args.format == "json":
         import json

-        print(json.dumps(env_vars))
+        print(json.dumps(config))


 if __name__ == "__main__":
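For reference, the bash-format output now carries two sections: the setup's env vars verbatim, and its defaults re-exported under a LLAMA_STACK_TEST_ prefix. A quick sketch of the emitted shape (the setup values below are invented for illustration):

config = {
    "env": {"OLLAMA_URL": "http://localhost:11434/v1"},
    "defaults": {"text_model": "ollama/llama3.2:3b"},
}
for key, value in config["env"].items():
    print(f"export {key}='{value}'")  # export OLLAMA_URL='http://localhost:11434/v1'
for key, value in config["defaults"].items():
    # export LLAMA_STACK_TEST_TEXT_MODEL='ollama/llama3.2:3b'
    print(f"export LLAMA_STACK_TEST_{key.upper()}='{value}'")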
@@ -640,7 +640,7 @@ cmd=( run -d "${PLATFORM_OPTS[@]}" --name llama-stack \
     --network llama-net \
     -p "${PORT}:${PORT}" \
     "${server_env_opts[@]}" \
-    -e OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}" \
+    -e OLLAMA_URL="http://ollama-server:${OLLAMA_PORT}/v1" \
     "${SERVER_IMAGE}" --port "${PORT}")

 log "🦙 Starting Llama Stack..."

@ -20,6 +20,7 @@ TEST_PATTERN=""
|
|||
INFERENCE_MODE="replay"
|
||||
EXTRA_PARAMS=""
|
||||
COLLECT_ONLY=false
|
||||
TYPESCRIPT_ONLY=false
|
||||
|
||||
# Function to display usage
|
||||
usage() {
|
||||
|
|
@@ -34,6 +35,7 @@ Options:
     --subdirs STRING       Comma-separated list of test subdirectories to run (overrides suite)
     --pattern STRING       Regex pattern to pass to pytest -k
     --collect-only         Collect tests only without running them (skips server startup)
+    --typescript-only      Skip Python tests and run only TypeScript client tests
     --help                 Show this help message

 Suites are defined in tests/integration/suites.py and define which tests to run.
@@ -90,6 +92,10 @@ while [[ $# -gt 0 ]]; do
             COLLECT_ONLY=true
             shift
             ;;
+        --typescript-only)
+            TYPESCRIPT_ONLY=true
+            shift
+            ;;
         --help)
             usage
             exit 0
@@ -181,6 +187,10 @@ echo "$SETUP_ENV"
 eval "$SETUP_ENV"
 echo ""

+# Export suite and setup names for TypeScript tests
+export LLAMA_STACK_TEST_SUITE="$TEST_SUITE"
+export LLAMA_STACK_TEST_SETUP="$TEST_SETUP"
+
 ROOT_DIR="$THIS_DIR/.."
 cd $ROOT_DIR

@@ -212,6 +222,71 @@ find_available_port() {
     return 1
 }

+run_client_ts_tests() {
+    if ! command -v npm &>/dev/null; then
+        echo "npm could not be found; ensure Node.js is installed"
+        return 1
+    fi
+
+    pushd tests/integration/client-typescript >/dev/null
+
+    # Determine if TS_CLIENT_PATH is a directory path or an npm version
+    if [[ -d "$TS_CLIENT_PATH" ]]; then
+        # It's a directory path - use local checkout
+        if [[ ! -f "$TS_CLIENT_PATH/package.json" ]]; then
+            echo "Error: $TS_CLIENT_PATH exists but doesn't look like llama-stack-client-typescript (no package.json)"
+            popd >/dev/null
+            return 1
+        fi
+        echo "Using local llama-stack-client-typescript from: $TS_CLIENT_PATH"
+
+        # Build the TypeScript client first
+        echo "Building TypeScript client..."
+        pushd "$TS_CLIENT_PATH" >/dev/null
+        npm install --silent
+        npm run build --silent
+        popd >/dev/null
+
+        # Install other dependencies first
+        if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
+            npm ci --silent
+        else
+            npm install --silent
+        fi
+
+        # Then install the client from local directory
+        echo "Installing llama-stack-client from: $TS_CLIENT_PATH"
+        npm install "$TS_CLIENT_PATH" --silent
+    else
+        # It's an npm version specifier - install from npm
+        echo "Installing llama-stack-client@${TS_CLIENT_PATH} from npm"
+        if [[ "${CI:-}" == "true" || "${CI:-}" == "1" ]]; then
+            npm ci --silent
+            npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
+        else
+            npm install "llama-stack-client@${TS_CLIENT_PATH}" --silent
+        fi
+    fi
+
+    # Verify installation
+    echo "Verifying llama-stack-client installation..."
+    if npm list llama-stack-client 2>/dev/null | grep -q llama-stack-client; then
+        echo "✅ llama-stack-client successfully installed"
+        npm list llama-stack-client
+    else
+        echo "❌ llama-stack-client not found in node_modules"
+        echo "Installed packages:"
+        npm list --depth=0
+        popd >/dev/null
+        return 1
+    fi
+
+    echo "Running TypeScript tests for suite $TEST_SUITE (setup $TEST_SETUP)"
+    npm test
+
+    popd >/dev/null
+}
+
 # Start Llama Stack Server if needed
 if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
     # Find an available port for the server
@@ -221,6 +296,7 @@ if [[ "$STACK_CONFIG" == *"server:"* && "$COLLECT_ONLY" == false ]]; then
         exit 1
     fi
     export LLAMA_STACK_PORT
+    export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT"
     echo "Will use port: $LLAMA_STACK_PORT"

     stop_server() {
@@ -298,6 +374,7 @@ if [[ "$STACK_CONFIG" == *"docker:"* && "$COLLECT_ONLY" == false ]]; then
         exit 1
     fi
     export LLAMA_STACK_PORT
+    export TEST_API_BASE_URL="http://localhost:$LLAMA_STACK_PORT"
     echo "Will use port: $LLAMA_STACK_PORT"

     echo "=== Building Docker Image for distribution: $DISTRO ==="
@@ -473,16 +550,23 @@ if [[ -n "$STACK_CONFIG" ]]; then
     STACK_CONFIG_ARG="--stack-config=$STACK_CONFIG"
 fi

-pytest -s -v $PYTEST_TARGET \
-    $STACK_CONFIG_ARG \
-    --inference-mode="$INFERENCE_MODE" \
-    -k "$PYTEST_PATTERN" \
-    $EXTRA_PARAMS \
-    --color=yes \
-    --embedding-model=sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \
-    --color=yes $EXTRA_PARAMS \
-    --capture=tee-sys
-exit_code=$?
+# Run Python tests unless typescript-only mode
+if [[ "$TYPESCRIPT_ONLY" == "false" ]]; then
+    pytest -s -v $PYTEST_TARGET \
+        $STACK_CONFIG_ARG \
+        --inference-mode="$INFERENCE_MODE" \
+        -k "$PYTEST_PATTERN" \
+        $EXTRA_PARAMS \
+        --color=yes \
+        --embedding-model=sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \
+        --color=yes $EXTRA_PARAMS \
+        --capture=tee-sys
+    exit_code=$?
+else
+    echo "Skipping Python tests (--typescript-only mode)"
+    exit_code=0
+fi

 set +x
 set -e

@@ -506,5 +590,10 @@ else
     exit 1
 fi

+# Run TypeScript client tests if TS_CLIENT_PATH is set
+if [[ $exit_code -eq 0 && -n "${TS_CLIENT_PATH:-}" && "${LLAMA_STACK_TEST_STACK_CONFIG_TYPE:-}" == "server" ]]; then
+    run_client_ts_tests
+fi
+
 echo ""
 echo "=== Integration Tests Complete ==="

@@ -11,6 +11,13 @@ This module provides functionality to generate OpenAPI specifications
 from FastAPI applications.
 """

-from .main import generate_openapi_spec, main
-
 __all__ = ["generate_openapi_spec", "main"]
+
+
+def __getattr__(name: str):
+    if name in {"generate_openapi_spec", "main"}:
+        from .main import generate_openapi_spec as _gos
+        from .main import main as _main
+
+        return {"generate_openapi_spec": _gos, "main": _main}[name]
+    raise AttributeError(name)

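The eager import is replaced with a module-level __getattr__ (PEP 562), so importing the package no longer pulls in .main and its FastAPI dependencies until one of the two names is actually used. A rough sketch of the effect (assumes the repo root is on sys.path so the package is importable):

import scripts.openapi_generator as generator  # cheap: .main is not imported yet

spec_fn = generator.generate_openapi_spec  # attribute access triggers __getattr__, importing .main now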
@@ -15,6 +15,7 @@ import typing
 from typing import Annotated, Any, get_args, get_origin

 from fastapi import FastAPI
+from fastapi.params import Body as FastAPIBody
 from pydantic import Field, create_model

 from llama_stack.log import get_logger
@@ -26,6 +27,8 @@ from .state import _extra_body_fields, register_dynamic_model

 logger = get_logger(name=__name__, category="core")

+type QueryParameter = tuple[str, type, Any, bool]
+

 def _to_pascal_case(segment: str) -> str:
     tokens = re.findall(r"[A-Za-z]+|\d+", segment)
@@ -75,12 +78,12 @@ def _create_endpoint_with_request_model(
     return endpoint


-def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_any: bool = False) -> dict[str, tuple]:
+def _build_field_definitions(query_parameters: list[QueryParameter], use_any: bool = False) -> dict[str, tuple]:
     """Build field definitions for a Pydantic model from query parameters."""
     from typing import Any

     field_definitions = {}
-    for param_name, param_type, default_value in query_parameters:
+    for param_name, param_type, default_value, _ in query_parameters:
         if use_any:
             field_definitions[param_name] = (Any, ... if default_value is inspect.Parameter.empty else default_value)
             continue
@@ -108,10 +111,10 @@ def _build_field_definitions(query_parameters: list[tuple[str, type, Any]], use_
             field_definitions[param_name] = (Any, ... if default_value is inspect.Parameter.empty else default_value)

     # Ensure all parameters are included
-    expected_params = {name for name, _, _ in query_parameters}
+    expected_params = {name for name, _, _, _ in query_parameters}
     missing = expected_params - set(field_definitions.keys())
     if missing:
-        for param_name, _, default_value in query_parameters:
+        for param_name, _, default_value, _ in query_parameters:
             if param_name in missing:
                 field_definitions[param_name] = (
                     Any,
@@ -126,7 +129,7 @@ def _create_dynamic_request_model(
     webmethod,
     method_name: str,
     http_method: str,
-    query_parameters: list[tuple[str, type, Any]],
+    query_parameters: list[QueryParameter],
     use_any: bool = False,
     variant_suffix: str | None = None,
 ) -> type | None:
@@ -143,12 +146,12 @@


 def _build_signature_params(
-    query_parameters: list[tuple[str, type, Any]],
+    query_parameters: list[QueryParameter],
 ) -> tuple[list[inspect.Parameter], dict[str, type]]:
     """Build signature parameters and annotations from query parameters."""
     signature_params = []
     param_annotations = {}
-    for param_name, param_type, default_value in query_parameters:
+    for param_name, param_type, default_value, _ in query_parameters:
         param_annotations[param_name] = param_type
         signature_params.append(
             inspect.Parameter(
@@ -219,6 +222,19 @@ def _is_extra_body_field(metadata_item: Any) -> bool:
     return isinstance(metadata_item, ExtraBodyField)


+def _should_embed_parameter(param_type: Any) -> bool:
+    """Determine whether a parameter should be embedded (wrapped) in the request body."""
+    if get_origin(param_type) is Annotated:
+        args = get_args(param_type)
+        metadata = args[1:] if len(args) > 1 else []
+        for metadata_item in metadata:
+            if isinstance(metadata_item, FastAPIBody):
+                # FastAPI treats embed=None as False, so default to False when unset.
+                return bool(metadata_item.embed)
+    # Unannotated parameters default to embed=True through create_dynamic_typed_route.
+    return True
+
+
 def _is_async_iterator_type(type_obj: Any) -> bool:
     """Check if a type is AsyncIterator or AsyncIterable."""
     from collections.abc import AsyncIterable, AsyncIterator
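The helper reads FastAPI's Body(embed=...) marker off Annotated parameters so the generator knows whether a lone Pydantic parameter is the whole request body or gets wrapped under its parameter name. A minimal FastAPI sketch of the distinction (route and model names are invented for illustration):

from fastapi import Body, FastAPI
from pydantic import BaseModel

app = FastAPI()

class Item(BaseModel):
    name: str

@app.post("/flat")
def create_flat(item: Item):
    # embed defaults to False: the request body is the model itself, {"name": "..."}
    return item

@app.post("/wrapped")
def create_wrapped(item: Item = Body(..., embed=True)):
    # embed=True wraps the model under its parameter name: {"item": {"name": "..."}}
    return item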
@@ -282,7 +298,7 @@ def _find_models_for_endpoint(

     Returns:
         tuple: (request_model, response_model, query_parameters, file_form_params, streaming_response_model, response_schema_name)
-        where query_parameters is a list of (name, type, default_value) tuples
+        where query_parameters is a list of (name, type, default_value, should_embed) tuples
         and file_form_params is a list of inspect.Parameter objects for File()/Form() params
         and streaming_response_model is the model for streaming responses (AsyncIterator content)
     """
@@ -299,7 +315,7 @@

     # Find request model and collect all body parameters
     request_model = None
-    query_parameters = []
+    query_parameters: list[QueryParameter] = []
     file_form_params = []
     path_params = set()
     extra_body_params = []
@@ -325,6 +341,7 @@

         # Check if it's a File() or Form() parameter - these need special handling
         param_type = param.annotation
+        param_should_embed = _should_embed_parameter(param_type)
         if _is_file_or_form_param(param_type):
             # File() and Form() parameters must be in the function signature directly
             # They cannot be part of a Pydantic model
@@ -350,30 +367,14 @@
                 # Store as extra body parameter - exclude from request model
                 extra_body_params.append((param_name, base_type, extra_body_description))
                 continue
             param_type = base_type

         # Check if it's a Pydantic model (for POST/PUT requests)
         if hasattr(param_type, "model_json_schema"):
-            # Collect all body parameters including Pydantic models
-            # We'll decide later whether to use a single model or create a combined one
-            query_parameters.append((param_name, param_type, param.default))
-        elif get_origin(param_type) is Annotated:
-            # Handle Annotated types - get the base type
-            args = get_args(param_type)
-            if args and hasattr(args[0], "model_json_schema"):
-                # Collect Pydantic models from Annotated types
-                query_parameters.append((param_name, args[0], param.default))
-            else:
-                # Regular annotated parameter (but not File/Form, already handled above)
-                query_parameters.append((param_name, param_type, param.default))
+            query_parameters.append((param_name, param_type, param.default, param_should_embed))
         else:
-            # This is likely a body parameter for POST/PUT or query parameter for GET
-            # Store the parameter info for later use
-            # Preserve inspect.Parameter.empty to distinguish "no default" from "default=None"
-            default_value = param.default
-
-            # Extract the base type from union types (e.g., str | None -> str)
-            # Also make it safe for FastAPI to avoid forward reference issues
-            query_parameters.append((param_name, param_type, default_value))
+            # Regular annotated parameter (but not File/Form, already handled above)
+            query_parameters.append((param_name, param_type, param.default, param_should_embed))

         # Store extra body fields for later use in post-processing
         # We'll store them when the endpoint is created, as we need the full path
@@ -385,8 +386,8 @@
     # Otherwise, we'll create a combined request model from all parameters
     # BUT: For GET requests, never create a request body - all parameters should be query parameters
     if is_post_put and len(query_parameters) == 1:
-        param_name, param_type, default_value = query_parameters[0]
-        if hasattr(param_type, "model_json_schema"):
+        param_name, param_type, default_value, should_embed = query_parameters[0]
+        if hasattr(param_type, "model_json_schema") and not should_embed:
             request_model = param_type
             query_parameters = []  # Clear query_parameters so we use the single model

@@ -495,7 +496,7 @@ def _create_fastapi_endpoint(app: FastAPI, route, webmethod, api: Api):
     if file_form_params and is_post_put:
         signature_params = list(file_form_params)
         param_annotations = {param.name: param.annotation for param in file_form_params}
-        for param_name, param_type, default_value in query_parameters:
+        for param_name, param_type, default_value, _ in query_parameters:
             signature_params.append(
                 inspect.Parameter(
                     param_name,
scripts/openapi_generator/stainless_config/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+# Package marker for Stainless config generation.
scripts/openapi_generator/stainless_config/generate_config.py (new file, 821 lines)
@@ -0,0 +1,821 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
|
||||
HEADER = "# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json\n\n"
|
||||
|
||||
SECTION_ORDER = [
|
||||
"organization",
|
||||
"security",
|
||||
"security_schemes",
|
||||
"targets",
|
||||
"client_settings",
|
||||
"environments",
|
||||
"pagination",
|
||||
"settings",
|
||||
"openapi",
|
||||
"readme",
|
||||
"resources",
|
||||
]
|
||||
|
||||
ORGANIZATION = {
|
||||
"name": "llama-stack-client",
|
||||
"docs": "https://llama-stack.readthedocs.io/en/latest/",
|
||||
"contact": "llamastack@meta.com",
|
||||
}
|
||||
|
||||
SECURITY = [{}, {"BearerAuth": []}]
|
||||
|
||||
SECURITY_SCHEMES = {"BearerAuth": {"type": "http", "scheme": "bearer"}}
|
||||
|
||||
TARGETS = {
|
||||
"node": {
|
||||
"package_name": "llama-stack-client",
|
||||
"production_repo": "llamastack/llama-stack-client-typescript",
|
||||
"publish": {"npm": False},
|
||||
},
|
||||
"python": {
|
||||
"package_name": "llama_stack_client",
|
||||
"production_repo": "llamastack/llama-stack-client-python",
|
||||
"options": {"use_uv": True},
|
||||
"publish": {"pypi": True},
|
||||
"project_name": "llama_stack_client",
|
||||
},
|
||||
"kotlin": {
|
||||
"reverse_domain": "com.llama_stack_client.api",
|
||||
"production_repo": None,
|
||||
"publish": {"maven": False},
|
||||
},
|
||||
"go": {
|
||||
"package_name": "llama-stack-client",
|
||||
"production_repo": "llamastack/llama-stack-client-go",
|
||||
"options": {"enable_v2": True, "back_compat_use_shared_package": False},
|
||||
},
|
||||
}
|
||||
|
||||
CLIENT_SETTINGS = {
|
||||
"default_env_prefix": "LLAMA_STACK_CLIENT",
|
||||
"opts": {
|
||||
"api_key": {
|
||||
"type": "string",
|
||||
"read_env": "LLAMA_STACK_CLIENT_API_KEY",
|
||||
"auth": {"security_scheme": "BearerAuth"},
|
||||
"nullable": True,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
ENVIRONMENTS = {"production": "http://any-hosted-llama-stack.com"}
|
||||
|
||||
PAGINATION = [
|
||||
{
|
||||
"name": "datasets_iterrows",
|
||||
"type": "offset",
|
||||
"request": {
|
||||
"dataset_id": {"type": "string"},
|
||||
"start_index": {
|
||||
"type": "integer",
|
||||
"x-stainless-pagination-property": {"purpose": "offset_count_param"},
|
||||
},
|
||||
"limit": {"type": "integer"},
|
||||
},
|
||||
"response": {
|
||||
"data": {"type": "array", "items": {"type": "object"}},
|
||||
"next_index": {
|
||||
"type": "integer",
|
||||
"x-stainless-pagination-property": {"purpose": "offset_count_start_field"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"name": "openai_cursor_page",
|
||||
"type": "cursor",
|
||||
"request": {
|
||||
"limit": {"type": "integer"},
|
||||
"after": {
|
||||
"type": "string",
|
||||
"x-stainless-pagination-property": {"purpose": "next_cursor_param"},
|
||||
},
|
||||
},
|
||||
"response": {
|
||||
"data": {"type": "array", "items": {}},
|
||||
"has_more": {"type": "boolean"},
|
||||
"last_id": {
|
||||
"type": "string",
|
||||
"x-stainless-pagination-property": {"purpose": "next_cursor_field"},
|
||||
},
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
SETTINGS = {
|
||||
"license": "MIT",
|
||||
"unwrap_response_fields": ["data"],
|
||||
"file_header": "Copyright (c) Meta Platforms, Inc. and affiliates.\n"
|
||||
"All rights reserved.\n"
|
||||
"\n"
|
||||
"This source code is licensed under the terms described in the "
|
||||
"LICENSE file in\n"
|
||||
"the root directory of this source tree.\n",
|
||||
}
|
||||
|
||||
OPENAPI = {
|
||||
"transformations": [
|
||||
{
|
||||
"command": "mergeObject",
|
||||
"reason": "Better return_type using enum",
|
||||
"args": {
|
||||
"target": ["$.components.schemas"],
|
||||
"object": {
|
||||
"ReturnType": {
|
||||
"additionalProperties": False,
|
||||
"properties": {
|
||||
"type": {
|
||||
"enum": [
|
||||
"string",
|
||||
"number",
|
||||
"boolean",
|
||||
"array",
|
||||
"object",
|
||||
"json",
|
||||
"union",
|
||||
"chat_completion_input",
|
||||
"completion_input",
|
||||
"agent_turn_input",
|
||||
]
|
||||
}
|
||||
},
|
||||
"required": ["type"],
|
||||
"type": "object",
|
||||
}
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"command": "replaceProperties",
|
||||
"reason": "Replace return type properties with better model (see above)",
|
||||
"args": {
|
||||
"filter": {
|
||||
"only": [
|
||||
"$.components.schemas.ScoringFn.properties.return_type",
|
||||
"$.components.schemas.RegisterScoringFunctionRequest.properties.return_type",
|
||||
]
|
||||
},
|
||||
"value": {"$ref": "#/components/schemas/ReturnType"},
|
||||
},
|
||||
},
|
||||
{
|
||||
"command": "oneOfToAnyOf",
|
||||
"reason": "Prism (mock server) doesn't like one of our "
|
||||
"requests as it technically matches multiple "
|
||||
"variants",
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
README = {
|
||||
"example_requests": {
|
||||
"default": {
|
||||
"type": "request",
|
||||
"endpoint": "post /v1/chat/completions",
|
||||
"params": {},
|
||||
},
|
||||
"headline": {"type": "request", "endpoint": "get /v1/models", "params": {}},
|
||||
"pagination": {
|
||||
"type": "request",
|
||||
"endpoint": "post /v1/chat/completions",
|
||||
"params": {},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
ALL_RESOURCES = {
|
||||
"$shared": {
|
||||
"models": {
|
||||
"interleaved_content_item": "InterleavedContentItem",
|
||||
"interleaved_content": "InterleavedContent",
|
||||
"param_type": "ParamType",
|
||||
"safety_violation": "SafetyViolation",
|
||||
"sampling_params": "SamplingParams",
|
||||
"scoring_result": "ScoringResult",
|
||||
"system_message": "SystemMessage",
|
||||
}
|
||||
},
|
||||
"toolgroups": {
|
||||
"models": {
|
||||
"tool_group": "ToolGroup",
|
||||
"list_tool_groups_response": "ListToolGroupsResponse",
|
||||
},
|
||||
"methods": {
|
||||
"register": "post /v1/toolgroups",
|
||||
"get": "get /v1/toolgroups/{toolgroup_id}",
|
||||
"list": "get /v1/toolgroups",
|
||||
"unregister": "delete /v1/toolgroups/{toolgroup_id}",
|
||||
},
|
||||
},
|
||||
"tools": {
|
||||
"methods": {
|
||||
"get": "get /v1/tools/{tool_name}",
|
||||
"list": {"paginated": False, "endpoint": "get /v1/tools"},
|
||||
}
|
||||
},
|
||||
"tool_runtime": {
|
||||
"models": {
|
||||
"tool_def": "ToolDef",
|
||||
"tool_invocation_result": "ToolInvocationResult",
|
||||
},
|
||||
"methods": {
|
||||
"list_tools": {
|
||||
"paginated": False,
|
||||
"endpoint": "get /v1/tool-runtime/list-tools",
|
||||
},
|
||||
"invoke_tool": "post /v1/tool-runtime/invoke",
|
||||
},
|
||||
},
|
||||
"responses": {
|
||||
"models": {
|
||||
"response_object_stream": "OpenAIResponseObjectStream",
|
||||
"response_object": "OpenAIResponseObject",
|
||||
},
|
||||
"methods": {
|
||||
"create": {
|
||||
"type": "http",
|
||||
"streaming": {
|
||||
"stream_event_model": "responses.response_object_stream",
|
||||
"param_discriminator": "stream",
|
||||
},
|
||||
"endpoint": "post /v1/responses",
|
||||
},
|
||||
"retrieve": "get /v1/responses/{response_id}",
|
||||
"list": {"type": "http", "endpoint": "get /v1/responses"},
|
||||
"delete": {
|
||||
"type": "http",
|
||||
"endpoint": "delete /v1/responses/{response_id}",
|
||||
},
|
||||
},
|
||||
"subresources": {
|
||||
"input_items": {
|
||||
"methods": {
|
||||
"list": {
|
||||
"type": "http",
|
||||
"paginated": False,
|
||||
"endpoint": "get /v1/responses/{response_id}/input_items",
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
"prompts": {
|
||||
"models": {"prompt": "Prompt", "list_prompts_response": "ListPromptsResponse"},
|
||||
"methods": {
|
||||
"create": "post /v1/prompts",
|
||||
"list": {"paginated": False, "endpoint": "get /v1/prompts"},
|
||||
"retrieve": "get /v1/prompts/{prompt_id}",
|
||||
"update": "post /v1/prompts/{prompt_id}",
|
||||
"delete": "delete /v1/prompts/{prompt_id}",
|
||||
"set_default_version": "post /v1/prompts/{prompt_id}/set-default-version",
|
||||
},
|
||||
"subresources": {
|
||||
"versions": {
|
||||
"methods": {
|
||||
"list": {
|
||||
"paginated": False,
|
||||
"endpoint": "get /v1/prompts/{prompt_id}/versions",
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
"conversations": {
|
||||
"models": {"conversation_object": "Conversation"},
|
||||
"methods": {
|
||||
"create": {"type": "http", "endpoint": "post /v1/conversations"},
|
||||
"retrieve": "get /v1/conversations/{conversation_id}",
|
||||
"update": {
|
||||
"type": "http",
|
||||
"endpoint": "post /v1/conversations/{conversation_id}",
|
||||
},
|
||||
"delete": {
|
||||
"type": "http",
|
||||
"endpoint": "delete /v1/conversations/{conversation_id}",
|
||||
},
|
||||
},
|
||||
"subresources": {
|
||||
"items": {
|
||||
"methods": {
|
||||
"get": {
|
||||
"type": "http",
|
||||
"endpoint": "get /v1/conversations/{conversation_id}/items/{item_id}",
|
||||
},
|
||||
"list": {
|
||||
"type": "http",
|
||||
"endpoint": "get /v1/conversations/{conversation_id}/items",
|
||||
},
|
||||
"create": {
|
||||
"type": "http",
|
||||
"endpoint": "post /v1/conversations/{conversation_id}/items",
|
||||
},
|
||||
"delete": {
|
||||
"type": "http",
|
||||
"endpoint": "delete /v1/conversations/{conversation_id}/items/{item_id}",
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
"inspect": {
|
||||
"models": {
|
||||
"healthInfo": "HealthInfo",
|
||||
"providerInfo": "ProviderInfo",
|
||||
"routeInfo": "RouteInfo",
|
||||
"versionInfo": "VersionInfo",
|
||||
},
|
||||
"methods": {"health": "get /v1/health", "version": "get /v1/version"},
|
||||
},
|
||||
"embeddings": {
|
||||
"models": {"create_embeddings_response": "OpenAIEmbeddingsResponse"},
|
||||
"methods": {"create": "post /v1/embeddings"},
|
||||
},
|
||||
"chat": {
|
||||
"models": {"chat_completion_chunk": "OpenAIChatCompletionChunk"},
|
||||
"subresources": {
|
||||
"completions": {
|
||||
"methods": {
|
||||
"create": {
|
||||
"type": "http",
|
||||
"streaming": {
|
||||
"stream_event_model": "chat.chat_completion_chunk",
|
||||
"param_discriminator": "stream",
|
||||
},
|
||||
"endpoint": "post /v1/chat/completions",
|
||||
},
|
||||
"list": {
|
||||
"type": "http",
|
||||
"paginated": False,
|
||||
"endpoint": "get /v1/chat/completions",
|
||||
},
|
||||
"retrieve": {
|
||||
"type": "http",
|
||||
"endpoint": "get /v1/chat/completions/{completion_id}",
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
"completions": {
|
||||
"methods": {
|
||||
"create": {
|
||||
"type": "http",
|
||||
"streaming": {"param_discriminator": "stream"},
|
||||
"endpoint": "post /v1/completions",
|
||||
}
|
||||
}
|
||||
},
|
||||
"vector_io": {
|
||||
"models": {"queryChunksResponse": "QueryChunksResponse"},
|
||||
"methods": {
|
||||
"insert": "post /v1/vector-io/insert",
|
||||
"query": "post /v1/vector-io/query",
|
||||
},
|
||||
},
|
||||
"vector_stores": {
|
||||
"models": {
|
||||
"vector_store": "VectorStoreObject",
|
||||
"list_vector_stores_response": "VectorStoreListResponse",
|
||||
"vector_store_delete_response": "VectorStoreDeleteResponse",
|
||||
"vector_store_search_response": "VectorStoreSearchResponsePage",
|
||||
},
|
||||
"methods": {
|
||||
"create": "post /v1/vector_stores",
|
||||
"list": "get /v1/vector_stores",
|
||||
"retrieve": "get /v1/vector_stores/{vector_store_id}",
|
||||
"update": "post /v1/vector_stores/{vector_store_id}",
|
||||
"delete": "delete /v1/vector_stores/{vector_store_id}",
|
||||
"search": "post /v1/vector_stores/{vector_store_id}/search",
|
||||
},
|
||||
"subresources": {
|
||||
"files": {
|
||||
"models": {"vector_store_file": "VectorStoreFileObject"},
|
||||
"methods": {
|
||||
"list": "get /v1/vector_stores/{vector_store_id}/files",
|
||||
"retrieve": "get /v1/vector_stores/{vector_store_id}/files/{file_id}",
|
||||
"update": "post /v1/vector_stores/{vector_store_id}/files/{file_id}",
|
||||
"delete": "delete /v1/vector_stores/{vector_store_id}/files/{file_id}",
|
||||
"create": "post /v1/vector_stores/{vector_store_id}/files",
|
||||
"content": "get /v1/vector_stores/{vector_store_id}/files/{file_id}/content",
|
||||
},
|
||||
},
|
||||
"file_batches": {
|
||||
"models": {
|
||||
"vector_store_file_batches": "VectorStoreFileBatchObject",
|
||||
"list_vector_store_files_in_batch_response": "VectorStoreFilesListInBatchResponse",
|
||||
},
|
||||
"methods": {
|
||||
"create": "post /v1/vector_stores/{vector_store_id}/file_batches",
|
||||
"retrieve": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}",
|
||||
"list_files": "get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files",
|
||||
"cancel": "post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"models": {
|
||||
"models": {
|
||||
"model": "OpenAIModel",
|
||||
"list_models_response": "OpenAIListModelsResponse",
|
||||
},
|
||||
"methods": {
|
||||
"list": {"paginated": False, "endpoint": "get /v1/models"},
|
||||
"retrieve": "get /v1/models/{model_id}",
|
||||
"register": "post /v1/models",
|
||||
"unregister": "delete /v1/models/{model_id}",
|
||||
},
|
||||
"subresources": {"openai": {"methods": {"list": {"paginated": False, "endpoint": "get /v1/models"}}}},
|
||||
},
|
||||
"providers": {
|
||||
"models": {"list_providers_response": "ListProvidersResponse"},
|
||||
"methods": {
|
||||
"list": {"paginated": False, "endpoint": "get /v1/providers"},
|
||||
"retrieve": "get /v1/providers/{provider_id}",
|
||||
},
|
||||
},
|
||||
"routes": {
|
||||
"models": {"list_routes_response": "ListRoutesResponse"},
|
||||
"methods": {"list": {"paginated": False, "endpoint": "get /v1/inspect/routes"}},
|
||||
},
|
||||
"moderations": {
|
||||
"models": {"create_response": "ModerationObject"},
|
||||
"methods": {"create": "post /v1/moderations"},
|
||||
},
|
||||
"safety": {
|
||||
"models": {"run_shield_response": "RunShieldResponse"},
|
||||
"methods": {"run_shield": "post /v1/safety/run-shield"},
|
||||
},
|
||||
"shields": {
|
||||
"models": {"shield": "Shield", "list_shields_response": "ListShieldsResponse"},
|
||||
"methods": {
|
||||
"retrieve": "get /v1/shields/{identifier}",
|
||||
"list": {"paginated": False, "endpoint": "get /v1/shields"},
|
||||
"register": "post /v1/shields",
|
||||
"delete": "delete /v1/shields/{identifier}",
|
||||
},
|
||||
},
|
||||
"scoring": {
|
||||
"methods": {
|
||||
"score": "post /v1/scoring/score",
|
||||
"score_batch": "post /v1/scoring/score-batch",
|
||||
}
|
||||
},
|
||||
"scoring_functions": {
|
||||
"models": {
|
||||
"scoring_fn": "ScoringFn",
|
||||
"scoring_fn_params": "ScoringFnParams",
|
||||
"list_scoring_functions_response": "ListScoringFunctionsResponse",
|
||||
},
|
||||
"methods": {
|
||||
"retrieve": "get /v1/scoring-functions/{scoring_fn_id}",
|
||||
"list": {"paginated": False, "endpoint": "get /v1/scoring-functions"},
|
||||
"register": "post /v1/scoring-functions",
|
||||
"unregister": "delete /v1/scoring-functions/{scoring_fn_id}",
|
||||
},
|
||||
},
|
||||
"files": {
|
||||
"models": {
|
||||
"file": "OpenAIFileObject",
|
||||
"list_files_response": "ListOpenAIFileResponse",
|
||||
"delete_file_response": "OpenAIFileDeleteResponse",
|
||||
},
|
||||
"methods": {
|
||||
"create": "post /v1/files",
|
||||
"list": "get /v1/files",
|
||||
"retrieve": "get /v1/files/{file_id}",
|
||||
"delete": "delete /v1/files/{file_id}",
|
||||
"content": "get /v1/files/{file_id}/content",
|
||||
},
|
||||
},
|
||||
"batches": {
|
||||
"methods": {
|
||||
"create": "post /v1/batches",
|
||||
"list": "get /v1/batches",
|
||||
"retrieve": "get /v1/batches/{batch_id}",
|
||||
"cancel": "post /v1/batches/{batch_id}/cancel",
|
||||
}
|
||||
},
|
||||
"alpha": {
|
||||
"subresources": {
|
||||
"inference": {"methods": {"rerank": "post /v1alpha/inference/rerank"}},
|
||||
"post_training": {
|
||||
"models": {
|
||||
"algorithm_config": "AlgorithmConfig",
|
||||
"post_training_job": "PostTrainingJob",
|
||||
"list_post_training_jobs_response": "ListPostTrainingJobsResponse",
|
||||
},
|
||||
"methods": {
|
||||
"preference_optimize": "post /v1alpha/post-training/preference-optimize",
|
||||
"supervised_fine_tune": "post /v1alpha/post-training/supervised-fine-tune",
|
||||
},
|
||||
"subresources": {
|
||||
"job": {
|
||||
"methods": {
|
||||
"artifacts": "get /v1alpha/post-training/job/artifacts",
|
||||
"cancel": "post /v1alpha/post-training/job/cancel",
|
||||
"status": "get /v1alpha/post-training/job/status",
|
||||
"list": {
|
||||
"paginated": False,
|
||||
"endpoint": "get /v1alpha/post-training/jobs",
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
"benchmarks": {
|
||||
"models": {
|
||||
"benchmark": "Benchmark",
|
||||
"list_benchmarks_response": "ListBenchmarksResponse",
|
||||
},
|
||||
"methods": {
|
||||
"retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}",
|
||||
"list": {
|
||||
"paginated": False,
|
||||
"endpoint": "get /v1alpha/eval/benchmarks",
|
||||
},
|
||||
"register": "post /v1alpha/eval/benchmarks",
|
||||
"unregister": "delete /v1alpha/eval/benchmarks/{benchmark_id}",
|
||||
},
|
||||
},
|
||||
"eval": {
|
||||
"models": {
|
||||
"evaluate_response": "EvaluateResponse",
|
||||
"benchmark_config": "BenchmarkConfig",
|
||||
"job": "Job",
|
||||
},
|
||||
"methods": {
|
||||
"evaluate_rows": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
|
||||
"run_eval": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
|
||||
"evaluate_rows_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations",
|
||||
"run_eval_alpha": "post /v1alpha/eval/benchmarks/{benchmark_id}/jobs",
|
||||
},
|
||||
"subresources": {
|
||||
"jobs": {
|
||||
"methods": {
|
||||
"cancel": "delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
|
||||
"status": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}",
|
||||
"retrieve": "get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result",
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
"beta": {
|
||||
"subresources": {
|
||||
"datasets": {
|
||||
"models": {"list_datasets_response": "ListDatasetsResponse"},
|
||||
"methods": {
|
||||
"register": "post /v1beta/datasets",
|
||||
"retrieve": "get /v1beta/datasets/{dataset_id}",
|
||||
"list": {"paginated": False, "endpoint": "get /v1beta/datasets"},
|
||||
"unregister": "delete /v1beta/datasets/{dataset_id}",
|
||||
"iterrows": "get /v1beta/datasetio/iterrows/{dataset_id}",
|
||||
"appendrows": "post /v1beta/datasetio/append-rows/{dataset_id}",
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
HTTP_METHODS = {"get", "post", "put", "patch", "delete", "options", "head"}
|
||||
|
||||
|
||||
@dataclass
|
||||
class Endpoint:
|
||||
method: str
|
||||
path: str
|
||||
extra: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def from_config(cls, value: Any) -> Endpoint:
|
||||
if isinstance(value, str):
|
||||
method, _, path = value.partition(" ")
|
||||
return cls._from_parts(method, path)
|
||||
if isinstance(value, dict) and "endpoint" in value:
|
||||
method, _, path = value["endpoint"].partition(" ")
|
||||
extra = {k: v for k, v in value.items() if k != "endpoint"}
|
||||
endpoint = cls._from_parts(method, path)
|
||||
endpoint.extra.update(extra)
|
||||
return endpoint
|
||||
raise ValueError(f"Unsupported endpoint value: {value!r}")
|
||||
|
||||
@classmethod
|
||||
def _from_parts(cls, method: str, path: str) -> Endpoint:
|
||||
method = method.strip().lower()
|
||||
path = path.strip()
|
||||
if method not in HTTP_METHODS:
|
||||
raise ValueError(f"Unsupported HTTP method for Stainless config: {method!r}")
|
||||
if not path.startswith("/"):
|
||||
raise ValueError(f"Endpoint path must start with '/': {path!r}")
|
||||
return cls(method=method, path=path)
|
||||
|
||||
def to_config(self) -> Any:
|
||||
if not self.extra:
|
||||
return f"{self.method} {self.path}"
|
||||
data = dict(self.extra)
|
||||
data["endpoint"] = f"{self.method} {self.path}"
|
||||
return data
|
||||
|
||||
def route_key(self) -> str:
|
||||
return f"{self.method} {self.path}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Resource:
|
||||
models: dict[str, str] | None = None
|
||||
methods: dict[str, Endpoint] = field(default_factory=dict)
|
||||
subresources: dict[str, Resource] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict[str, Any]) -> Resource:
|
||||
models = data.get("models")
|
||||
methods = {name: Endpoint.from_config(value) for name, value in data.get("methods", {}).items()}
|
||||
subresources = {name: cls.from_dict(value) for name, value in data.get("subresources", {}).items()}
|
||||
return cls(models=models, methods=methods, subresources=subresources)
|
||||
|
||||
def to_config(self) -> dict[str, Any]:
|
||||
result: dict[str, Any] = {}
|
||||
if self.models:
|
||||
result["models"] = self.models
|
||||
if self.methods:
|
||||
result["methods"] = {name: endpoint.to_config() for name, endpoint in self.methods.items()}
|
||||
if self.subresources:
|
||||
result["subresources"] = {name: resource.to_config() for name, resource in self.subresources.items()}
|
||||
return result
|
||||
|
||||
def collect_endpoint_paths(self) -> set[str]:
|
||||
paths = {endpoint.route_key() for endpoint in self.methods.values()}
|
||||
for subresource in self.subresources.values():
|
||||
paths.update(subresource.collect_endpoint_paths())
|
||||
return paths
|
||||
|
||||
def iter_endpoints(self, prefix: str) -> Iterator[tuple[str, str]]:
|
||||
for method_name, endpoint in self.methods.items():
|
||||
label = f"{prefix}.{method_name}" if prefix else method_name
|
||||
yield endpoint.route_key(), label
|
||||
for sub_name, subresource in self.subresources.items():
|
||||
sub_prefix = f"{prefix}.{sub_name}" if prefix else sub_name
|
||||
yield from subresource.iter_endpoints(sub_prefix)
|
||||
|
||||
|
||||
_RESOURCES = {name: Resource.from_dict(data) for name, data in ALL_RESOURCES.items()}
|
||||
|
||||
|
||||
def _load_openapi_paths(openapi_path: Path) -> set[str]:
|
||||
spec = yaml.safe_load(openapi_path.read_text()) or {}
|
||||
paths: set[str] = set()
|
||||
for path, path_item in (spec.get("paths") or {}).items():
|
||||
if not isinstance(path_item, dict):
|
||||
continue
|
||||
for method, operation in path_item.items():
|
||||
if not isinstance(operation, dict):
|
||||
continue
|
||||
paths.add(f"{str(method).lower()} {path}")
|
||||
return paths
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class StainlessConfig:
|
||||
organization: dict[str, Any]
|
||||
security: list[Any]
|
||||
security_schemes: dict[str, Any]
|
||||
targets: dict[str, Any]
|
||||
client_settings: dict[str, Any]
|
||||
environments: dict[str, Any]
|
||||
pagination: list[dict[str, Any]]
|
||||
settings: dict[str, Any]
|
||||
openapi: dict[str, Any]
|
||||
readme: dict[str, Any]
|
||||
resources: dict[str, Resource]
|
||||
|
||||
@classmethod
|
||||
def make(cls) -> StainlessConfig:
|
||||
return cls(
|
||||
organization=ORGANIZATION,
|
||||
security=SECURITY,
|
||||
security_schemes=SECURITY_SCHEMES,
|
||||
targets=TARGETS,
|
||||
client_settings=CLIENT_SETTINGS,
|
||||
environments=ENVIRONMENTS,
|
||||
pagination=PAGINATION,
|
||||
settings=SETTINGS,
|
||||
openapi=OPENAPI,
|
||||
readme=README,
|
||||
resources=dict(_RESOURCES),
|
||||
)
|
||||
|
||||
def referenced_paths(self) -> set[str]:
|
||||
paths: set[str] = set()
|
||||
for resource in self.resources.values():
|
||||
paths.update(resource.collect_endpoint_paths())
|
||||
paths.update(self.readme_endpoint_paths())
|
||||
return paths
|
||||
|
||||
def readme_endpoint_paths(self) -> set[str]:
|
||||
example_requests = self.readme.get("example_requests", {}) if self.readme else {}
|
||||
paths: set[str] = set()
|
||||
for entry in example_requests.values():
|
||||
endpoint = entry.get("endpoint") if isinstance(entry, dict) else None
|
||||
if isinstance(endpoint, str):
|
||||
method, _, route = endpoint.partition(" ")
|
||||
method = method.strip().lower()
|
||||
route = route.strip()
|
||||
if method and route:
|
||||
paths.add(f"{method} {route}")
|
||||
return paths
|
||||
|
||||
def endpoint_map(self) -> dict[str, list[str]]:
|
||||
mapping: dict[str, list[str]] = {}
|
||||
for resource_name, resource in self.resources.items():
|
||||
for route, label in resource.iter_endpoints(resource_name):
|
||||
mapping.setdefault(route, []).append(label)
|
||||
return mapping
|
||||
|
||||
def validate_unique_endpoints(self) -> None:
|
||||
duplicates: dict[str, list[str]] = {}
|
||||
for route, labels in self.endpoint_map().items():
|
||||
top_levels = {label.split(".", 1)[0] for label in labels}
|
||||
if len(top_levels) > 1:
|
||||
duplicates[route] = labels
|
||||
if duplicates:
|
||||
formatted = "\n".join(
|
||||
f" - {route} defined in: {', '.join(sorted(labels))}" for route, labels in sorted(duplicates.items())
|
||||
)
|
||||
raise ValueError("Duplicate endpoints found across resources:\n" + formatted)
|
||||
|
||||
def validate_readme_endpoints(self) -> None:
|
||||
resource_paths: set[str] = set()
|
||||
for resource in self.resources.values():
|
||||
resource_paths.update(resource.collect_endpoint_paths())
|
||||
missing = sorted(path for path in self.readme_endpoint_paths() if path not in resource_paths)
|
||||
if missing:
|
||||
formatted = "\n".join(f" - {path}" for path in missing)
|
||||
raise ValueError("README example endpoints are not present in Stainless resources:\n" + formatted)
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
cfg: dict[str, Any] = {}
|
||||
for section in SECTION_ORDER:
|
||||
if section == "resources":
|
||||
cfg[section] = {name: resource.to_config() for name, resource in self.resources.items()}
|
||||
continue
|
||||
cfg[section] = getattr(self, section)
|
||||
return cfg
|
||||
|
||||
def validate_against_openapi(self, openapi_path: Path) -> None:
|
||||
if not openapi_path.exists():
|
||||
raise FileNotFoundError(f"OpenAPI spec not found at {openapi_path}")
|
||||
spec_paths = _load_openapi_paths(openapi_path)
|
||||
config_paths = self.referenced_paths()
|
||||
missing = sorted(path for path in config_paths if path not in spec_paths)
|
||||
if missing:
|
||||
formatted = "\n".join(f" - {path}" for path in missing)
|
||||
raise ValueError("Stainless config references missing endpoints:\n" + formatted)
|
||||
|
||||
def validate(self, openapi_path: Path | None = None) -> None:
|
||||
self.validate_unique_endpoints()
|
||||
self.validate_readme_endpoints()
|
||||
if openapi_path is not None:
|
||||
self.validate_against_openapi(openapi_path)
|
||||
|
||||
|
||||
def build_config() -> dict[str, Any]:
|
||||
return StainlessConfig.make().to_dict()
|
||||
|
||||
|
||||
def write_config(repo_root: Path, openapi_path: Path | None = None) -> Path:
|
||||
stainless_config = StainlessConfig.make()
|
||||
spec_path = (openapi_path or (repo_root / "client-sdks" / "stainless" / "openapi.yml")).resolve()
|
||||
stainless_config.validate(spec_path)
|
||||
yaml_text = yaml.safe_dump(stainless_config.to_dict(), sort_keys=False)
|
||||
output = repo_root / "client-sdks" / "stainless" / "config.yml"
|
||||
output.write_text(HEADER + yaml_text)
|
||||
return output
|
||||
|
||||
|
||||
def main() -> None:
|
||||
repo_root = Path(__file__).resolve().parents[3]
|
||||
output = write_config(repo_root)
|
||||
print(f"Wrote Stainless config: {output}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -8,7 +8,8 @@
 import subprocess
 import sys
 from pathlib import Path
-from typing import Any
+from types import UnionType
+from typing import Annotated, Any, Union, get_args, get_origin

 from pydantic_core import PydanticUndefined
 from rich.progress import Progress, SpinnerColumn, TextColumn
@@ -51,6 +52,41 @@ class ChangedPathTracker:
         return self._changed_paths


+def extract_type_annotation(annotation: Any) -> str:
+    """extract a type annotation into a clean string representation."""
+    if annotation is None:
+        return "Any"
+
+    if annotation is type(None):
+        return "None"
+
+    origin = get_origin(annotation)
+    args = get_args(annotation)
+
+    # recursive workaround for Annotated types to ignore FieldInfo part
+    if origin is Annotated and args:
+        return extract_type_annotation(args[0])
+
+    if origin in [Union, UnionType]:
+        non_none_args = [arg for arg in args if arg is not type(None)]
+        has_none = len(non_none_args) < len(args)
+
+        if len(non_none_args) == 1:
+            formatted = extract_type_annotation(non_none_args[0])
+            return f"{formatted} | None" if has_none else formatted
+        else:
+            formatted_args = [extract_type_annotation(arg) for arg in non_none_args]
+            result = " | ".join(formatted_args)
+            return f"{result} | None" if has_none else result
+
+    if origin is not None and args:
+        origin_name = getattr(origin, "__name__", str(origin))
+        formatted_args = [extract_type_annotation(arg) for arg in args]
+        return f"{origin_name}[{', '.join(formatted_args)}]"
+
+    return annotation.__name__ if hasattr(annotation, "__name__") else str(annotation)
+
+
 def get_config_class_info(config_class_path: str) -> dict[str, Any]:
     """Extract configuration information from a config class."""
     try:
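For a sense of the output this replaces the string-mangling with, a few illustrative calls (expected results shown in the comments; this snippet is not part of the change):

from typing import Annotated
from pydantic import Field

print(extract_type_annotation(str | None))                       # str | None
print(extract_type_annotation(dict[str, int]))                   # dict[str, int]
print(extract_type_annotation(Annotated[int, Field(ge=1)]))      # int  (FieldInfo metadata dropped)
print(extract_type_annotation(list[str] | dict[str, int] | None))  # list[str] | dict[str, int] | None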
@@ -78,14 +114,8 @@ def get_config_class_info(config_class_path: str) -> dict[str, Any]:
         for field_name, field in config_class.model_fields.items():
             if getattr(field, "exclude", False):
                 continue
-            field_type = str(field.annotation) if field.annotation else "Any"
-
-            # this string replace is ridiculous
-            field_type = field_type.replace("typing.", "").replace("Optional[", "").replace("]", "")
-            field_type = field_type.replace("Annotated[", "").replace("FieldInfo(", "").replace(")", "")
-            field_type = field_type.replace("llama_stack_api.inference.", "")
-            field_type = field_type.replace("llama_stack.providers.", "")
-            field_type = field_type.replace("llama_stack_api.datatypes.", "")
+            field_type = extract_type_annotation(field.annotation)

             default_value = field.default
             if field.default_factory is not None:
@@ -345,8 +375,16 @@ def generate_index_docs(api_name: str, api_docstring: str | None, provider_entri
     # Add YAML frontmatter for index
     md_lines.append("---")
     if api_docstring:
-        clean_desc = api_docstring.strip().replace('"', '\\"')
-        md_lines.append(f'description: "{clean_desc}"')
+        # Handle multi-line descriptions in YAML
+        if "\n" in api_docstring.strip():
+            md_lines.append("description: |")
+            for line in api_docstring.strip().split("\n"):
+                # Avoid trailing whitespace by only adding spaces to non-empty lines
+                md_lines.append(f" {line}" if line.strip() else "")
+        else:
+            # For single line descriptions, format properly for YAML
+            clean_desc = api_docstring.strip().replace('"', '\\"')
+            md_lines.append(f'description: "{clean_desc}"')
     md_lines.append(f"sidebar_label: {sidebar_label}")
     md_lines.append(f"title: {api_name.title()}")
     md_lines.append("---")
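To make the branch above concrete, here is a condensed sketch of the frontmatter it emits for a made-up multi-line docstring (illustrative only; the escaping step is omitted since this example does not need it):

md_lines = ["---"]
api_docstring = "Example API.\nIt spans two lines."  # made-up docstring
if "\n" in api_docstring.strip():
    md_lines.append("description: |")
    for line in api_docstring.strip().split("\n"):
        md_lines.append(f" {line}" if line.strip() else "")
else:
    md_lines.append(f'description: "{api_docstring.strip()}"')
print("\n".join(md_lines))
# ---
# description: |
#  Example API.
#  It spans two lines.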
@@ -17,3 +17,5 @@ PYTHONPATH=$PYTHONPATH:$stack_dir \
 python3 -m scripts.openapi_generator "$stack_dir"/docs/static

 cp "$stack_dir"/docs/static/stainless-llama-stack-spec.yaml "$stack_dir"/client-sdks/stainless/openapi.yml
+PYTHONPATH=$PYTHONPATH:$stack_dir \
+    python3 -m scripts.openapi_generator.stainless_config.generate_config