From 4a3f9151e30e0fee8fb2e25e02beebffdb17df8c Mon Sep 17 00:00:00 2001 From: Charlie Doern Date: Mon, 24 Nov 2025 11:32:53 -0500 Subject: [PATCH] fix: rename StackRunConfig to StackConfig since this object represents our config for list-deps, run, etc., let's rename it to simply `StackConfig` Signed-off-by: Charlie Doern --- src/llama_stack/cli/stack/_list_deps.py | 6 +++--- src/llama_stack/cli/stack/run.py | 8 ++++---- src/llama_stack/cli/stack/utils.py | 4 ++-- src/llama_stack/core/build.py | 6 +++--- src/llama_stack/core/configure.py | 8 ++++---- .../core/conversations/conversations.py | 4 ++-- src/llama_stack/core/datatypes.py | 4 ++-- src/llama_stack/core/external.py | 4 ++-- src/llama_stack/core/inspect.py | 6 +++--- src/llama_stack/core/prompts/prompts.py | 4 ++-- src/llama_stack/core/providers.py | 6 +++--- src/llama_stack/core/resolver.py | 12 ++++++------ src/llama_stack/core/routers/__init__.py | 4 ++-- src/llama_stack/core/server/server.py | 8 ++++---- src/llama_stack/core/stack.py | 18 +++++++++--------- tests/backward_compat/test_run_config.py | 4 ++-- .../test_persistence_integration.py | 6 +++--- tests/unit/conversations/test_conversations.py | 6 +++--- tests/unit/core/test_stack_validation.py | 6 +++--- tests/unit/core/test_storage_references.py | 4 ++-- tests/unit/distribution/test_distribution.py | 6 +++--- tests/unit/prompts/prompts/conftest.py | 4 ++-- tests/unit/server/test_resolver.py | 6 +++--- 23 files changed, 72 insertions(+), 72 deletions(-) diff --git a/src/llama_stack/cli/stack/_list_deps.py b/src/llama_stack/cli/stack/_list_deps.py index f11ce486a..80b67ce62 100644 --- a/src/llama_stack/cli/stack/_list_deps.py +++ b/src/llama_stack/cli/stack/_list_deps.py @@ -12,7 +12,7 @@ import yaml from termcolor import cprint from llama_stack.core.build import get_provider_dependencies -from llama_stack.core.datatypes import Provider, StackRunConfig +from llama_stack.core.datatypes import Provider, StackConfig from llama_stack.core.distribution 
import get_provider_registry from llama_stack.log import get_logger from llama_stack_api import Api @@ -78,7 +78,7 @@ def run_stack_list_deps_command(args: argparse.Namespace) -> None: with open(config_file) as f: try: contents = yaml.safe_load(f) - run_config = StackRunConfig(**contents) + run_config = StackConfig(**contents) except Exception as e: cprint( f"Could not parse config file {config_file}: {e}", @@ -119,7 +119,7 @@ def run_stack_list_deps_command(args: argparse.Namespace) -> None: file=sys.stderr, ) sys.exit(1) - run_config = StackRunConfig(providers=provider_list, image_name="providers-run") + run_config = StackConfig(providers=provider_list, image_name="providers-run") normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(run_config) normal_deps += SERVER_DEPENDENCIES diff --git a/src/llama_stack/cli/stack/run.py b/src/llama_stack/cli/stack/run.py index 73d8d13d5..c81ac52f4 100644 --- a/src/llama_stack/cli/stack/run.py +++ b/src/llama_stack/cli/stack/run.py @@ -17,7 +17,7 @@ from termcolor import cprint from llama_stack.cli.stack.utils import ImageType from llama_stack.cli.subcommand import Subcommand -from llama_stack.core.datatypes import Api, Provider, StackRunConfig +from llama_stack.core.datatypes import Api, Provider, StackConfig from llama_stack.core.distribution import get_provider_registry from llama_stack.core.stack import cast_image_name_to_string, replace_env_vars from llama_stack.core.storage.datatypes import ( @@ -156,7 +156,7 @@ class StackRun(Subcommand): # Write config to disk in providers-run directory distro_dir = DISTRIBS_BASE_DIR / "providers-run" - config_file = distro_dir / "run.yaml" + config_file = distro_dir / "config.yaml" logger.info(f"Writing generated config to: {config_file}") with open(config_file, "w") as f: @@ -194,7 +194,7 @@ class StackRun(Subcommand): logger_config = LoggingConfig(**cfg) else: logger_config = None - config = 
StackRunConfig(**cast_image_name_to_string(replace_env_vars(config_contents))) + config = StackConfig(**cast_image_name_to_string(replace_env_vars(config_contents))) port = args.port or config.server.port host = config.server.host or "0.0.0.0" @@ -318,7 +318,7 @@ class StackRun(Subcommand): ), ) - return StackRunConfig( + return StackConfig( image_name="providers-run", apis=apis, providers=providers, diff --git a/src/llama_stack/cli/stack/utils.py b/src/llama_stack/cli/stack/utils.py index d49b142e0..cb4c754d9 100644 --- a/src/llama_stack/cli/stack/utils.py +++ b/src/llama_stack/cli/stack/utils.py @@ -16,7 +16,7 @@ from termcolor import cprint from llama_stack.core.datatypes import ( BuildConfig, Provider, - StackRunConfig, + StackConfig, StorageConfig, ) from llama_stack.core.distribution import get_provider_registry @@ -61,7 +61,7 @@ def generate_run_config( """ apis = list(build_config.distribution_spec.providers.keys()) distro_dir = DISTRIBS_BASE_DIR / image_name - run_config = StackRunConfig( + run_config = StackConfig( container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None), image_name=image_name, apis=apis, diff --git a/src/llama_stack/core/build.py b/src/llama_stack/core/build.py index 630916db4..4e6ccc9f7 100644 --- a/src/llama_stack/core/build.py +++ b/src/llama_stack/core/build.py @@ -9,7 +9,7 @@ import sys from pydantic import BaseModel from termcolor import cprint -from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.datatypes import StackConfig from llama_stack.core.distribution import get_provider_registry from llama_stack.distributions.template import DistributionTemplate from llama_stack.log import get_logger @@ -36,7 +36,7 @@ class ApiInput(BaseModel): def get_provider_dependencies( - config: StackRunConfig, + config: StackConfig, ) -> tuple[list[str], list[str], list[str]]: """Get normal and special dependencies from provider configuration.""" if isinstance(config, 
DistributionTemplate): @@ -83,7 +83,7 @@ def get_provider_dependencies( return list(set(normal_deps)), list(set(special_deps)), list(set(external_provider_deps)) -def print_pip_install_help(config: StackRunConfig): +def print_pip_install_help(config: StackConfig): normal_deps, special_deps, _ = get_provider_dependencies(config) cprint( diff --git a/src/llama_stack/core/configure.py b/src/llama_stack/core/configure.py index d738b8a61..7ec5b0864 100644 --- a/src/llama_stack/core/configure.py +++ b/src/llama_stack/core/configure.py @@ -10,7 +10,7 @@ from llama_stack.core.datatypes import ( LLAMA_STACK_RUN_CONFIG_VERSION, DistributionSpec, Provider, - StackRunConfig, + StackConfig, ) from llama_stack.core.distribution import ( builtin_automatically_routed_apis, @@ -44,7 +44,7 @@ def configure_single_provider(registry: dict[str, ProviderSpec], provider: Provi ) -def configure_api_providers(config: StackRunConfig, build_spec: DistributionSpec) -> StackRunConfig: +def configure_api_providers(config: StackConfig, build_spec: DistributionSpec) -> StackConfig: is_nux = len(config.providers) == 0 if is_nux: @@ -192,7 +192,7 @@ def upgrade_from_routing_table( return config_dict -def parse_and_maybe_upgrade_config(config_dict: dict[str, Any]) -> StackRunConfig: +def parse_and_maybe_upgrade_config(config_dict: dict[str, Any]) -> StackConfig: if "routing_table" in config_dict: logger.info("Upgrading config...") config_dict = upgrade_from_routing_table(config_dict) @@ -200,4 +200,4 @@ def parse_and_maybe_upgrade_config(config_dict: dict[str, Any]) -> StackRunConfi config_dict["version"] = LLAMA_STACK_RUN_CONFIG_VERSION processed_config_dict = replace_env_vars(config_dict) - return StackRunConfig(**cast_image_name_to_string(processed_config_dict)) + return StackConfig(**cast_image_name_to_string(processed_config_dict)) diff --git a/src/llama_stack/core/conversations/conversations.py b/src/llama_stack/core/conversations/conversations.py index 90402439b..3e867721e 100644 --- 
a/src/llama_stack/core/conversations/conversations.py +++ b/src/llama_stack/core/conversations/conversations.py @@ -10,7 +10,7 @@ from typing import Any, Literal from pydantic import BaseModel, TypeAdapter -from llama_stack.core.datatypes import AccessRule, StackRunConfig +from llama_stack.core.datatypes import AccessRule, StackConfig from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl from llama_stack.log import get_logger @@ -36,7 +36,7 @@ class ConversationServiceConfig(BaseModel): :param policy: Access control rules """ - run_config: StackRunConfig + run_config: StackConfig policy: list[AccessRule] = [] diff --git a/src/llama_stack/core/datatypes.py b/src/llama_stack/core/datatypes.py index 94d235678..5ab2b43dc 100644 --- a/src/llama_stack/core/datatypes.py +++ b/src/llama_stack/core/datatypes.py @@ -490,7 +490,7 @@ class ServerConfig(BaseModel): ) -class StackRunConfig(BaseModel): +class StackConfig(BaseModel): version: int = LLAMA_STACK_RUN_CONFIG_VERSION image_name: str = Field( @@ -565,7 +565,7 @@ can be instantiated multiple times (with different configs) if necessary. 
return v @model_validator(mode="after") - def validate_server_stores(self) -> "StackRunConfig": + def validate_server_stores(self) -> "StackConfig": backend_map = self.storage.backends stores = self.storage.stores kv_backends = { diff --git a/src/llama_stack/core/external.py b/src/llama_stack/core/external.py index d1a2d6e42..aa2a0c2c9 100644 --- a/src/llama_stack/core/external.py +++ b/src/llama_stack/core/external.py @@ -7,14 +7,14 @@ import yaml -from llama_stack.core.datatypes import BuildConfig, StackRunConfig +from llama_stack.core.datatypes import BuildConfig, StackConfig from llama_stack.log import get_logger from llama_stack_api import Api, ExternalApiSpec logger = get_logger(name=__name__, category="core") -def load_external_apis(config: StackRunConfig | BuildConfig | None) -> dict[Api, ExternalApiSpec]: +def load_external_apis(config: StackConfig | BuildConfig | None) -> dict[Api, ExternalApiSpec]: """Load external API specifications from the configured directory. Args: diff --git a/src/llama_stack/core/inspect.py b/src/llama_stack/core/inspect.py index 272c9d1bc..3b60027f0 100644 --- a/src/llama_stack/core/inspect.py +++ b/src/llama_stack/core/inspect.py @@ -8,7 +8,7 @@ from importlib.metadata import version from pydantic import BaseModel -from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.datatypes import StackConfig from llama_stack.core.external import load_external_apis from llama_stack.core.server.routes import get_all_api_routes from llama_stack_api import ( @@ -22,7 +22,7 @@ from llama_stack_api import ( class DistributionInspectConfig(BaseModel): - run_config: StackRunConfig + run_config: StackConfig async def get_provider_impl(config, deps): @@ -40,7 +40,7 @@ class DistributionInspectImpl(Inspect): pass async def list_routes(self, api_filter: str | None = None) -> ListRoutesResponse: - run_config: StackRunConfig = self.config.run_config + run_config: StackConfig = self.config.run_config # Helper function to determine 
if a route should be included based on api_filter def should_include_route(webmethod) -> bool: diff --git a/src/llama_stack/core/prompts/prompts.py b/src/llama_stack/core/prompts/prompts.py index ff67ad138..40539f342 100644 --- a/src/llama_stack/core/prompts/prompts.py +++ b/src/llama_stack/core/prompts/prompts.py @@ -9,7 +9,7 @@ from typing import Any from pydantic import BaseModel -from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.datatypes import StackConfig from llama_stack.core.storage.kvstore import KVStore, kvstore_impl from llama_stack_api import ListPromptsResponse, Prompt, Prompts @@ -20,7 +20,7 @@ class PromptServiceConfig(BaseModel): :param run_config: Stack run configuration containing distribution info """ - run_config: StackRunConfig + run_config: StackConfig async def get_provider_impl(config: PromptServiceConfig, deps: dict[Any, Any]): diff --git a/src/llama_stack/core/providers.py b/src/llama_stack/core/providers.py index e3fe3c7b3..1f0ecae6f 100644 --- a/src/llama_stack/core/providers.py +++ b/src/llama_stack/core/providers.py @@ -12,14 +12,14 @@ from pydantic import BaseModel from llama_stack.log import get_logger from llama_stack_api import HealthResponse, HealthStatus, ListProvidersResponse, ProviderInfo, Providers -from .datatypes import StackRunConfig +from .datatypes import StackConfig from .utils.config import redact_sensitive_fields logger = get_logger(name=__name__, category="core") class ProviderImplConfig(BaseModel): - run_config: StackRunConfig + run_config: StackConfig async def get_provider_impl(config, deps): @@ -42,7 +42,7 @@ class ProviderImpl(Providers): async def list_providers(self) -> ListProvidersResponse: run_config = self.config.run_config - safe_config = StackRunConfig(**redact_sensitive_fields(run_config.model_dump())) + safe_config = StackConfig(**redact_sensitive_fields(run_config.model_dump())) providers_health = await self.get_providers_health() ret = [] for api, providers in 
safe_config.providers.items(): diff --git a/src/llama_stack/core/resolver.py b/src/llama_stack/core/resolver.py index 6bc32c2d0..193131ace 100644 --- a/src/llama_stack/core/resolver.py +++ b/src/llama_stack/core/resolver.py @@ -14,7 +14,7 @@ from llama_stack.core.datatypes import ( AutoRoutedProviderSpec, Provider, RoutingTableProviderSpec, - StackRunConfig, + StackConfig, ) from llama_stack.core.distribution import builtin_automatically_routed_apis from llama_stack.core.external import load_external_apis @@ -147,7 +147,7 @@ ProviderRegistry = dict[Api, dict[str, ProviderSpec]] async def resolve_impls( - run_config: StackRunConfig, + run_config: StackConfig, provider_registry: ProviderRegistry, dist_registry: DistributionRegistry, policy: list[AccessRule], @@ -217,7 +217,7 @@ def specs_for_autorouted_apis(apis_to_serve: list[str] | set[str]) -> dict[str, def validate_and_prepare_providers( - run_config: StackRunConfig, provider_registry: ProviderRegistry, routing_table_apis: set[Api], router_apis: set[Api] + run_config: StackConfig, provider_registry: ProviderRegistry, routing_table_apis: set[Api], router_apis: set[Api] ) -> dict[str, dict[str, ProviderWithSpec]]: """Validates providers, handles deprecations, and organizes them into a spec dictionary.""" providers_with_specs: dict[str, dict[str, ProviderWithSpec]] = {} @@ -261,7 +261,7 @@ def validate_provider(provider: Provider, api: Api, provider_registry: ProviderR def sort_providers_by_deps( - providers_with_specs: dict[str, dict[str, ProviderWithSpec]], run_config: StackRunConfig + providers_with_specs: dict[str, dict[str, ProviderWithSpec]], run_config: StackConfig ) -> list[tuple[str, ProviderWithSpec]]: """Sorts providers based on their dependencies.""" sorted_providers: list[tuple[str, ProviderWithSpec]] = topological_sort( @@ -278,7 +278,7 @@ async def instantiate_providers( sorted_providers: list[tuple[str, ProviderWithSpec]], router_apis: set[Api], dist_registry: DistributionRegistry, - run_config: 
StackRunConfig, + run_config: StackConfig, policy: list[AccessRule], internal_impls: dict[Api, Any] | None = None, ) -> dict[Api, Any]: @@ -357,7 +357,7 @@ async def instantiate_provider( deps: dict[Api, Any], inner_impls: dict[str, Any], dist_registry: DistributionRegistry, - run_config: StackRunConfig, + run_config: StackConfig, policy: list[AccessRule], ): provider_spec = provider.spec diff --git a/src/llama_stack/core/routers/__init__.py b/src/llama_stack/core/routers/__init__.py index 289755bcb..1b7fe3556 100644 --- a/src/llama_stack/core/routers/__init__.py +++ b/src/llama_stack/core/routers/__init__.py @@ -10,7 +10,7 @@ from llama_stack.core.datatypes import ( AccessRule, RoutedProtocol, ) -from llama_stack.core.stack import StackRunConfig +from llama_stack.core.datatypes import StackConfig from llama_stack.core.store import DistributionRegistry from llama_stack.providers.utils.inference.inference_store import InferenceStore from llama_stack_api import Api, RoutingTable @@ -51,7 +51,7 @@ async def get_routing_table_impl( async def get_auto_router_impl( - api: Api, routing_table: RoutingTable, deps: dict[str, Any], run_config: StackRunConfig, policy: list[AccessRule] + api: Api, routing_table: RoutingTable, deps: dict[str, Any], run_config: StackConfig, policy: list[AccessRule] ) -> Any: from .datasets import DatasetIORouter from .eval_scoring import EvalRouter, ScoringRouter diff --git a/src/llama_stack/core/server/server.py b/src/llama_stack/core/server/server.py index 0d3513980..0ab7ce260 100644 --- a/src/llama_stack/core/server/server.py +++ b/src/llama_stack/core/server/server.py @@ -34,7 +34,7 @@ from pydantic import BaseModel, ValidationError from llama_stack.core.access_control.access_control import AccessDeniedError from llama_stack.core.datatypes import ( AuthenticationRequiredError, - StackRunConfig, + StackConfig, process_cors_config, ) from llama_stack.core.distribution import builtin_automatically_routed_apis @@ -149,7 +149,7 @@ class 
StackApp(FastAPI): start background tasks (e.g. refresh model registry periodically) from the lifespan context manager. """ - def __init__(self, config: StackRunConfig, *args, **kwargs): + def __init__(self, config: StackConfig, *args, **kwargs): super().__init__(*args, **kwargs) self.stack: Stack = Stack(config) @@ -385,7 +385,7 @@ def create_app() -> StackApp: logger = get_logger(name=__name__, category="core::server", config=logger_config) config = replace_env_vars(config_contents) - config = StackRunConfig(**cast_image_name_to_string(config)) + config = StackConfig(**cast_image_name_to_string(config)) _log_run_config(run_config=config) @@ -506,7 +506,7 @@ def create_app() -> StackApp: return app -def _log_run_config(run_config: StackRunConfig): +def _log_run_config(run_config: StackConfig): """Logs the run config with redacted fields and disabled providers removed.""" logger.info("Run configuration:") safe_config = redact_sensitive_fields(run_config.model_dump(mode="json")) diff --git a/src/llama_stack/core/stack.py b/src/llama_stack/core/stack.py index 8ba1f2afd..554fae303 100644 --- a/src/llama_stack/core/stack.py +++ b/src/llama_stack/core/stack.py @@ -14,7 +14,7 @@ from typing import Any import yaml from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl -from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig +from llama_stack.core.datatypes import Provider, SafetyConfig, StackConfig, VectorStoresConfig from llama_stack.core.distribution import get_provider_registry from llama_stack.core.inspect import DistributionInspectConfig, DistributionInspectImpl from llama_stack.core.prompts.prompts import PromptServiceConfig, PromptServiceImpl @@ -108,7 +108,7 @@ REGISTRY_REFRESH_TASK = None TEST_RECORDING_CONTEXT = None -async def register_resources(run_config: StackRunConfig, impls: dict[Api, Any]): +async def register_resources(run_config: StackConfig, impls: dict[Api, 
Any]): for rsrc, api, register_method, list_method in RESOURCES: objects = getattr(run_config.registered_resources, rsrc) if api not in impls: @@ -341,7 +341,7 @@ def cast_image_name_to_string(config_dict: dict[str, Any]) -> dict[str, Any]: return config_dict -def add_internal_implementations(impls: dict[Api, Any], run_config: StackRunConfig) -> None: +def add_internal_implementations(impls: dict[Api, Any], run_config: StackConfig) -> None: """Add internal implementations (inspect and providers) to the implementations dictionary. Args: @@ -373,7 +373,7 @@ def add_internal_implementations(impls: dict[Api, Any], run_config: StackRunConf impls[Api.conversations] = conversations_impl -def _initialize_storage(run_config: StackRunConfig): +def _initialize_storage(run_config: StackConfig): kv_backends: dict[str, StorageBackendConfig] = {} sql_backends: dict[str, StorageBackendConfig] = {} for backend_name, backend_config in run_config.storage.backends.items(): @@ -393,7 +393,7 @@ def _initialize_storage(run_config: StackRunConfig): class Stack: - def __init__(self, run_config: StackRunConfig, provider_registry: ProviderRegistry | None = None): + def __init__(self, run_config: StackConfig, provider_registry: ProviderRegistry | None = None): self.run_config = run_config self.provider_registry = provider_registry self.impls = None @@ -499,7 +499,7 @@ async def refresh_registry_task(impls: dict[Api, Any]): await asyncio.sleep(REGISTRY_REFRESH_INTERVAL_SECONDS) -def get_stack_run_config_from_distro(distro: str) -> StackRunConfig: +def get_stack_run_config_from_distro(distro: str) -> StackConfig: distro_path = importlib.resources.files("llama_stack") / f"distributions/{distro}/run.yaml" with importlib.resources.as_file(distro_path) as path: @@ -507,12 +507,12 @@ def get_stack_run_config_from_distro(distro: str) -> StackRunConfig: raise ValueError(f"Distribution '{distro}' not found at {distro_path}") run_config = yaml.safe_load(path.open()) - return 
StackRunConfig(**replace_env_vars(run_config)) + return StackConfig(**replace_env_vars(run_config)) def run_config_from_adhoc_config_spec( adhoc_config_spec: str, provider_registry: ProviderRegistry | None = None -) -> StackRunConfig: +) -> StackConfig: """ Create an adhoc distribution from a list of API providers. @@ -552,7 +552,7 @@ def run_config_from_adhoc_config_spec( config=provider_config, ) ] - config = StackRunConfig( + config = StackConfig( image_name="distro-test", apis=list(provider_configs_by_api.keys()), providers=provider_configs_by_api, diff --git a/tests/backward_compat/test_run_config.py b/tests/backward_compat/test_run_config.py index 13aac85e4..ccc18c84f 100644 --- a/tests/backward_compat/test_run_config.py +++ b/tests/backward_compat/test_run_config.py @@ -17,7 +17,7 @@ from pathlib import Path import pytest import yaml -from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.datatypes import StackConfig def get_test_configs(): @@ -49,4 +49,4 @@ def test_load_run_config(config_file): with open(config_file) as f: config_data = yaml.safe_load(f) - StackRunConfig.model_validate(config_data) + StackConfig.model_validate(config_data) diff --git a/tests/integration/test_persistence_integration.py b/tests/integration/test_persistence_integration.py index e9b80dc0c..ff42b451f 100644 --- a/tests/integration/test_persistence_integration.py +++ b/tests/integration/test_persistence_integration.py @@ -6,7 +6,7 @@ import yaml -from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.datatypes import StackConfig from llama_stack.core.storage.datatypes import ( PostgresKVStoreConfig, PostgresSqlStoreConfig, @@ -20,7 +20,7 @@ def test_starter_distribution_config_loads_and_resolves(): with open("llama_stack/distributions/starter/run.yaml") as f: config_dict = yaml.safe_load(f) - config = StackRunConfig(**config_dict) + config = StackConfig(**config_dict) # Config should have named backends and explicit store references 
assert config.storage is not None @@ -50,7 +50,7 @@ def test_postgres_demo_distribution_config_loads(): with open("llama_stack/distributions/postgres-demo/run.yaml") as f: config_dict = yaml.safe_load(f) - config = StackRunConfig(**config_dict) + config = StackConfig(**config_dict) # Should have postgres backend assert config.storage is not None diff --git a/tests/unit/conversations/test_conversations.py b/tests/unit/conversations/test_conversations.py index 3f9df5fc0..b481be63c 100644 --- a/tests/unit/conversations/test_conversations.py +++ b/tests/unit/conversations/test_conversations.py @@ -16,7 +16,7 @@ from llama_stack.core.conversations.conversations import ( ConversationServiceConfig, ConversationServiceImpl, ) -from llama_stack.core.datatypes import StackRunConfig +from llama_stack.core.datatypes import StackConfig from llama_stack.core.storage.datatypes import ( ServerStoresConfig, SqliteSqlStoreConfig, @@ -44,7 +44,7 @@ async def service(): ), ) register_sqlstore_backends({"sql_test": storage.backends["sql_test"]}) - run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage) + run_config = StackConfig(image_name="test", apis=[], providers={}, storage=storage) config = ConversationServiceConfig(run_config=run_config, policy=[]) service = ConversationServiceImpl(config, {}) @@ -151,7 +151,7 @@ async def test_policy_configuration(): ), ) register_sqlstore_backends({"sql_test": storage.backends["sql_test"]}) - run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage) + run_config = StackConfig(image_name="test", apis=[], providers={}, storage=storage) config = ConversationServiceConfig(run_config=run_config, policy=restrictive_policy) service = ConversationServiceImpl(config, {}) diff --git a/tests/unit/core/test_stack_validation.py b/tests/unit/core/test_stack_validation.py index 5f75bc522..06c274a21 100644 --- a/tests/unit/core/test_stack_validation.py +++ b/tests/unit/core/test_stack_validation.py @@ 
-10,7 +10,7 @@ from unittest.mock import AsyncMock import pytest -from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, VectorStoresConfig +from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackConfig, VectorStoresConfig from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config from llama_stack.core.storage.datatypes import ServerStoresConfig, StorageConfig from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield @@ -19,7 +19,7 @@ from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, class TestVectorStoresValidation: async def test_validate_missing_model(self): """Test validation fails when model not found.""" - run_config = StackRunConfig( + run_config = StackConfig( image_name="test", providers={}, storage=StorageConfig( @@ -47,7 +47,7 @@ class TestVectorStoresValidation: async def test_validate_success(self): """Test validation passes with valid model.""" - run_config = StackRunConfig( + run_config = StackConfig( image_name="test", providers={}, storage=StorageConfig( diff --git a/tests/unit/core/test_storage_references.py b/tests/unit/core/test_storage_references.py index 7bceba74d..68afa5b73 100644 --- a/tests/unit/core/test_storage_references.py +++ b/tests/unit/core/test_storage_references.py @@ -11,7 +11,7 @@ from pydantic import ValidationError from llama_stack.core.datatypes import ( LLAMA_STACK_RUN_CONFIG_VERSION, - StackRunConfig, + StackConfig, ) from llama_stack.core.storage.datatypes import ( InferenceStoreReference, @@ -51,7 +51,7 @@ def _base_run_config(**overrides): ), ), ) - return StackRunConfig( + return StackConfig( version=LLAMA_STACK_RUN_CONFIG_VERSION, image_name="test-distro", apis=[], diff --git a/tests/unit/distribution/test_distribution.py b/tests/unit/distribution/test_distribution.py index b8d6ba55d..b8ff484a7 100644 --- a/tests/unit/distribution/test_distribution.py +++ 
b/tests/unit/distribution/test_distribution.py @@ -11,7 +11,7 @@ import pytest import yaml from pydantic import BaseModel, Field, ValidationError -from llama_stack.core.datatypes import Api, Provider, StackRunConfig +from llama_stack.core.datatypes import Api, Provider, StackConfig from llama_stack.core.distribution import INTERNAL_APIS, get_provider_registry, providable_apis from llama_stack.core.storage.datatypes import ( InferenceStoreReference, @@ -53,7 +53,7 @@ def _default_storage() -> StorageConfig: ) -def make_stack_config(**overrides) -> StackRunConfig: +def make_stack_config(**overrides) -> StackConfig: storage = overrides.pop("storage", _default_storage()) defaults = dict( image_name="test_image", @@ -62,7 +62,7 @@ def make_stack_config(**overrides) -> StackRunConfig: storage=storage, ) defaults.update(overrides) - return StackRunConfig(**defaults) + return StackConfig(**defaults) @pytest.fixture diff --git a/tests/unit/prompts/prompts/conftest.py b/tests/unit/prompts/prompts/conftest.py index 8bfc1f03c..8ed5b429a 100644 --- a/tests/unit/prompts/prompts/conftest.py +++ b/tests/unit/prompts/prompts/conftest.py @@ -27,7 +27,7 @@ async def temp_prompt_store(tmp_path_factory): temp_dir = tmp_path_factory.getbasetemp() db_path = str(temp_dir / f"{unique_id}.db") - from llama_stack.core.datatypes import StackRunConfig + from llama_stack.core.datatypes import StackConfig storage = StorageConfig( backends={ @@ -41,7 +41,7 @@ async def temp_prompt_store(tmp_path_factory): prompts=KVStoreReference(backend="kv_test", namespace="prompts"), ), ) - mock_run_config = StackRunConfig( + mock_run_config = StackConfig( image_name="test-distribution", apis=[], providers={}, diff --git a/tests/unit/server/test_resolver.py b/tests/unit/server/test_resolver.py index a1b03f630..5ba4b5cfc 100644 --- a/tests/unit/server/test_resolver.py +++ b/tests/unit/server/test_resolver.py @@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, MagicMock from pydantic import BaseModel, Field 
-from llama_stack.core.datatypes import Api, Provider, StackRunConfig +from llama_stack.core.datatypes import Api, Provider, StackConfig from llama_stack.core.resolver import resolve_impls from llama_stack.core.routers.inference import InferenceRouter from llama_stack.core.routing_tables.models import ModelsRoutingTable @@ -71,7 +71,7 @@ class SampleImpl: pass -def make_run_config(**overrides) -> StackRunConfig: +def make_run_config(**overrides) -> StackConfig: storage = overrides.pop( "storage", StorageConfig( @@ -97,7 +97,7 @@ def make_run_config(**overrides) -> StackRunConfig: storage=storage, ) defaults.update(overrides) - return StackRunConfig(**defaults) + return StackConfig(**defaults) async def test_resolve_impls_basic():