fix: rename StackRunConfig to StackConfig

Since this object represents our config for list-deps, run, etc., let's rename it to simply `StackConfig`.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Charlie Doern 2025-11-24 11:32:53 -05:00
parent 17f8ab31b5
commit 4a3f9151e3
23 changed files with 72 additions and 72 deletions
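
The rename itself is mechanical: every import and call site swaps StackRunConfig for StackConfig, with no behavioral change. As a minimal sketch of post-rename usage, mirroring the distribution-config tests touched below (the starter run.yaml path is the one those tests load):

import yaml

from llama_stack.core.datatypes import StackConfig

# Load a distribution run config and validate it against the renamed model.
with open("llama_stack/distributions/starter/run.yaml") as f:
    config_dict = yaml.safe_load(f)

config = StackConfig(**config_dict)      # direct construction, as in the tests
StackConfig.model_validate(config_dict)  # or pydantic-style validation
assert config.storage is not None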


@@ -17,7 +17,7 @@ from pathlib import Path
import pytest
import yaml
-from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.core.datatypes import StackConfig
def get_test_configs():
@@ -49,4 +49,4 @@ def test_load_run_config(config_file):
with open(config_file) as f:
config_data = yaml.safe_load(f)
-StackRunConfig.model_validate(config_data)
+StackConfig.model_validate(config_data)


@@ -6,7 +6,7 @@
import yaml
-from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.core.datatypes import StackConfig
from llama_stack.core.storage.datatypes import (
PostgresKVStoreConfig,
PostgresSqlStoreConfig,
@@ -20,7 +20,7 @@ def test_starter_distribution_config_loads_and_resolves():
with open("llama_stack/distributions/starter/run.yaml") as f:
config_dict = yaml.safe_load(f)
-config = StackRunConfig(**config_dict)
+config = StackConfig(**config_dict)
# Config should have named backends and explicit store references
assert config.storage is not None
@@ -50,7 +50,7 @@ def test_postgres_demo_distribution_config_loads():
with open("llama_stack/distributions/postgres-demo/run.yaml") as f:
config_dict = yaml.safe_load(f)
-config = StackRunConfig(**config_dict)
+config = StackConfig(**config_dict)
# Should have postgres backend
assert config.storage is not None


@@ -16,7 +16,7 @@ from llama_stack.core.conversations.conversations import (
ConversationServiceConfig,
ConversationServiceImpl,
)
-from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.core.datatypes import StackConfig
from llama_stack.core.storage.datatypes import (
ServerStoresConfig,
SqliteSqlStoreConfig,
@@ -44,7 +44,7 @@ async def service():
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
-run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage)
+run_config = StackConfig(image_name="test", apis=[], providers={}, storage=storage)
config = ConversationServiceConfig(run_config=run_config, policy=[])
service = ConversationServiceImpl(config, {})
@@ -151,7 +151,7 @@ async def test_policy_configuration():
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
-run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage)
+run_config = StackConfig(image_name="test", apis=[], providers={}, storage=storage)
config = ConversationServiceConfig(run_config=run_config, policy=restrictive_policy)
service = ConversationServiceImpl(config, {})


@@ -10,7 +10,7 @@ from unittest.mock import AsyncMock
import pytest
-from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, VectorStoresConfig
+from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackConfig, VectorStoresConfig
from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config
from llama_stack.core.storage.datatypes import ServerStoresConfig, StorageConfig
from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
@@ -19,7 +19,7 @@ from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model,
class TestVectorStoresValidation:
async def test_validate_missing_model(self):
"""Test validation fails when model not found."""
-run_config = StackRunConfig(
+run_config = StackConfig(
image_name="test",
providers={},
storage=StorageConfig(
@@ -47,7 +47,7 @@ class TestVectorStoresValidation:
async def test_validate_success(self):
"""Test validation passes with valid model."""
-run_config = StackRunConfig(
+run_config = StackConfig(
image_name="test",
providers={},
storage=StorageConfig(


@@ -11,7 +11,7 @@ from pydantic import ValidationError
from llama_stack.core.datatypes import (
LLAMA_STACK_RUN_CONFIG_VERSION,
-StackRunConfig,
+StackConfig,
)
from llama_stack.core.storage.datatypes import (
InferenceStoreReference,
@@ -51,7 +51,7 @@ def _base_run_config(**overrides):
),
),
)
-return StackRunConfig(
+return StackConfig(
version=LLAMA_STACK_RUN_CONFIG_VERSION,
image_name="test-distro",
apis=[],


@@ -11,7 +11,7 @@ import pytest
import yaml
from pydantic import BaseModel, Field, ValidationError
-from llama_stack.core.datatypes import Api, Provider, StackRunConfig
+from llama_stack.core.datatypes import Api, Provider, StackConfig
from llama_stack.core.distribution import INTERNAL_APIS, get_provider_registry, providable_apis
from llama_stack.core.storage.datatypes import (
InferenceStoreReference,
@@ -53,7 +53,7 @@ def _default_storage() -> StorageConfig:
)
-def make_stack_config(**overrides) -> StackRunConfig:
+def make_stack_config(**overrides) -> StackConfig:
storage = overrides.pop("storage", _default_storage())
defaults = dict(
image_name="test_image",
@@ -62,7 +62,7 @@ def make_stack_config(**overrides) -> StackRunConfig:
storage=storage,
)
defaults.update(overrides)
-return StackRunConfig(**defaults)
+return StackConfig(**defaults)
@pytest.fixture


@@ -27,7 +27,7 @@ async def temp_prompt_store(tmp_path_factory):
temp_dir = tmp_path_factory.getbasetemp()
db_path = str(temp_dir / f"{unique_id}.db")
-from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.core.datatypes import StackConfig
storage = StorageConfig(
backends={
@@ -41,7 +41,7 @@ async def temp_prompt_store(tmp_path_factory):
prompts=KVStoreReference(backend="kv_test", namespace="prompts"),
),
)
-mock_run_config = StackRunConfig(
+mock_run_config = StackConfig(
image_name="test-distribution",
apis=[],
providers={},


@@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, MagicMock
from pydantic import BaseModel, Field
-from llama_stack.core.datatypes import Api, Provider, StackRunConfig
+from llama_stack.core.datatypes import Api, Provider, StackConfig
from llama_stack.core.resolver import resolve_impls
from llama_stack.core.routers.inference import InferenceRouter
from llama_stack.core.routing_tables.models import ModelsRoutingTable
@@ -71,7 +71,7 @@ class SampleImpl:
pass
-def make_run_config(**overrides) -> StackRunConfig:
+def make_run_config(**overrides) -> StackConfig:
storage = overrides.pop(
"storage",
StorageConfig(
@@ -97,7 +97,7 @@ def make_run_config(**overrides) -> StackRunConfig:
storage=storage,
)
defaults.update(overrides)
-return StackRunConfig(**defaults)
+return StackConfig(**defaults)
async def test_resolve_impls_basic():