mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-17 00:32:38 +00:00
feat: remove usage of build yaml (#4192)
Some checks failed
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 0s
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 1s
Integration Tests (Replay) / generate-matrix (push) Successful in 3s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 4s
Test Llama Stack Build / generate-matrix (push) Failing after 3s
Test Llama Stack Build / build (push) Has been skipped
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Test llama stack list-deps / generate-matrix (push) Failing after 3s
Test llama stack list-deps / list-deps (push) Has been skipped
API Conformance Tests / check-schema-compatibility (push) Successful in 11s
Python Package Build Test / build (3.13) (push) Successful in 19s
Python Package Build Test / build (3.12) (push) Successful in 23s
Test Llama Stack Build / build-single-provider (push) Successful in 33s
Test llama stack list-deps / show-single-provider (push) Successful in 36s
Test llama stack list-deps / list-deps-from-config (push) Successful in 44s
Vector IO Integration Tests / test-matrix (push) Failing after 57s
Test External API and Providers / test-external (venv) (push) Failing after 1m37s
Unit Tests / unit-tests (3.12) (push) Failing after 1m56s
UI Tests / ui-tests (22) (push) Successful in 2m2s
Unit Tests / unit-tests (3.13) (push) Failing after 2m35s
Pre-commit / pre-commit (22) (push) Successful in 3m16s
Test Llama Stack Build / build-custom-container-distribution (push) Successful in 3m34s
Test Llama Stack Build / build-ubi9-container-distribution (push) Successful in 3m59s
Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 4m30s
# What does this PR do?

The build.yaml is only used in the following ways:

1. list-deps
2. distribution code-gen

Since `llama stack build` no longer exists, I found myself asking: why do we need two different files for list-deps and run? Removing the BuildConfig and altering the usage of the DistributionTemplate in `llama stack list-deps` is the first step in removing the build yaml entirely. Removing the BuildConfig and build.yaml cuts the files users need to maintain in half and lets us focus on the stability of _just_ the run.yaml.

This PR removes the build.yaml, the BuildConfig datatype, and their usage throughout the codebase. Users are now expected to point to run.yaml files when running list-deps, and the codebase now uses these types automatically for things like `get_provider_registry`.

**Additionally, two renames: `StackRunConfig` -> `StackConfig` and `run.yaml` -> `config.yaml`.**

The build.yaml made sense when we were managing the build process for the user and actually _producing_ a run.yaml _from_ the build.yaml, but now that we are simply getting the provider registry and listing the deps, switching to config.yaml greatly simplifies the scope.

## Test Plan

Existing list-deps usage should continue to work in the tests.

---------

Signed-off-by: Charlie Doern <cdoern@redhat.com>
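For orientation, here is a minimal sketch of the new flow, adapted from the updated unit tests in this diff; the field values are illustrative and the optional storage setup is omitted:

```python
# Sketch only: image_name and the provider entries below are illustrative values.
from llama_stack.core.datatypes import Provider, StackConfig
from llama_stack.core.distribution import get_provider_registry

# One StackConfig (loaded from config.yaml) now drives both serving and `llama stack list-deps`.
config = StackConfig(
    image_name="example-distro",
    apis=[],
    providers={
        "inference": [
            Provider(
                provider_id="external_test",
                provider_type="external_test",
                config={},
                module="external_test",
            )
        ]
    },
)

# listing=True replaces the old building=True path: the provider registry is
# assembled for dependency listing without importing external provider modules.
registry = get_provider_registry(config=config, listing=True)
```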
Parent: 17e6912288
Commit: 661985e240
103 changed files with 972 additions and 1031 deletions
@@ -299,7 +299,7 @@ def test_providers_flag_generates_config_with_api_keys():
    # Read the generated config file
    from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR

-    config_file = DISTRIBS_BASE_DIR / "providers-run" / "run.yaml"
+    config_file = DISTRIBS_BASE_DIR / "providers-run" / "config.yaml"
    with open(config_file) as f:
        config_dict = yaml.safe_load(f)

@@ -16,7 +16,7 @@ from llama_stack.core.conversations.conversations import (
    ConversationServiceConfig,
    ConversationServiceImpl,
)
-from llama_stack.core.datatypes import StackRunConfig
+from llama_stack.core.datatypes import StackConfig
from llama_stack.core.storage.datatypes import (
    ServerStoresConfig,
    SqliteSqlStoreConfig,
@@ -44,9 +44,9 @@ async def service():
        ),
    )
    register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
-    run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage)
+    stack_config = StackConfig(image_name="test", apis=[], providers={}, storage=storage)

-    config = ConversationServiceConfig(run_config=run_config, policy=[])
+    config = ConversationServiceConfig(config=stack_config, policy=[])
    service = ConversationServiceImpl(config, {})
    await service.initialize()
    yield service
@@ -151,9 +151,9 @@ async def test_policy_configuration():
        ),
    )
    register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
-    run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage)
+    stack_config = StackConfig(image_name="test", apis=[], providers={}, storage=storage)

-    config = ConversationServiceConfig(run_config=run_config, policy=restrictive_policy)
+    config = ConversationServiceConfig(config=stack_config, policy=restrictive_policy)
    service = ConversationServiceImpl(config, {})
    await service.initialize()

@@ -10,7 +10,7 @@ from unittest.mock import AsyncMock

import pytest

-from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, VectorStoresConfig
+from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackConfig, VectorStoresConfig
from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config
from llama_stack.core.storage.datatypes import ServerStoresConfig, StorageConfig
from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
@@ -19,7 +19,7 @@ from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model,
class TestVectorStoresValidation:
    async def test_validate_missing_model(self):
        """Test validation fails when model not found."""
-        run_config = StackRunConfig(
+        run_config = StackConfig(
            image_name="test",
            providers={},
            storage=StorageConfig(
@@ -47,7 +47,7 @@ class TestVectorStoresValidation:

    async def test_validate_success(self):
        """Test validation passes with valid model."""
-        run_config = StackRunConfig(
+        run_config = StackConfig(
            image_name="test",
            providers={},
            storage=StorageConfig(
@@ -11,7 +11,7 @@ from pydantic import ValidationError

from llama_stack.core.datatypes import (
    LLAMA_STACK_RUN_CONFIG_VERSION,
-    StackRunConfig,
+    StackConfig,
)
from llama_stack.core.storage.datatypes import (
    InferenceStoreReference,
@@ -51,7 +51,7 @@ def _base_run_config(**overrides):
            ),
        ),
    )
-    return StackRunConfig(
+    return StackConfig(
        version=LLAMA_STACK_RUN_CONFIG_VERSION,
        image_name="test-distro",
        apis=[],
@@ -11,7 +11,7 @@ import pytest
import yaml
from pydantic import BaseModel, Field, ValidationError

-from llama_stack.core.datatypes import Api, Provider, StackRunConfig
+from llama_stack.core.datatypes import Api, Provider, StackConfig
from llama_stack.core.distribution import INTERNAL_APIS, get_provider_registry, providable_apis
from llama_stack.core.storage.datatypes import (
    InferenceStoreReference,
@@ -53,7 +53,7 @@ def _default_storage() -> StorageConfig:
    )


-def make_stack_config(**overrides) -> StackRunConfig:
+def make_stack_config(**overrides) -> StackConfig:
    storage = overrides.pop("storage", _default_storage())
    defaults = dict(
        image_name="test_image",
@@ -62,7 +62,7 @@ def make_stack_config(**overrides) -> StackRunConfig:
        storage=storage,
    )
    defaults.update(overrides)
-    return StackRunConfig(**defaults)
+    return StackConfig(**defaults)


@pytest.fixture
@@ -270,7 +270,7 @@ class TestProviderRegistry:
            external_providers_dir="/nonexistent/dir",
        )
        with pytest.raises(FileNotFoundError):
-            get_provider_registry(config)
+            get_provider_registry(config=config)

    def test_empty_api_directory(self, api_directories, mock_providers, base_config):
        """Test handling of empty API directory."""
@@ -339,7 +339,7 @@ pip_packages:
                ]
            },
        )
-        registry = get_provider_registry(config)
+        registry = get_provider_registry(config=config)
        assert Api.inference in registry
        assert "external_test" in registry[Api.inference]
        provider = registry[Api.inference]["external_test"]
@@ -368,7 +368,7 @@ pip_packages:
            },
        )
        with pytest.raises(ValueError) as exc_info:
-            get_provider_registry(config)
+            get_provider_registry(config=config)
        assert "get_provider_spec not found" in str(exc_info.value)

    def test_external_provider_from_module_missing_get_provider_spec(self, mock_providers):
@@ -391,31 +391,29 @@ pip_packages:
            },
        )
        with pytest.raises(AttributeError):
-            get_provider_registry(config)
+            get_provider_registry(config=config)

-    def test_external_provider_from_module_building(self, mock_providers):
-        """Test loading an external provider from a module during build (building=True, partial spec)."""
-        from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
+    def test_external_provider_from_module_listing(self, mock_providers):
+        """Test loading an external provider from a module during list-deps (listing=True, partial spec)."""
+        from llama_stack.core.datatypes import StackConfig
        from llama_stack_api import Api

-        # No importlib patch needed, should not import module when type of `config` is BuildConfig or DistributionSpec
-        build_config = BuildConfig(
-            version=2,
-            image_type="container",
+        # No importlib patch needed, should not import module when listing
+        config = StackConfig(
            image_name="test_image",
-            distribution_spec=DistributionSpec(
-                description="test",
-                providers={
-                    "inference": [
-                        BuildProvider(
-                            provider_type="external_test",
-                            module="external_test",
-                        )
-                    ]
-                },
-            ),
+            apis=[],
+            providers={
+                "inference": [
+                    Provider(
+                        provider_id="external_test",
+                        provider_type="external_test",
+                        config={},
+                        module="external_test",
+                    )
+                ]
+            },
        )
-        registry = get_provider_registry(build_config)
+        registry = get_provider_registry(config=config, listing=True)
        assert Api.inference in registry
        assert "external_test" in registry[Api.inference]
        provider = registry[Api.inference]["external_test"]
@@ -448,7 +446,7 @@ class TestGetExternalProvidersFromModule:
            },
        )
        registry = {Api.inference: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)
        # Should not add anything to registry
        assert len(result[Api.inference]) == 0

@@ -487,36 +485,34 @@ class TestGetExternalProvidersFromModule:
            },
        )
        registry = {Api.inference: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)
        assert "versioned_test" in result[Api.inference]
        assert result[Api.inference]["versioned_test"].module == "versioned_test==1.0.0"

    def test_buildconfig_does_not_import_module(self, mock_providers):
-        """Test that BuildConfig does not import the module (building=True)."""
-        from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
+        """Test that StackConfig does not import the module when listing (listing=True)."""
+        from llama_stack.core.datatypes import StackConfig
        from llama_stack.core.distribution import get_external_providers_from_module

-        build_config = BuildConfig(
-            version=2,
-            image_type="container",
+        config = StackConfig(
            image_name="test_image",
-            distribution_spec=DistributionSpec(
-                description="test",
-                providers={
-                    "inference": [
-                        BuildProvider(
-                            provider_type="build_test",
-                            module="build_test==1.0.0",
-                        )
-                    ]
-                },
-            ),
+            apis=[],
+            providers={
+                "inference": [
+                    Provider(
+                        provider_id="build_test",
+                        provider_type="build_test",
+                        config={},
+                        module="build_test==1.0.0",
+                    )
+                ]
+            },
        )

-        # Should not call import_module at all when building
+        # Should not call import_module at all when listing
        with patch("importlib.import_module") as mock_import:
            registry = {Api.inference: {}}
-            result = get_external_providers_from_module(registry, build_config, building=True)
+            result = get_external_providers_from_module(registry, config, listing=True)

            # Verify module was NOT imported
            mock_import.assert_not_called()

@@ -530,35 +526,31 @@ class TestGetExternalProvidersFromModule:
        assert provider.api == Api.inference

    def test_buildconfig_multiple_providers(self, mock_providers):
-        """Test BuildConfig with multiple providers for the same API."""
-        from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
+        """Test StackConfig with multiple providers for the same API."""
+        from llama_stack.core.datatypes import StackConfig
        from llama_stack.core.distribution import get_external_providers_from_module

-        build_config = BuildConfig(
-            version=2,
-            image_type="container",
+        config = StackConfig(
            image_name="test_image",
-            distribution_spec=DistributionSpec(
-                description="test",
-                providers={
-                    "inference": [
-                        BuildProvider(provider_type="provider1", module="provider1"),
-                        BuildProvider(provider_type="provider2", module="provider2"),
-                    ]
-                },
-            ),
+            apis=[],
+            providers={
+                "inference": [
+                    Provider(provider_id="provider1", provider_type="provider1", config={}, module="provider1"),
+                    Provider(provider_id="provider2", provider_type="provider2", config={}, module="provider2"),
+                ]
+            },
        )

        with patch("importlib.import_module") as mock_import:
            registry = {Api.inference: {}}
-            result = get_external_providers_from_module(registry, build_config, building=True)
+            result = get_external_providers_from_module(registry, config, listing=True)

            mock_import.assert_not_called()
            assert "provider1" in result[Api.inference]
            assert "provider2" in result[Api.inference]

    def test_distributionspec_does_not_import_module(self, mock_providers):
-        """Test that DistributionSpec does not import the module (building=True)."""
+        """Test that DistributionSpec does not import the module (listing=True)."""
        from llama_stack.core.datatypes import BuildProvider, DistributionSpec
        from llama_stack.core.distribution import get_external_providers_from_module

@@ -574,10 +566,10 @@ class TestGetExternalProvidersFromModule:
            },
        )

-        # Should not call import_module at all when building
+        # Should not call import_module at all when listing
        with patch("importlib.import_module") as mock_import:
            registry = {Api.inference: {}}
-            result = get_external_providers_from_module(registry, dist_spec, building=True)
+            result = get_external_providers_from_module(registry, dist_spec, listing=True)

            # Verify module was NOT imported
            mock_import.assert_not_called()
@@ -631,7 +623,7 @@ class TestGetExternalProvidersFromModule:
            },
        )
        registry = {Api.inference: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)

        # Only the matching provider_type should be added
        assert "list_test" in result[Api.inference]
@@ -679,7 +671,7 @@ class TestGetExternalProvidersFromModule:
            },
        )
        registry = {Api.inference: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)

        # Only the matching provider_type should be added
        assert "wanted" in result[Api.inference]
@@ -734,7 +726,7 @@ class TestGetExternalProvidersFromModule:
            },
        )
        registry = {Api.inference: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)

        # Both provider types should be added to registry
        assert "remote::ollama" in result[Api.inference]
@@ -768,7 +760,7 @@ class TestGetExternalProvidersFromModule:
        registry = {Api.inference: {}}

        with pytest.raises(ValueError) as exc_info:
-            get_external_providers_from_module(registry, config, building=False)
+            get_external_providers_from_module(registry, config, listing=False)

        assert "get_provider_spec not found" in str(exc_info.value)

@@ -805,7 +797,7 @@ class TestGetExternalProvidersFromModule:
        registry = {Api.inference: {}}

        with pytest.raises(RuntimeError) as exc_info:
-            get_external_providers_from_module(registry, config, building=False)
+            get_external_providers_from_module(registry, config, listing=False)

        assert "Something went wrong" in str(exc_info.value)

@@ -818,7 +810,7 @@ class TestGetExternalProvidersFromModule:
            providers={},
        )
        registry = {Api.inference: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)

        # Should return registry unchanged
        assert result == registry
@@ -874,7 +866,7 @@ class TestGetExternalProvidersFromModule:
            },
        )
        registry = {Api.inference: {}, Api.safety: {}}
-        result = get_external_providers_from_module(registry, config, building=False)
+        result = get_external_providers_from_module(registry, config, listing=False)

        assert "inf_test" in result[Api.inference]
        assert "safe_test" in result[Api.safety]
@@ -31,8 +31,7 @@ def mock_distribs_base_dir(tmp_path):
    # Create a custom distribution
    starter_custom = custom_dir / "starter"
    starter_custom.mkdir()
-    (starter_custom / "starter-build.yaml").write_text("# build config")
-    (starter_custom / "starter-run.yaml").write_text("# run config")
+    (starter_custom / "starter-config.yaml").write_text("# config")

    return custom_dir

@@ -47,8 +46,7 @@ def mock_distro_dir(tmp_path):
    for distro_name in ["starter", "nvidia", "dell"]:
        distro_path = distro_dir / distro_name
        distro_path.mkdir()
-        (distro_path / "build.yaml").write_text("# build config")
-        (distro_path / "run.yaml").write_text("# run config")
+        (distro_path / "config.yaml").write_text("# config")

    return distro_dir

@@ -112,7 +110,7 @@ class TestStackList:
        # Add a hidden directory
        hidden_dir = mock_distro_dir / ".hidden"
        hidden_dir.mkdir()
-        (hidden_dir / "build.yaml").write_text("# build")
+        (hidden_dir / "config.yaml").write_text("# config")

        # Add a __pycache__ directory
        pycache_dir = mock_distro_dir / "__pycache__"
@@ -27,7 +27,7 @@ async def temp_prompt_store(tmp_path_factory):
    temp_dir = tmp_path_factory.getbasetemp()
    db_path = str(temp_dir / f"{unique_id}.db")

-    from llama_stack.core.datatypes import StackRunConfig
+    from llama_stack.core.datatypes import StackConfig

    storage = StorageConfig(
        backends={
@@ -41,13 +41,13 @@ async def temp_prompt_store(tmp_path_factory):
            prompts=KVStoreReference(backend="kv_test", namespace="prompts"),
        ),
    )
-    mock_run_config = StackRunConfig(
+    mock_run_config = StackConfig(
        image_name="test-distribution",
        apis=[],
        providers={},
        storage=storage,
    )
-    config = PromptServiceConfig(run_config=mock_run_config)
+    config = PromptServiceConfig(config=mock_run_config)
    store = PromptServiceImpl(config, deps={})

    register_kvstore_backends({"kv_test": storage.backends["kv_test"]})
@@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, MagicMock

from pydantic import BaseModel, Field

-from llama_stack.core.datatypes import Api, Provider, StackRunConfig
+from llama_stack.core.datatypes import Api, Provider, StackConfig
from llama_stack.core.resolver import resolve_impls
from llama_stack.core.routers.inference import InferenceRouter
from llama_stack.core.routing_tables.models import ModelsRoutingTable
@@ -71,7 +71,7 @@ class SampleImpl:
        pass


-def make_run_config(**overrides) -> StackRunConfig:
+def make_run_config(**overrides) -> StackConfig:
    storage = overrides.pop(
        "storage",
        StorageConfig(
@@ -97,7 +97,7 @@ def make_run_config(**overrides) -> StackRunConfig:
        storage=storage,
    )
    defaults.update(overrides)
-    return StackRunConfig(**defaults)
+    return StackConfig(**defaults)


async def test_resolve_impls_basic():