fix: rename some more usages

Change the remaining run.yaml references to config.yaml, and rename the corresponding parameters and fields (e.g. run_config -> config) for consistency.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-26 15:52:21 -05:00
parent 0cd98c957e
commit f05d5138e8
10 changed files with 26 additions and 26 deletions

View file

@ -44,14 +44,14 @@ jobs:
- name: Print distro dependencies - name: Print distro dependencies
run: | run: |
uv run --no-sync llama stack list-deps tests/external/run-byoa.yaml uv run --no-sync llama stack list-deps tests/external/config.yaml
- name: Build distro from config file - name: Build distro from config file
run: | run: |
uv venv ci-test uv venv ci-test
source ci-test/bin/activate source ci-test/bin/activate
uv pip install -e . uv pip install -e .
LLAMA_STACK_LOGGING=all=CRITICAL llama stack list-deps tests/external/run-byoa.yaml | xargs -L1 uv pip install LLAMA_STACK_LOGGING=all=CRITICAL llama stack list-deps tests/external/config.yaml | xargs -L1 uv pip install
- name: Start Llama Stack server in background - name: Start Llama Stack server in background
if: ${{ matrix.image-type }} == 'venv' if: ${{ matrix.image-type }} == 'venv'
@ -62,7 +62,7 @@ jobs:
# Use the virtual environment created by the build step (name comes from build config) # Use the virtual environment created by the build step (name comes from build config)
source ci-test/bin/activate source ci-test/bin/activate
uv pip list uv pip list
nohup llama stack run tests/external/run-byoa.yaml > server.log 2>&1 & nohup llama stack run tests/external/config.yaml > server.log 2>&1 &
- name: Wait for Llama Stack server to be ready - name: Wait for Llama Stack server to be ready
run: | run: |

View file

@ -337,7 +337,7 @@ uv pip install -e .
7. Configure Llama Stack to use the provider: 7. Configure Llama Stack to use the provider:
```yaml ```yaml
# ~/.llama/run-byoa.yaml # ~/.llama/config.yaml
version: "2" version: "2"
image_name: "llama-stack-api-weather" image_name: "llama-stack-api-weather"
apis: apis:
@ -356,7 +356,7 @@ server:
8. Run the server: 8. Run the server:
```bash ```bash
llama stack run ~/.llama/run-byoa.yaml llama stack run ~/.llama/config.yaml
``` ```
9. Test the API: 9. Test the API:

View file

@ -78,7 +78,7 @@ def run_stack_list_deps_command(args: argparse.Namespace) -> None:
with open(config_file) as f: with open(config_file) as f:
try: try:
contents = yaml.safe_load(f) contents = yaml.safe_load(f)
run_config = StackConfig(**contents) config = StackConfig(**contents)
except Exception as e: except Exception as e:
cprint( cprint(
f"Could not parse config file {config_file}: {e}", f"Could not parse config file {config_file}: {e}",
@ -119,16 +119,16 @@ def run_stack_list_deps_command(args: argparse.Namespace) -> None:
file=sys.stderr, file=sys.stderr,
) )
sys.exit(1) sys.exit(1)
run_config = StackConfig(providers=provider_list, image_name="providers-run") config = StackConfig(providers=provider_list, image_name="providers-run")
normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(run_config) normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(config)
normal_deps += SERVER_DEPENDENCIES normal_deps += SERVER_DEPENDENCIES
# Add external API dependencies # Add external API dependencies
if run_config.external_apis_dir: if config.external_apis_dir:
from llama_stack.core.external import load_external_apis from llama_stack.core.external import load_external_apis
external_apis = load_external_apis(run_config) external_apis = load_external_apis(config)
if external_apis: if external_apis:
for _, api_spec in external_apis.items(): for _, api_spec in external_apis.items():
normal_deps.extend(api_spec.pip_packages) normal_deps.extend(api_spec.pip_packages)

View file

@ -36,7 +36,7 @@ class ConversationServiceConfig(BaseModel):
:param policy: Access control rules :param policy: Access control rules
""" """
run_config: StackConfig config: StackConfig
policy: list[AccessRule] = [] policy: list[AccessRule] = []
@ -56,7 +56,7 @@ class ConversationServiceImpl(Conversations):
self.policy = config.policy self.policy = config.policy
# Use conversations store reference from run config # Use conversations store reference from run config
conversations_ref = config.run_config.storage.stores.conversations conversations_ref = config.config.storage.stores.conversations
if not conversations_ref: if not conversations_ref:
raise ValueError("storage.stores.conversations must be configured in run config") raise ValueError("storage.stores.conversations must be configured in run config")

View file

@ -22,7 +22,7 @@ from llama_stack_api import (
class DistributionInspectConfig(BaseModel): class DistributionInspectConfig(BaseModel):
run_config: StackConfig config: StackConfig
async def get_provider_impl(config, deps): async def get_provider_impl(config, deps):
@ -40,7 +40,7 @@ class DistributionInspectImpl(Inspect):
pass pass
async def list_routes(self, api_filter: str | None = None) -> ListRoutesResponse: async def list_routes(self, api_filter: str | None = None) -> ListRoutesResponse:
run_config: StackConfig = self.config.run_config config: StackConfig = self.config.config
# Helper function to determine if a route should be included based on api_filter # Helper function to determine if a route should be included based on api_filter
def should_include_route(webmethod) -> bool: def should_include_route(webmethod) -> bool:
@ -55,7 +55,7 @@ class DistributionInspectImpl(Inspect):
return not webmethod.deprecated and webmethod.level == api_filter return not webmethod.deprecated and webmethod.level == api_filter
ret = [] ret = []
external_apis = load_external_apis(run_config) external_apis = load_external_apis(config)
all_endpoints = get_all_api_routes(external_apis) all_endpoints = get_all_api_routes(external_apis)
for api, endpoints in all_endpoints.items(): for api, endpoints in all_endpoints.items():
# Always include provider and inspect APIs, filter others based on run config # Always include provider and inspect APIs, filter others based on run config
@ -72,7 +72,7 @@ class DistributionInspectImpl(Inspect):
] ]
) )
else: else:
providers = run_config.providers.get(api.value, []) providers = config.providers.get(api.value, [])
if providers: # Only process if there are providers for this API if providers: # Only process if there are providers for this API
ret.extend( ret.extend(
[ [

View file

@ -20,7 +20,7 @@ class PromptServiceConfig(BaseModel):
:param run_config: Stack run configuration containing distribution info :param config: Stack run configuration containing distribution info
""" """
run_config: StackConfig config: StackConfig
async def get_provider_impl(config: PromptServiceConfig, deps: dict[Any, Any]): async def get_provider_impl(config: PromptServiceConfig, deps: dict[Any, Any]):
@ -40,7 +40,7 @@ class PromptServiceImpl(Prompts):
async def initialize(self) -> None: async def initialize(self) -> None:
# Use prompts store reference from run config # Use prompts store reference from run config
prompts_ref = self.config.run_config.storage.stores.prompts prompts_ref = self.config.config.storage.stores.prompts
if not prompts_ref: if not prompts_ref:
raise ValueError("storage.stores.prompts must be configured in run config") raise ValueError("storage.stores.prompts must be configured in run config")
self.kvstore = await kvstore_impl(prompts_ref) self.kvstore = await kvstore_impl(prompts_ref)

View file

@ -19,7 +19,7 @@ logger = get_logger(name=__name__, category="core")
class ProviderImplConfig(BaseModel): class ProviderImplConfig(BaseModel):
run_config: StackConfig config: StackConfig
async def get_provider_impl(config, deps): async def get_provider_impl(config, deps):
@ -41,7 +41,7 @@ class ProviderImpl(Providers):
pass pass
async def list_providers(self) -> ListProvidersResponse: async def list_providers(self) -> ListProvidersResponse:
run_config = self.config.run_config run_config = self.config.config
safe_config = StackConfig(**redact_sensitive_fields(run_config.model_dump())) safe_config = StackConfig(**redact_sensitive_fields(run_config.model_dump()))
providers_health = await self.get_providers_health() providers_health = await self.get_providers_health()
ret = [] ret = []

View file

@ -341,7 +341,7 @@ def cast_image_name_to_string(config_dict: dict[str, Any]) -> dict[str, Any]:
return config_dict return config_dict
def add_internal_implementations(impls: dict[Api, Any], run_config: StackConfig) -> None: def add_internal_implementations(impls: dict[Api, Any], config: StackConfig) -> None:
"""Add internal implementations (inspect and providers) to the implementations dictionary. """Add internal implementations (inspect and providers) to the implementations dictionary.
Args: Args:
@ -349,25 +349,25 @@ def add_internal_implementations(impls: dict[Api, Any], run_config: StackConfig)
run_config: Stack run configuration config: Stack run configuration
""" """
inspect_impl = DistributionInspectImpl( inspect_impl = DistributionInspectImpl(
DistributionInspectConfig(run_config=run_config), DistributionInspectConfig(config=config),
deps=impls, deps=impls,
) )
impls[Api.inspect] = inspect_impl impls[Api.inspect] = inspect_impl
providers_impl = ProviderImpl( providers_impl = ProviderImpl(
ProviderImplConfig(run_config=run_config), ProviderImplConfig(config=config),
deps=impls, deps=impls,
) )
impls[Api.providers] = providers_impl impls[Api.providers] = providers_impl
prompts_impl = PromptServiceImpl( prompts_impl = PromptServiceImpl(
PromptServiceConfig(run_config=run_config), PromptServiceConfig(config=config),
deps=impls, deps=impls,
) )
impls[Api.prompts] = prompts_impl impls[Api.prompts] = prompts_impl
conversations_impl = ConversationServiceImpl( conversations_impl = ConversationServiceImpl(
ConversationServiceConfig(run_config=run_config), ConversationServiceConfig(config=config),
deps=impls, deps=impls,
) )
impls[Api.conversations] = conversations_impl impls[Api.conversations] = conversations_impl

View file

@ -47,7 +47,7 @@ async def temp_prompt_store(tmp_path_factory):
providers={}, providers={},
storage=storage, storage=storage,
) )
config = PromptServiceConfig(run_config=mock_run_config) config = PromptServiceConfig(config=mock_run_config)
store = PromptServiceImpl(config, deps={}) store = PromptServiceImpl(config, deps={})
register_kvstore_backends({"kv_test": storage.backends["kv_test"]}) register_kvstore_backends({"kv_test": storage.backends["kv_test"]})