From 444f6c88f3e03d553efb1658c72e8c575625ab34 Mon Sep 17 00:00:00 2001 From: ehhuang Date: Mon, 20 Oct 2025 16:28:15 -0700 Subject: [PATCH] chore: remove build.py (#3869) # What does this PR do? ## Test Plan CI --- llama_stack/cli/stack/_build.py | 519 ------------------ llama_stack/cli/stack/build.py | 106 ---- llama_stack/cli/stack/stack.py | 2 - llama_stack/core/build_container.sh | 410 -------------- llama_stack/core/build_venv.sh | 220 -------- llama_stack/core/library_client.py | 2 +- llama_stack/core/ui/README.md | 2 +- .../distributions/dell/doc_template.md | 2 +- .../remote/datasetio/nvidia/README.md | 2 +- .../remote/inference/nvidia/NVIDIA.md | 2 +- .../remote/inference/nvidia/__init__.py | 2 +- .../remote/post_training/nvidia/README.md | 2 +- .../providers/remote/safety/nvidia/README.md | 2 +- tests/integration/fixtures/common.py | 2 +- tests/unit/distribution/test_build_path.py | 40 -- 15 files changed, 9 insertions(+), 1306 deletions(-) delete mode 100644 llama_stack/cli/stack/_build.py delete mode 100644 llama_stack/cli/stack/build.py delete mode 100755 llama_stack/core/build_container.sh delete mode 100755 llama_stack/core/build_venv.sh delete mode 100644 tests/unit/distribution/test_build_path.py diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py deleted file mode 100644 index 2a30ff394..000000000 --- a/llama_stack/cli/stack/_build.py +++ /dev/null @@ -1,519 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import argparse -import importlib.resources -import json -import os -import shutil -import sys -import textwrap -from functools import lru_cache -from importlib.abc import Traversable -from pathlib import Path - -import yaml -from prompt_toolkit import prompt -from prompt_toolkit.completion import WordCompleter -from prompt_toolkit.validation import Validator -from termcolor import colored, cprint - -from llama_stack.cli.stack.utils import ImageType -from llama_stack.cli.table import print_table -from llama_stack.core.build import ( - SERVER_DEPENDENCIES, - build_image, - get_provider_dependencies, -) -from llama_stack.core.configure import parse_and_maybe_upgrade_config -from llama_stack.core.datatypes import ( - BuildConfig, - BuildProvider, - DistributionSpec, - Provider, - StackRunConfig, -) -from llama_stack.core.distribution import get_provider_registry -from llama_stack.core.external import load_external_apis -from llama_stack.core.resolver import InvalidProviderError -from llama_stack.core.stack import replace_env_vars -from llama_stack.core.storage.datatypes import ( - InferenceStoreReference, - KVStoreReference, - ServerStoresConfig, - SqliteKVStoreConfig, - SqliteSqlStoreConfig, - SqlStoreReference, - StorageConfig, -) -from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR -from llama_stack.core.utils.dynamic import instantiate_class_type -from llama_stack.core.utils.exec import formulate_run_args, run_command -from llama_stack.core.utils.image_types import LlamaStackImageType -from llama_stack.providers.datatypes import Api - -DISTRIBS_PATH = Path(__file__).parent.parent.parent / "distributions" - - -@lru_cache -def available_distros_specs() -> dict[str, BuildConfig]: - import yaml - - distro_specs = {} - for p in DISTRIBS_PATH.rglob("*build.yaml"): - distro_name = p.parent.name - with open(p) as f: - build_config = 
BuildConfig(**yaml.safe_load(f)) - distro_specs[distro_name] = build_config - return distro_specs - - -def run_stack_build_command(args: argparse.Namespace) -> None: - if args.list_distros: - return _run_distro_list_cmd() - - if args.image_type == ImageType.VENV.value: - current_venv = os.environ.get("VIRTUAL_ENV") - image_name = args.image_name or current_venv - else: - image_name = args.image_name - - if args.template: - cprint( - "The --template argument is deprecated. Please use --distro instead.", - color="red", - file=sys.stderr, - ) - distro_name = args.template - else: - distro_name = args.distribution - - if distro_name: - available_distros = available_distros_specs() - if distro_name not in available_distros: - cprint( - f"Could not find distribution {distro_name}. Please run `llama stack build --list-distros` to check out the available distributions", - color="red", - file=sys.stderr, - ) - sys.exit(1) - build_config = available_distros[distro_name] - if args.image_type: - build_config.image_type = args.image_type - else: - cprint( - f"Please specify a image-type ({' | '.join(e.value for e in ImageType)}) for {distro_name}", - color="red", - file=sys.stderr, - ) - sys.exit(1) - elif args.providers: - provider_list: dict[str, list[BuildProvider]] = dict() - for api_provider in args.providers.split(","): - if "=" not in api_provider: - cprint( - "Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2", - color="red", - file=sys.stderr, - ) - sys.exit(1) - api, provider_type = api_provider.split("=") - providers_for_api = get_provider_registry().get(Api(api), None) - if providers_for_api is None: - cprint( - f"{api} is not a valid API.", - color="red", - file=sys.stderr, - ) - sys.exit(1) - if provider_type in providers_for_api: - provider = BuildProvider( - provider_type=provider_type, - module=None, - ) - provider_list.setdefault(api, []).append(provider) - else: - cprint( - f"{provider} is not a valid provider for the {api} API.", - color="red", - file=sys.stderr, - ) - sys.exit(1) - distribution_spec = DistributionSpec( - providers=provider_list, - description=",".join(args.providers), - ) - if not args.image_type: - cprint( - f"Please specify a image-type (container | venv) for {args.template}", - color="red", - file=sys.stderr, - ) - sys.exit(1) - - build_config = BuildConfig(image_type=args.image_type, distribution_spec=distribution_spec) - elif not args.config and not distro_name: - name = prompt( - "> Enter a name for your Llama Stack (e.g. my-local-stack): ", - validator=Validator.from_callable( - lambda x: len(x) > 0, - error_message="Name cannot be empty, please enter a name", - ), - ) - - image_type = prompt( - "> Enter the image type you want your Llama Stack to be built as (use to see options): ", - completer=WordCompleter([e.value for e in ImageType]), - complete_while_typing=True, - validator=Validator.from_callable( - lambda x: x in [e.value for e in ImageType], - error_message="Invalid image type. Use to see options", - ), - ) - - image_name = f"llamastack-{name}" - - cprint( - textwrap.dedent( - """ - Llama Stack is composed of several APIs working together. Let's select - the provider types (implementations) you want to use for these APIs. 
- """, - ), - color="green", - file=sys.stderr, - ) - - cprint("Tip: use to see options for the providers.\n", color="green", file=sys.stderr) - - providers: dict[str, list[BuildProvider]] = dict() - for api, providers_for_api in get_provider_registry().items(): - available_providers = [x for x in providers_for_api.keys() if x not in ("remote", "remote::sample")] - if not available_providers: - continue - api_provider = prompt( - f"> Enter provider for API {api.value}: ", - completer=WordCompleter(available_providers), - complete_while_typing=True, - validator=Validator.from_callable( - lambda x: x in available_providers, # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847 - error_message="Invalid provider, use to see options", - ), - ) - - string_providers = api_provider.split(" ") - - for provider in string_providers: - providers.setdefault(api.value, []).append(BuildProvider(provider_type=provider)) - - description = prompt( - "\n > (Optional) Enter a short description for your Llama Stack: ", - default="", - ) - - distribution_spec = DistributionSpec( - providers=providers, - description=description, - ) - - build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec) - else: - with open(args.config) as f: - try: - contents = yaml.safe_load(f) - contents = replace_env_vars(contents) - build_config = BuildConfig(**contents) - if args.image_type: - build_config.image_type = args.image_type - except Exception as e: - cprint( - f"Could not parse config file {args.config}: {e}", - color="red", - file=sys.stderr, - ) - sys.exit(1) - - if args.print_deps_only: - print(f"# Dependencies for {distro_name or args.config or image_name}") - normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(build_config) - normal_deps += SERVER_DEPENDENCIES - print(f"uv pip install {' '.join(normal_deps)}") - for special_dep in special_deps: - print(f"uv pip install {special_dep}") - for external_dep in external_provider_dependencies: - print(f"uv pip install {external_dep}") - return - - try: - run_config = _run_stack_build_command_from_build_config( - build_config, - image_name=image_name, - config_path=args.config, - distro_name=distro_name, - ) - - except (Exception, RuntimeError) as exc: - import traceback - - cprint( - f"Error building stack: {exc}", - color="red", - file=sys.stderr, - ) - cprint("Stack trace:", color="red", file=sys.stderr) - traceback.print_exc() - sys.exit(1) - - if run_config is None: - cprint( - "Run config path is empty", - color="red", - file=sys.stderr, - ) - sys.exit(1) - - if args.run: - config_dict = yaml.safe_load(run_config.read_text()) - config = parse_and_maybe_upgrade_config(config_dict) - if config.external_providers_dir and not config.external_providers_dir.exists(): - config.external_providers_dir.mkdir(exist_ok=True) - run_args = formulate_run_args(args.image_type, image_name or config.image_name) - run_args.extend([str(os.getenv("LLAMA_STACK_PORT", 8321)), "--config", str(run_config)]) - run_command(run_args) - - -def _generate_run_config( - build_config: BuildConfig, - build_dir: Path, - image_name: str, -) -> Path: - """ - Generate a run.yaml template file for user to edit from a build.yaml file - """ - apis = list(build_config.distribution_spec.providers.keys()) - distro_dir = DISTRIBS_BASE_DIR / image_name - storage = StorageConfig( - backends={ - "kv_default": SqliteKVStoreConfig( - db_path=f"${{env.SQLITE_STORE_DIR:={distro_dir}}}/kvstore.db", - ), - "sql_default": SqliteSqlStoreConfig( - 
db_path=f"${{env.SQLITE_STORE_DIR:={distro_dir}}}/sql_store.db", - ), - }, - stores=ServerStoresConfig( - metadata=KVStoreReference( - backend="kv_default", - namespace="registry", - ), - inference=InferenceStoreReference( - backend="sql_default", - table_name="inference_store", - ), - conversations=SqlStoreReference( - backend="sql_default", - table_name="openai_conversations", - ), - ), - ) - - run_config = StackRunConfig( - container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None), - image_name=image_name, - apis=apis, - providers={}, - storage=storage, - external_providers_dir=build_config.external_providers_dir - if build_config.external_providers_dir - else EXTERNAL_PROVIDERS_DIR, - ) - # build providers dict - provider_registry = get_provider_registry(build_config) - for api in apis: - run_config.providers[api] = [] - providers = build_config.distribution_spec.providers[api] - - for provider in providers: - pid = provider.provider_type.split("::")[-1] - - p = provider_registry[Api(api)][provider.provider_type] - if p.deprecation_error: - raise InvalidProviderError(p.deprecation_error) - - try: - config_type = instantiate_class_type(provider_registry[Api(api)][provider.provider_type].config_class) - except (ModuleNotFoundError, ValueError) as exc: - # HACK ALERT: - # This code executes after building is done, the import cannot work since the - # package is either available in the venv or container - not available on the host. - # TODO: use a "is_external" flag in ProviderSpec to check if the provider is - # external - cprint( - f"Failed to import provider {provider.provider_type} for API {api} - assuming it's external, skipping: {exc}", - color="yellow", - file=sys.stderr, - ) - # Set config_type to None to avoid UnboundLocalError - config_type = None - - if config_type is not None and hasattr(config_type, "sample_run_config"): - config = config_type.sample_run_config(__distro_dir__=f"~/.llama/distributions/{image_name}") - else: - config = {} - - p_spec = Provider( - provider_id=pid, - provider_type=provider.provider_type, - config=config, - module=provider.module, - ) - run_config.providers[api].append(p_spec) - - run_config_file = build_dir / f"{image_name}-run.yaml" - - with open(run_config_file, "w") as f: - to_write = json.loads(run_config.model_dump_json()) - f.write(yaml.dump(to_write, sort_keys=False)) - - # Only print this message for non-container builds since it will be displayed before the - # container is built - # For non-container builds, the run.yaml is generated at the very end of the build process so it - # makes sense to display this message - if build_config.image_type != LlamaStackImageType.CONTAINER.value: - cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr) - return run_config_file - - -def _run_stack_build_command_from_build_config( - build_config: BuildConfig, - image_name: str | None = None, - distro_name: str | None = None, - config_path: str | None = None, -) -> Path | Traversable: - image_name = image_name or build_config.image_name - if build_config.image_type == LlamaStackImageType.CONTAINER.value: - if distro_name: - image_name = f"distribution-{distro_name}" - else: - if not image_name: - raise ValueError("Please specify an image name when building a container image without a template") - else: - if not image_name and os.environ.get("UV_SYSTEM_PYTHON"): - image_name = "__system__" - if not image_name: - raise ValueError("Please specify an image 
name when building a venv image") - - # At this point, image_name should be guaranteed to be a string - if image_name is None: - raise ValueError("image_name should not be None after validation") - - if distro_name: - build_dir = DISTRIBS_BASE_DIR / distro_name - build_file_path = build_dir / f"{distro_name}-build.yaml" - else: - if image_name is None: - raise ValueError("image_name cannot be None") - build_dir = DISTRIBS_BASE_DIR / image_name - build_file_path = build_dir / f"{image_name}-build.yaml" - - os.makedirs(build_dir, exist_ok=True) - run_config_file = None - # Generate the run.yaml so it can be included in the container image with the proper entrypoint - # Only do this if we're building a container image and we're not using a template - if build_config.image_type == LlamaStackImageType.CONTAINER.value and not distro_name and config_path: - cprint("Generating run.yaml file", color="yellow", file=sys.stderr) - run_config_file = _generate_run_config(build_config, build_dir, image_name) - - with open(build_file_path, "w") as f: - to_write = json.loads(build_config.model_dump_json(exclude_none=True)) - f.write(yaml.dump(to_write, sort_keys=False)) - - # We first install the external APIs so that the build process can use them and discover the - # providers dependencies - if build_config.external_apis_dir: - cprint("Installing external APIs", color="yellow", file=sys.stderr) - external_apis = load_external_apis(build_config) - if external_apis: - # install the external APIs - packages = [] - for _, api_spec in external_apis.items(): - if api_spec.pip_packages: - packages.extend(api_spec.pip_packages) - cprint( - f"Installing {api_spec.name} with pip packages {api_spec.pip_packages}", - color="yellow", - file=sys.stderr, - ) - return_code = run_command(["uv", "pip", "install", *packages]) - if return_code != 0: - packages_str = ", ".join(packages) - raise RuntimeError( - f"Failed to install external APIs packages: {packages_str} (return code: {return_code})" - ) - - return_code = build_image( - build_config, - image_name, - distro_or_config=distro_name or config_path or str(build_file_path), - run_config=run_config_file.as_posix() if run_config_file else None, - ) - if return_code != 0: - raise RuntimeError(f"Failed to build image {image_name}") - - if distro_name: - # copy run.yaml from distribution to build_dir instead of generating it again - distro_path = importlib.resources.files("llama_stack") / f"distributions/{distro_name}/run.yaml" - run_config_file = build_dir / f"{distro_name}-run.yaml" - - with importlib.resources.as_file(distro_path) as path: - shutil.copy(path, run_config_file) - - cprint("Build Successful!", color="green", file=sys.stderr) - cprint(f"You can find the newly-built distribution here: {run_config_file}", color="blue", file=sys.stderr) - if build_config.image_type == LlamaStackImageType.VENV: - cprint( - "You can run the new Llama Stack distro (after activating " - + colored(image_name, "cyan") - + ") via: " - + colored(f"llama stack run {run_config_file}", "blue"), - color="green", - file=sys.stderr, - ) - elif build_config.image_type == LlamaStackImageType.CONTAINER: - cprint( - "You can run the container with: " - + colored( - f"docker run -p 8321:8321 -v ~/.llama:/root/.llama localhost/{image_name} --port 8321", "blue" - ), - color="green", - file=sys.stderr, - ) - return distro_path - else: - return _generate_run_config(build_config, build_dir, image_name) - - -def _run_distro_list_cmd() -> None: - headers = [ - "Distribution Name", - # "Providers", - 
"Description", - ] - - rows = [] - for distro_name, spec in available_distros_specs().items(): - rows.append( - [ - distro_name, - # json.dumps(spec.distribution_spec.providers, indent=2), - spec.distribution_spec.description, - ] - ) - print_table( - rows, - headers, - separate_rows=True, - ) diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py deleted file mode 100644 index cbe8ed881..000000000 --- a/llama_stack/cli/stack/build.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. -import argparse -import textwrap - -from llama_stack.cli.stack.utils import ImageType -from llama_stack.cli.subcommand import Subcommand -from llama_stack.log import get_logger - -logger = get_logger(__name__, category="cli") - - -class StackBuild(Subcommand): - def __init__(self, subparsers: argparse._SubParsersAction): - super().__init__() - self.parser = subparsers.add_parser( - "build", - prog="llama stack build", - description="[DEPRECATED] Build a Llama stack container. This command is deprecated and will be removed in a future release. Use `llama stack list-deps ' instead.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - self._add_arguments() - self.parser.set_defaults(func=self._run_stack_build_command) - - def _add_arguments(self): - self.parser.add_argument( - "--config", - type=str, - default=None, - help="Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to enter information interactively", - ) - - self.parser.add_argument( - "--template", - type=str, - default=None, - help="""(deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions""", - ) - self.parser.add_argument( - "--distro", - "--distribution", - dest="distribution", - type=str, - default=None, - help="""Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions""", - ) - - self.parser.add_argument( - "--list-distros", - "--list-distributions", - action="store_true", - dest="list_distros", - default=False, - help="Show the available distributions for building a Llama Stack distribution", - ) - - self.parser.add_argument( - "--image-type", - type=str, - help="Image Type to use for the build. If not specified, will use the image type from the template config.", - choices=[e.value for e in ImageType], - default=None, # no default so we can detect if a user specified --image-type and override image_type in the config - ) - - self.parser.add_argument( - "--image-name", - type=str, - help=textwrap.dedent( - f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the virtual environment to use for -the build. If not specified, currently active environment will be used if found. 
- """ - ), - default=None, - ) - self.parser.add_argument( - "--print-deps-only", - default=False, - action="store_true", - help="Print the dependencies for the stack only, without building the stack", - ) - - self.parser.add_argument( - "--run", - action="store_true", - default=False, - help="Run the stack after building using the same image type, name, and other applicable arguments", - ) - self.parser.add_argument( - "--providers", - type=str, - default=None, - help="Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per API.", - ) - - def _run_stack_build_command(self, args: argparse.Namespace) -> None: - logger.warning( - "The 'llama stack build' command is deprecated and will be removed in a future release. Please use 'llama stack list-deps'" - ) - # always keep implementation completely silo-ed away from CLI so CLI - # can be fast to load and reduces dependencies - from ._build import run_stack_build_command - - return run_stack_build_command(args) diff --git a/llama_stack/cli/stack/stack.py b/llama_stack/cli/stack/stack.py index fd0a4edf5..351da972f 100644 --- a/llama_stack/cli/stack/stack.py +++ b/llama_stack/cli/stack/stack.py @@ -11,7 +11,6 @@ from llama_stack.cli.stack.list_stacks import StackListBuilds from llama_stack.cli.stack.utils import print_subcommand_description from llama_stack.cli.subcommand import Subcommand -from .build import StackBuild from .list_apis import StackListApis from .list_deps import StackListDeps from .list_providers import StackListProviders @@ -41,7 +40,6 @@ class StackParser(Subcommand): # Add sub-commands StackListDeps.create(subparsers) - StackBuild.create(subparsers) StackListApis.create(subparsers) StackListProviders.create(subparsers) StackRun.create(subparsers) diff --git a/llama_stack/core/build_container.sh b/llama_stack/core/build_container.sh deleted file mode 100755 index 03ed846d9..000000000 --- a/llama_stack/core/build_container.sh +++ /dev/null @@ -1,410 +0,0 @@ -#!/usr/bin/env bash - -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-} -LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR:-} - -TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-} -PYPI_VERSION=${PYPI_VERSION:-} -BUILD_PLATFORM=${BUILD_PLATFORM:-} -# This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out -# Reference: https://github.com/astral-sh/uv/pull/1694 -UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500} - -# mounting is not supported by docker buildx, so we use COPY instead -USE_COPY_NOT_MOUNT=${USE_COPY_NOT_MOUNT:-} -# Path to the run.yaml file in the container -RUN_CONFIG_PATH=/app/run.yaml - -BUILD_CONTEXT_DIR=$(pwd) - -set -euo pipefail - -# Define color codes -RED='\033[0;31m' -NC='\033[0m' # No Color - -# Usage function -usage() { - echo "Usage: $0 --image-name --container-base --normal-deps [--run-config ] [--external-provider-deps ] [--optional-deps ]" - echo "Example: $0 --image-name llama-stack-img --container-base python:3.12-slim --normal-deps 'numpy pandas' --run-config ./run.yaml --external-provider-deps 'foo' --optional-deps 'bar'" - exit 1 -} - -# Parse arguments -image_name="" -container_base="" -normal_deps="" -external_provider_deps="" -optional_deps="" -run_config="" -distro_or_config="" - -while [[ $# -gt 0 ]]; do - key="$1" - case "$key" in - --image-name) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --image-name requires a string value" >&2 - usage - fi - image_name="$2" - shift 2 - ;; - --container-base) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --container-base requires a string value" >&2 - usage - fi - container_base="$2" - shift 2 - ;; - --normal-deps) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --normal-deps requires a string value" >&2 - usage - fi - normal_deps="$2" - shift 2 - ;; - --external-provider-deps) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --external-provider-deps requires a string value" >&2 - usage - fi - external_provider_deps="$2" - shift 2 - ;; - --optional-deps) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --optional-deps requires a string value" >&2 - usage - fi - optional_deps="$2" - shift 2 - ;; - --run-config) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --run-config requires a string value" >&2 - usage - fi - run_config="$2" - shift 2 - ;; - --distro-or-config) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --distro-or-config requires a string value" >&2 - usage - fi - distro_or_config="$2" - shift 2 - ;; - *) - echo "Unknown option: $1" >&2 - usage - ;; - esac -done - -# Check required arguments -if [[ -z "$image_name" || -z "$container_base" || -z "$normal_deps" ]]; then - echo "Error: --image-name, --container-base, and --normal-deps are required." >&2 - usage -fi - -CONTAINER_BINARY=${CONTAINER_BINARY:-docker} -CONTAINER_OPTS=${CONTAINER_OPTS:---progress=plain} -TEMP_DIR=$(mktemp -d) -SCRIPT_DIR=$(dirname "$(readlink -f "$0")") -source "$SCRIPT_DIR/common.sh" - -add_to_container() { - output_file="$TEMP_DIR/Containerfile" - if [ -t 0 ]; then - printf '%s\n' "$1" >>"$output_file" - else - cat >>"$output_file" - fi -} - -if ! is_command_available "$CONTAINER_BINARY"; then - printf "${RED}Error: ${CONTAINER_BINARY} command not found. Is ${CONTAINER_BINARY} installed and in your PATH?${NC}" >&2 - exit 1 -fi - -if [[ $container_base == *"registry.access.redhat.com/ubi9"* ]]; then - add_to_container << EOF -FROM $container_base -WORKDIR /app - -# We install the Python 3.12 dev headers and build tools so that any -# C-extension wheels (e.g. 
polyleven, faiss-cpu) can compile successfully. - -RUN dnf -y update && dnf install -y iputils git net-tools wget \ - vim-minimal python3.12 python3.12-pip python3.12-wheel \ - python3.12-setuptools python3.12-devel gcc gcc-c++ make && \ - ln -s /bin/pip3.12 /bin/pip && ln -s /bin/python3.12 /bin/python && dnf clean all - -ENV UV_SYSTEM_PYTHON=1 -RUN pip install uv -EOF -else - add_to_container << EOF -FROM $container_base -WORKDIR /app - -RUN apt-get update && apt-get install -y \ - iputils-ping net-tools iproute2 dnsutils telnet \ - curl wget telnet git\ - procps psmisc lsof \ - traceroute \ - bubblewrap \ - gcc g++ \ - && rm -rf /var/lib/apt/lists/* - -ENV UV_SYSTEM_PYTHON=1 -RUN pip install uv -EOF -fi - -# Add pip dependencies first since llama-stack is what will change most often -# so we can reuse layers. -if [ -n "$normal_deps" ]; then - read -ra pip_args <<< "$normal_deps" - quoted_deps=$(printf " %q" "${pip_args[@]}") - add_to_container << EOF -RUN uv pip install --no-cache $quoted_deps -EOF -fi - -if [ -n "$optional_deps" ]; then - IFS='#' read -ra parts <<<"$optional_deps" - for part in "${parts[@]}"; do - read -ra pip_args <<< "$part" - quoted_deps=$(printf " %q" "${pip_args[@]}") - add_to_container <=')[0].split('<=')[0].split('!=')[0].split('<')[0].split('>')[0] - module = importlib.import_module(f'{package_name}.provider') - spec = module.get_provider_spec() - if hasattr(spec, 'pip_packages') and spec.pip_packages: - if isinstance(spec.pip_packages, (list, tuple)): - print('\n'.join(spec.pip_packages)) -except Exception as e: - print(f'Error getting provider spec for {package_name}: {e}', file=sys.stderr) -PYTHON -EOF - done -fi - -get_python_cmd() { - if is_command_available python; then - echo "python" - elif is_command_available python3; then - echo "python3" - else - echo "Error: Neither python nor python3 is installed. Please install Python to continue." 
>&2 - exit 1 - fi -} - -if [ -n "$run_config" ]; then - # Copy the run config to the build context since it's an absolute path - cp "$run_config" "$BUILD_CONTEXT_DIR/run.yaml" - - # Parse the run.yaml configuration to identify external provider directories - # If external providers are specified, copy their directory to the container - # and update the configuration to reference the new container path - python_cmd=$(get_python_cmd) - external_providers_dir=$($python_cmd -c "import yaml; config = yaml.safe_load(open('$run_config')); print(config.get('external_providers_dir') or '')") - external_providers_dir=$(eval echo "$external_providers_dir") - if [ -n "$external_providers_dir" ]; then - if [ -d "$external_providers_dir" ]; then - echo "Copying external providers directory: $external_providers_dir" - cp -r "$external_providers_dir" "$BUILD_CONTEXT_DIR/providers.d" - add_to_container << EOF -COPY providers.d /.llama/providers.d -EOF - fi - - # Edit the run.yaml file to change the external_providers_dir to /.llama/providers.d - if [ "$(uname)" = "Darwin" ]; then - sed -i.bak -e 's|external_providers_dir:.*|external_providers_dir: /.llama/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml" - rm -f "$BUILD_CONTEXT_DIR/run.yaml.bak" - else - sed -i 's|external_providers_dir:.*|external_providers_dir: /.llama/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml" - fi - fi - - # Copy run config into docker image - add_to_container << EOF -COPY run.yaml $RUN_CONFIG_PATH -EOF -fi - -stack_mount="/app/llama-stack-source" -client_mount="/app/llama-stack-client-source" - -install_local_package() { - local dir="$1" - local mount_point="$2" - local name="$3" - - if [ ! -d "$dir" ]; then - echo "${RED}Warning: $name is set but directory does not exist: $dir${NC}" >&2 - exit 1 - fi - - if [ "$USE_COPY_NOT_MOUNT" = "true" ]; then - add_to_container << EOF -COPY $dir $mount_point -EOF - fi - add_to_container << EOF -RUN uv pip install --no-cache -e $mount_point -EOF -} - - -if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then - install_local_package "$LLAMA_STACK_CLIENT_DIR" "$client_mount" "LLAMA_STACK_CLIENT_DIR" -fi - -if [ -n "$LLAMA_STACK_DIR" ]; then - install_local_package "$LLAMA_STACK_DIR" "$stack_mount" "LLAMA_STACK_DIR" -else - if [ -n "$TEST_PYPI_VERSION" ]; then - # these packages are damaged in test-pypi, so install them first - add_to_container << EOF -RUN uv pip install --no-cache fastapi libcst -EOF - add_to_container << EOF -RUN uv pip install --no-cache --extra-index-url https://test.pypi.org/simple/ \ - --index-strategy unsafe-best-match \ - llama-stack==$TEST_PYPI_VERSION - -EOF - else - if [ -n "$PYPI_VERSION" ]; then - SPEC_VERSION="llama-stack==${PYPI_VERSION}" - else - SPEC_VERSION="llama-stack" - fi - add_to_container << EOF -RUN uv pip install --no-cache $SPEC_VERSION -EOF - fi -fi - -# remove uv after installation - add_to_container << EOF -RUN pip uninstall -y uv -EOF - -# If a run config is provided, we use the llama stack CLI -if [[ -n "$run_config" ]]; then - add_to_container << EOF -ENTRYPOINT ["llama", "stack", "run", "$RUN_CONFIG_PATH"] -EOF -elif [[ "$distro_or_config" != *.yaml ]]; then - add_to_container << EOF -ENTRYPOINT ["llama", "stack", "run", "$distro_or_config"] -EOF -fi - -# Add other require item commands genearic to all containers -add_to_container << EOF - -RUN mkdir -p /.llama /.cache && chmod -R g+rw /.llama /.cache && (chmod -R g+rw /app 2>/dev/null || true) -EOF - -printf "Containerfile created successfully in %s/Containerfile\n\n" "$TEMP_DIR" -cat "$TEMP_DIR"/Containerfile 
-printf "\n" - -# Start building the CLI arguments -CLI_ARGS=() - -# Read CONTAINER_OPTS and put it in an array -read -ra CLI_ARGS <<< "$CONTAINER_OPTS" - -if [ "$USE_COPY_NOT_MOUNT" != "true" ]; then - if [ -n "$LLAMA_STACK_DIR" ]; then - CLI_ARGS+=("-v" "$(readlink -f "$LLAMA_STACK_DIR"):$stack_mount") - fi - if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then - CLI_ARGS+=("-v" "$(readlink -f "$LLAMA_STACK_CLIENT_DIR"):$client_mount") - fi -fi - -if is_command_available selinuxenabled && selinuxenabled; then - # Disable SELinux labels -- we don't want to relabel the llama-stack source dir - CLI_ARGS+=("--security-opt" "label=disable") -fi - -# Set version tag based on PyPI version -if [ -n "$PYPI_VERSION" ]; then - version_tag="$PYPI_VERSION" -elif [ -n "$TEST_PYPI_VERSION" ]; then - version_tag="test-$TEST_PYPI_VERSION" -elif [[ -n "$LLAMA_STACK_DIR" || -n "$LLAMA_STACK_CLIENT_DIR" ]]; then - version_tag="dev" -else - URL="https://pypi.org/pypi/llama-stack/json" - version_tag=$(curl -s $URL | jq -r '.info.version') -fi - -# Add version tag to image name -image_tag="$image_name:$version_tag" - -# Detect platform architecture -ARCH=$(uname -m) -if [ -n "$BUILD_PLATFORM" ]; then - CLI_ARGS+=("--platform" "$BUILD_PLATFORM") -elif [ "$ARCH" = "arm64" ] || [ "$ARCH" = "aarch64" ]; then - CLI_ARGS+=("--platform" "linux/arm64") -elif [ "$ARCH" = "x86_64" ]; then - CLI_ARGS+=("--platform" "linux/amd64") -else - echo "Unsupported architecture: $ARCH" - exit 1 -fi - -echo "PWD: $(pwd)" -echo "Containerfile: $TEMP_DIR/Containerfile" -set -x - -$CONTAINER_BINARY build \ - "${CLI_ARGS[@]}" \ - -t "$image_tag" \ - -f "$TEMP_DIR/Containerfile" \ - "$BUILD_CONTEXT_DIR" - -# clean up tmp/configs -rm -rf "$BUILD_CONTEXT_DIR/run.yaml" "$TEMP_DIR" -set +x - -echo "Success!" diff --git a/llama_stack/core/build_venv.sh b/llama_stack/core/build_venv.sh deleted file mode 100755 index 04927d71e..000000000 --- a/llama_stack/core/build_venv.sh +++ /dev/null @@ -1,220 +0,0 @@ -#!/bin/bash - -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-} -LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR:-} -TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-} -# This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out -# Reference: https://github.com/astral-sh/uv/pull/1694 -UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500} -UV_SYSTEM_PYTHON=${UV_SYSTEM_PYTHON:-} -VIRTUAL_ENV=${VIRTUAL_ENV:-} - -set -euo pipefail - -# Define color codes -RED='\033[0;31m' -NC='\033[0m' # No Color - -SCRIPT_DIR=$(dirname "$(readlink -f "$0")") -source "$SCRIPT_DIR/common.sh" - -# Usage function -usage() { - echo "Usage: $0 --env-name --normal-deps [--external-provider-deps ] [--optional-deps ]" - echo "Example: $0 --env-name mybuild --normal-deps 'numpy pandas scipy' --external-provider-deps 'foo' --optional-deps 'bar'" - exit 1 -} - -# Parse arguments -env_name="" -normal_deps="" -external_provider_deps="" -optional_deps="" - -while [[ $# -gt 0 ]]; do - key="$1" - case "$key" in - --env-name) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --env-name requires a string value" >&2 - usage - fi - env_name="$2" - shift 2 - ;; - --normal-deps) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --normal-deps requires a string value" >&2 - usage - fi - normal_deps="$2" - shift 2 - ;; - --external-provider-deps) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --external-provider-deps requires a string value" >&2 - usage - fi - external_provider_deps="$2" - shift 2 - ;; - --optional-deps) - if [[ -z "$2" || "$2" == --* ]]; then - echo "Error: --optional-deps requires a string value" >&2 - usage - fi - optional_deps="$2" - shift 2 - ;; - *) - echo "Unknown option: $1" >&2 - usage - ;; - esac -done - -# Check required arguments -if [[ -z "$env_name" || -z "$normal_deps" ]]; then - echo "Error: --env-name and --normal-deps are required." >&2 - usage -fi - -if [ -n "$LLAMA_STACK_DIR" ]; then - echo "Using llama-stack-dir=$LLAMA_STACK_DIR" -fi -if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then - echo "Using llama-stack-client-dir=$LLAMA_STACK_CLIENT_DIR" -fi - -ENVNAME="" - -# pre-run checks to make sure we can proceed with the installation -pre_run_checks() { - local env_name="$1" - - if ! is_command_available uv; then - echo "uv is not installed, trying to install it." - if ! is_command_available pip; then - echo "pip is not installed, cannot automatically install 'uv'." - echo "Follow this link to install it:" - echo "https://docs.astral.sh/uv/getting-started/installation/" - exit 1 - else - pip install uv - fi - fi - - # checking if an environment with the same name already exists - if [ -d "$env_name" ]; then - echo "Environment '$env_name' already exists, re-using it." 
- fi -} - -run() { - # Use only global variables set by flag parser - if [ -n "$UV_SYSTEM_PYTHON" ] || [ "$env_name" == "__system__" ]; then - echo "Installing dependencies in system Python environment" - export UV_SYSTEM_PYTHON=1 - elif [ "$VIRTUAL_ENV" == "$env_name" ]; then - echo "Virtual environment $env_name is already active" - else - echo "Using virtual environment $env_name" - uv venv "$env_name" - source "$env_name/bin/activate" - fi - - if [ -n "$TEST_PYPI_VERSION" ]; then - uv pip install fastapi libcst - uv pip install --extra-index-url https://test.pypi.org/simple/ \ - --index-strategy unsafe-best-match \ - llama-stack=="$TEST_PYPI_VERSION" \ - $normal_deps - if [ -n "$optional_deps" ]; then - IFS='#' read -ra parts <<<"$optional_deps" - for part in "${parts[@]}"; do - echo "$part" - uv pip install $part - done - fi - if [ -n "$external_provider_deps" ]; then - IFS='#' read -ra parts <<<"$external_provider_deps" - for part in "${parts[@]}"; do - echo "$part" - uv pip install "$part" - done - fi - else - if [ -n "$LLAMA_STACK_DIR" ]; then - # only warn if DIR does not start with "git+" - if [ ! -d "$LLAMA_STACK_DIR" ] && [[ "$LLAMA_STACK_DIR" != git+* ]]; then - printf "${RED}Warning: LLAMA_STACK_DIR is set but directory does not exist: %s${NC}\n" "$LLAMA_STACK_DIR" >&2 - exit 1 - fi - printf "Installing from LLAMA_STACK_DIR: %s\n" "$LLAMA_STACK_DIR" - # editable only if LLAMA_STACK_DIR does not start with "git+" - if [[ "$LLAMA_STACK_DIR" != git+* ]]; then - EDITABLE="-e" - else - EDITABLE="" - fi - uv pip install --no-cache-dir $EDITABLE "$LLAMA_STACK_DIR" - else - uv pip install --no-cache-dir llama-stack - fi - - if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then - # only warn if DIR does not start with "git+" - if [ ! -d "$LLAMA_STACK_CLIENT_DIR" ] && [[ "$LLAMA_STACK_CLIENT_DIR" != git+* ]]; then - printf "${RED}Warning: LLAMA_STACK_CLIENT_DIR is set but directory does not exist: %s${NC}\n" "$LLAMA_STACK_CLIENT_DIR" >&2 - exit 1 - fi - printf "Installing from LLAMA_STACK_CLIENT_DIR: %s\n" "$LLAMA_STACK_CLIENT_DIR" - # editable only if LLAMA_STACK_CLIENT_DIR does not start with "git+" - if [[ "$LLAMA_STACK_CLIENT_DIR" != git+* ]]; then - EDITABLE="-e" - else - EDITABLE="" - fi - uv pip install --no-cache-dir $EDITABLE "$LLAMA_STACK_CLIENT_DIR" - fi - - printf "Installing pip dependencies\n" - uv pip install $normal_deps - if [ -n "$optional_deps" ]; then - IFS='#' read -ra parts <<<"$optional_deps" - for part in "${parts[@]}"; do - echo "Installing special provider module: $part" - uv pip install $part - done - fi - if [ -n "$external_provider_deps" ]; then - IFS='#' read -ra parts <<<"$external_provider_deps" - for part in "${parts[@]}"; do - echo "Installing external provider module: $part" - uv pip install "$part" - echo "Getting provider spec for module: $part and installing dependencies" - package_name=$(echo "$part" | sed 's/[<>=!].*//') - python3 -c " -import importlib -import sys -try: - module = importlib.import_module(f'$package_name.provider') - spec = module.get_provider_spec() - if hasattr(spec, 'pip_packages') and spec.pip_packages: - print('\\n'.join(spec.pip_packages)) -except Exception as e: - print(f'Error getting provider spec for $package_name: {e}', file=sys.stderr) -" | uv pip install -r - - done - fi - fi -} - -pre_run_checks "$env_name" -run diff --git a/llama_stack/core/library_client.py b/llama_stack/core/library_client.py index 1179075cd..328ca9c6e 100644 --- a/llama_stack/core/library_client.py +++ b/llama_stack/core/library_client.py @@ -278,7 +278,7 
@@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient): else: prefix = "!" if in_notebook() else "" cprint( - f"Please run:\n\n{prefix}llama stack build --distro {self.config_path_or_distro_name} --image-type venv\n\n", + f"Please run:\n\n{prefix}llama stack list-deps {self.config_path_or_distro_name} | xargs -L1 uv pip install\n\n", "yellow", file=sys.stderr, ) diff --git a/llama_stack/core/ui/README.md b/llama_stack/core/ui/README.md index f1d85454b..37f1501c9 100644 --- a/llama_stack/core/ui/README.md +++ b/llama_stack/core/ui/README.md @@ -9,7 +9,7 @@ 1. Start up Llama Stack API server. More details [here](https://llamastack.github.io/latest/getting_started/index.htmll). ``` -llama stack build --distro together --image-type venv +llama stack list-deps together | xargs -L1 uv pip install llama stack run together ``` diff --git a/llama_stack/distributions/dell/doc_template.md b/llama_stack/distributions/dell/doc_template.md index 852e78d0e..4e28673e8 100644 --- a/llama_stack/distributions/dell/doc_template.md +++ b/llama_stack/distributions/dell/doc_template.md @@ -157,7 +157,7 @@ docker run \ Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available. ```bash -llama stack build --distro {{ name }} --image-type conda +llama stack list-deps {{ name }} | xargs -L1 pip install INFERENCE_MODEL=$INFERENCE_MODEL \ DEH_URL=$DEH_URL \ CHROMA_URL=$CHROMA_URL \ diff --git a/llama_stack/providers/remote/datasetio/nvidia/README.md b/llama_stack/providers/remote/datasetio/nvidia/README.md index 74e0895f4..da57d5550 100644 --- a/llama_stack/providers/remote/datasetio/nvidia/README.md +++ b/llama_stack/providers/remote/datasetio/nvidia/README.md @@ -20,7 +20,7 @@ This provider enables dataset management using NVIDIA's NeMo Customizer service. Build the NVIDIA environment: ```bash -llama stack build --distro nvidia --image-type venv +uv run llama stack list-deps nvidia | xargs -L1 uv pip install ``` ### Basic Usage using the LlamaStack Python Client diff --git a/llama_stack/providers/remote/inference/nvidia/NVIDIA.md b/llama_stack/providers/remote/inference/nvidia/NVIDIA.md index 692b9125b..f1a828413 100644 --- a/llama_stack/providers/remote/inference/nvidia/NVIDIA.md +++ b/llama_stack/providers/remote/inference/nvidia/NVIDIA.md @@ -18,7 +18,7 @@ This provider enables running inference using NVIDIA NIM. 
Build the NVIDIA environment: ```bash -llama stack build --distro nvidia --image-type venv +uv run llama stack list-deps nvidia | xargs -L1 uv pip install ``` ### Basic Usage using the LlamaStack Python Client diff --git a/llama_stack/providers/remote/inference/nvidia/__init__.py b/llama_stack/providers/remote/inference/nvidia/__init__.py index 1869cb748..b4926f33e 100644 --- a/llama_stack/providers/remote/inference/nvidia/__init__.py +++ b/llama_stack/providers/remote/inference/nvidia/__init__.py @@ -10,7 +10,7 @@ from .config import NVIDIAConfig async def get_adapter_impl(config: NVIDIAConfig, _deps) -> Inference: - # import dynamically so `llama stack build` does not fail due to missing dependencies + # import dynamically so `llama stack list-deps` does not fail due to missing dependencies from .nvidia import NVIDIAInferenceAdapter if not isinstance(config, NVIDIAConfig): diff --git a/llama_stack/providers/remote/post_training/nvidia/README.md b/llama_stack/providers/remote/post_training/nvidia/README.md index 9b088a615..789514b1e 100644 --- a/llama_stack/providers/remote/post_training/nvidia/README.md +++ b/llama_stack/providers/remote/post_training/nvidia/README.md @@ -22,7 +22,7 @@ This provider enables fine-tuning of LLMs using NVIDIA's NeMo Customizer service Build the NVIDIA environment: ```bash -llama stack build --distro nvidia --image-type venv +uv run llama stack list-deps nvidia | xargs -L1 uv pip install ``` ### Basic Usage using the LlamaStack Python Client diff --git a/llama_stack/providers/remote/safety/nvidia/README.md b/llama_stack/providers/remote/safety/nvidia/README.md index 784ab464f..e589afe84 100644 --- a/llama_stack/providers/remote/safety/nvidia/README.md +++ b/llama_stack/providers/remote/safety/nvidia/README.md @@ -19,7 +19,7 @@ This provider enables safety checks and guardrails for LLM interactions using NV Build the NVIDIA environment: ```bash -llama stack build --distro nvidia --image-type venv +uv run llama stack list-deps nvidia | xargs -L1 uv pip install ``` ### Basic Usage using the LlamaStack Python Client diff --git a/tests/integration/fixtures/common.py b/tests/integration/fixtures/common.py index ffd49033d..6a9e1f3b2 100644 --- a/tests/integration/fixtures/common.py +++ b/tests/integration/fixtures/common.py @@ -40,7 +40,7 @@ def is_port_available(port: int, host: str = "localhost") -> bool: def start_llama_stack_server(config_name: str) -> subprocess.Popen: """Start a llama stack server with the given config.""" - cmd = f"uv run --with llama-stack llama stack build --distro {config_name} --image-type venv --run" + cmd = f"uv run llama stack run {config_name}" devnull = open(os.devnull, "w") process = subprocess.Popen( shlex.split(cmd), diff --git a/tests/unit/distribution/test_build_path.py b/tests/unit/distribution/test_build_path.py deleted file mode 100644 index 52a71286b..000000000 --- a/tests/unit/distribution/test_build_path.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -from pathlib import Path - -from llama_stack.cli.stack._build import ( - _run_stack_build_command_from_build_config, -) -from llama_stack.core.datatypes import BuildConfig, DistributionSpec -from llama_stack.core.utils.image_types import LlamaStackImageType - - -def test_container_build_passes_path(monkeypatch, tmp_path): - called_with = {} - - def spy_build_image(build_config, image_name, distro_or_config, run_config=None): - called_with["path"] = distro_or_config - called_with["run_config"] = run_config - return 0 - - monkeypatch.setattr( - "llama_stack.cli.stack._build.build_image", - spy_build_image, - raising=True, - ) - - cfg = BuildConfig( - image_type=LlamaStackImageType.CONTAINER.value, - distribution_spec=DistributionSpec(providers={}, description=""), - ) - - _run_stack_build_command_from_build_config(cfg, image_name="dummy") - - assert "path" in called_with - assert isinstance(called_with["path"], str) - assert Path(called_with["path"]).exists() - assert called_with["run_config"] is None
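
---

## Migration note

With `llama stack build` and its backing scripts (`build_container.sh`, `build_venv.sh`) removed, dependency installation and server startup become two explicit steps. A minimal sketch of the replacement workflow, assuming `uv` is on PATH and using the `nvidia` distribution as the example (any name listed by `llama stack build --list-distros` previously, now any distribution known to `llama stack list-deps`, works the same way):

```bash
# Resolve the distribution's Python dependencies and install them into the
# active (virtual) environment -- xargs -L1 runs one `uv pip install` per
# dependency line, matching the hint now printed by library_client.py.
uv run llama stack list-deps nvidia | xargs -L1 uv pip install

# Start the stack server for the same distribution.
llama stack run nvidia
```

This mirrors the commands this patch substitutes into the provider READMEs and the integration fixture (`uv run llama stack run {config_name}`); it is a sketch of the intended flow, not an exhaustive replacement for every flag the deleted `build` command accepted (e.g. `--image-type container`, whose container path has no equivalent in this diff).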