diff --git a/distributions/dell-tgi/run.yaml b/distributions/dell-tgi/run.yaml
index 3f8a98779..cd6ddcfdf 100644
--- a/distributions/dell-tgi/run.yaml
+++ b/distributions/dell-tgi/run.yaml
@@ -1,6 +1,6 @@
version: '2'
image_name: local
-docker_image: null
+container_image: null
conda_env: local
apis:
- shields
diff --git a/distributions/meta-reference-quantized-gpu/run.yaml b/distributions/meta-reference-quantized-gpu/run.yaml
index 19c726b09..eb631adaa 100644
--- a/distributions/meta-reference-quantized-gpu/run.yaml
+++ b/distributions/meta-reference-quantized-gpu/run.yaml
@@ -1,6 +1,6 @@
version: '2'
image_name: local
-docker_image: null
+container_image: null
conda_env: local
apis:
- shields
diff --git a/distributions/vllm-gpu/run.yaml b/distributions/vllm-gpu/run.yaml
index f42c942a3..a75a4c451 100644
--- a/distributions/vllm-gpu/run.yaml
+++ b/distributions/vllm-gpu/run.yaml
@@ -1,6 +1,6 @@
version: '2'
image_name: local
-docker_image: null
+container_image: null
conda_env: local
apis:
- shields
diff --git a/docs/getting_started.ipynb b/docs/getting_started.ipynb
index 921869b33..1db7c0280 100644
--- a/docs/getting_started.ipynb
+++ b/docs/getting_started.ipynb
@@ -481,7 +481,7 @@
"- telemetry\n",
"conda_env: together\n",
"datasets: []\n",
- "docker_image: null\n",
+ "container_image: null\n",
"eval_tasks: []\n",
"image_name: together\n",
"memory_banks: []\n",
@@ -600,7 +600,7 @@
"- telemetry\n",
"conda_env: together\n",
"datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
- "docker_image: null\n",
+ "container_image: null\n",
"eval_tasks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
"image_name: together\n",
"memory_banks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
diff --git a/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb b/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
index 730017232..a552ce69d 100644
--- a/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
+++ b/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
@@ -369,7 +369,7 @@
"- telemetry\n",
"- tool_runtime\n",
"datasets: []\n",
- "docker_image: null\n",
+ "container_image: null\n",
"eval_tasks: []\n",
"image_name: together\n",
"memory_banks: []\n",
@@ -550,7 +550,7 @@
"- telemetry\n",
"- tool_runtime\n",
"datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
- "docker_image: null\n",
+ "container_image: null\n",
"eval_tasks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
"image_name: together\n",
"memory_banks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
diff --git a/docs/notebooks/Llama_Stack_Building_AI_Applications.ipynb b/docs/notebooks/Llama_Stack_Building_AI_Applications.ipynb
index bed1aa2a8..df8995fd4 100644
--- a/docs/notebooks/Llama_Stack_Building_AI_Applications.ipynb
+++ b/docs/notebooks/Llama_Stack_Building_AI_Applications.ipynb
@@ -760,7 +760,7 @@
"- tool_runtime\n",
"conda_env: together\n",
"datasets: []\n",
- "docker_image: null\n",
+ "container_image: null\n",
"eval_tasks: []\n",
"image_name: together\n",
"memory_banks: []\n",
@@ -942,7 +942,7 @@
"- tool_runtime\n",
"conda_env: together\n",
"datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
- "docker_image: null\n",
+ "container_image: null\n",
"eval_tasks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
"image_name: together\n",
"memory_banks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
diff --git a/docs/source/distributions/building_distro.md b/docs/source/distributions/building_distro.md
index aaf2462f7..83069aa05 100644
--- a/docs/source/distributions/building_distro.md
+++ b/docs/source/distributions/building_distro.md
@@ -17,13 +17,13 @@ pip install -e .
llama stack build -h
```
-We will start build our distribution (in the form of a Conda environment, or Docker image). In this step, we will specify:
+We will start building our distribution (in the form of a Conda environment or Container image). In this step, we will specify:
- `name`: the name for our distribution (e.g. `my-stack`)
-- `image_type`: our build image type (`conda | docker`)
+- `image_type`: our build image type (`conda | container`)
- `distribution_spec`: our distribution specs for specifying API providers
- `description`: a short description of the configurations for the distribution
- `providers`: specifies the underlying implementation for serving each API endpoint
- - `image_type`: `conda` | `docker` to specify whether to build the distribution in the form of Docker image or Conda environment.
+ - `image_type`: `conda` | `container` to specify whether to build the distribution in the form of a Container image or a Conda environment.
After this step is complete, a file named `<name>-build.yaml` and a template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.
@@ -35,7 +35,7 @@ After this step is complete, a file named `-build.yaml` and template file
llama stack build
> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
-> Enter the image type you want your Llama Stack to be built as (docker or conda): conda
+> Enter the image type you want your Llama Stack to be built as (container or conda): conda
Llama Stack is composed of several APIs working together. Let's select
the provider types (implementations) you want to use for these APIs.
@@ -348,26 +348,26 @@ llama stack build --config llama_stack/templates/ollama/build.yaml
```
:::
-:::{tab-item} Building Docker
+:::{tab-item} Building Container
> [!TIP]
-> Podman is supported as an alternative to Docker. Set `DOCKER_BINARY` to `podman` in your environment to use Podman.
+> Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
-To build a docker image, you may start off from a template and use the `--image-type docker` flag to specify `docker` as the build image type.
+To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.
```
-llama stack build --template ollama --image-type docker
+llama stack build --template ollama --image-type container
```
```
-$ llama stack build --template ollama --image-type docker
+$ llama stack build --template ollama --image-type container
...
-Dockerfile created successfully in /tmp/tmp.viA3a3Rdsg/Dockerfile
+Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/Containerfile
FROM python:3.10-slim
...
You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml`
```
-After this step is successful, you should be able to find the built docker image and test it with `llama stack run `.
+After this step is successful, you should be able to find the built container image and test it with `llama stack run `.
:::
::::
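For readers who script the build, here is a minimal sketch (illustrative only, not part of this change; it assumes the `llama` CLI is on PATH) of invoking the documented command with the Podman override described above:

```python
import os
import subprocess

# Illustrative: run the documented build command with Podman selected via the
# CONTAINER_BINARY override from the tip above.
env = {**os.environ, "CONTAINER_BINARY": "podman"}
subprocess.run(
    ["llama", "stack", "build", "--template", "ollama", "--image-type", "container"],
    env=env,
    check=True,
)
```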
diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py
index 08c987a50..16ca670f7 100644
--- a/llama_stack/cli/stack/_build.py
+++ b/llama_stack/cli/stack/_build.py
@@ -182,8 +182,8 @@ def _generate_run_config(
"""
apis = list(build_config.distribution_spec.providers.keys())
run_config = StackRunConfig(
- docker_image=(
- image_name if build_config.image_type == ImageType.docker.value else None
+ container_image=(
+ image_name if build_config.image_type == ImageType.container.value else None
),
image_name=image_name,
apis=apis,
@@ -238,7 +238,7 @@ def _run_stack_build_command_from_build_config(
image_name: Optional[str] = None,
template_name: Optional[str] = None,
) -> None:
- if build_config.image_type == ImageType.docker.value:
+ if build_config.image_type == ImageType.container.value:
if template_name:
image_name = f"distribution-{template_name}"
else:
diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py
index d00157710..48c811839 100644
--- a/llama_stack/cli/stack/build.py
+++ b/llama_stack/cli/stack/build.py
@@ -47,8 +47,8 @@ class StackBuild(Subcommand):
self.parser.add_argument(
"--image-type",
type=str,
- help="Image Type to use for the build. This can be either conda or docker. If not specified, will use the image type from the template config.",
- choices=["conda", "docker", "venv"],
+ help="Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config.",
+ choices=["conda", "container", "venv"],
default="conda",
)
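As a standalone illustration of the renamed flag (the parser below is a simplified, hypothetical stand-in for `StackBuild`, not the project's actual wiring), the new choices list rejects the old `docker` value at parse time:

```python
import argparse

# Hypothetical, trimmed stand-in for the StackBuild argument parser.
parser = argparse.ArgumentParser(prog="llama stack build")
parser.add_argument(
    "--image-type",
    type=str,
    choices=["conda", "container", "venv"],
    default="conda",
)

print(parser.parse_args(["--image-type", "container"]).image_type)  # container
# parser.parse_args(["--image-type", "docker"]) now exits with "invalid choice".
```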
diff --git a/llama_stack/cli/stack/configure.py b/llama_stack/cli/stack/configure.py
index 11d3f705a..56f4feceb 100644
--- a/llama_stack/cli/stack/configure.py
+++ b/llama_stack/cli/stack/configure.py
@@ -27,7 +27,7 @@ class StackConfigure(Subcommand):
self.parser.add_argument(
"config",
type=str,
- help="Path to the build config file (e.g. ~/.llama/builds//-build.yaml). For docker, this could also be the name of the docker image. ",
+ help="Path to the build config file (e.g. ~/.llama/builds//-build.yaml). For container, this could also be the name of the container image. ",
)
self.parser.add_argument(
diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py
index 9fa82bd61..e1e02d10c 100644
--- a/llama_stack/cli/stack/run.py
+++ b/llama_stack/cli/stack/run.py
@@ -92,9 +92,9 @@ class StackRun(Subcommand):
)
if not config_file.exists() and not has_yaml_suffix:
- # check if it's a build config saved to docker dir
+ # check if it's a build config saved to container dir
config_file = Path(
- BUILDS_BASE_DIR / ImageType.docker.value / f"{args.config}-run.yaml"
+ BUILDS_BASE_DIR / ImageType.container.value / f"{args.config}-run.yaml"
)
if not config_file.exists() and not has_yaml_suffix:
@@ -115,12 +115,12 @@ class StackRun(Subcommand):
config_dict = yaml.safe_load(config_file.read_text())
config = parse_and_maybe_upgrade_config(config_dict)
- if config.docker_image:
+ if config.container_image:
script = (
importlib.resources.files("llama_stack")
/ "distribution/start_container.sh"
)
- run_args = [script, config.docker_image]
+ run_args = [script, config.container_image]
else:
current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
image_name = args.image_name or current_conda_env
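A condensed sketch of the dispatch above (the function name, return shape, and conda fallback are illustrative; only the `container_image` attribute comes from this change):

```python
import os


def pick_launch_mode(config, cli_image_name=None):
    """Illustrative only: mirrors the container-vs-conda branch shown above."""
    if config.container_image:
        # A configured container image is launched via start_container.sh.
        return ("container", config.container_image)
    # Otherwise fall back to an explicit image name or the active conda env.
    return ("conda", cli_image_name or os.environ.get("CONDA_DEFAULT_ENV", ""))
```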
diff --git a/llama_stack/distribution/build.py b/llama_stack/distribution/build.py
index b8b4188ac..b8d35ccdc 100644
--- a/llama_stack/distribution/build.py
+++ b/llama_stack/distribution/build.py
@@ -38,7 +38,7 @@ SERVER_DEPENDENCIES = [
class ImageType(Enum):
- docker = "docker"
+ container = "container"
conda = "conda"
venv = "venv"
@@ -77,8 +77,8 @@ def get_provider_dependencies(
provider_spec = providers_for_api[provider_type]
deps.extend(provider_spec.pip_packages)
- if provider_spec.docker_image:
- raise ValueError("A stack's dependencies cannot have a docker image")
+ if provider_spec.container_image:
+ raise ValueError("A stack's dependencies cannot have a container image")
normal_deps = []
special_deps = []
@@ -109,23 +109,25 @@ def build_image(
image_name: str,
template_name: Optional[str] = None,
):
- docker_image = build_config.distribution_spec.docker_image or "python:3.10-slim"
+ container_image = (
+ build_config.distribution_spec.container_image or "python:3.10-slim"
+ )
normal_deps, special_deps = get_provider_dependencies(
build_config.distribution_spec.providers
)
normal_deps += SERVER_DEPENDENCIES
- if build_config.image_type == ImageType.docker.value:
+ if build_config.image_type == ImageType.container.value:
script = str(
importlib.resources.files("llama_stack") / "distribution/build_container.sh"
)
args = [
script,
image_name,
- docker_image,
+ container_image,
str(build_file_path),
- str(BUILDS_BASE_DIR / ImageType.docker.value),
+ str(BUILDS_BASE_DIR / ImageType.container.value),
" ".join(normal_deps),
]
elif build_config.image_type == ImageType.conda.value:
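One practical consequence of renaming the enum member is that pre-existing build configs declaring `image_type: docker` no longer match any `ImageType` value. A small self-contained check (the enum body is copied from the hunk above):

```python
from enum import Enum


class ImageType(Enum):
    container = "container"  # formerly "docker"
    conda = "conda"
    venv = "venv"


print(ImageType("container"))                    # ImageType.container
print("docker" in {t.value for t in ImageType})  # False: old configs need updating
```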
diff --git a/llama_stack/distribution/build_container.sh b/llama_stack/distribution/build_container.sh
index 17902de0a..4c2425004 100755
--- a/llama_stack/distribution/build_container.sh
+++ b/llama_stack/distribution/build_container.sh
@@ -13,7 +13,7 @@ PYPI_VERSION=${PYPI_VERSION:-}
BUILD_PLATFORM=${BUILD_PLATFORM:-}
if [ "$#" -lt 4 ]; then
- echo "Usage: $0 []" >&2
+ echo "Usage: $0 []" >&2
echo "Example: $0 my-fastapi-app python:3.9-slim 'fastapi uvicorn' " >&2
exit 1
fi
@@ -24,7 +24,7 @@ set -euo pipefail
build_name="$1"
image_name="distribution-$build_name"
-docker_base=$2
+container_base=$2
build_file_path=$3
host_build_dir=$4
pip_dependencies=$5
@@ -36,14 +36,14 @@ NC='\033[0m' # No Color
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
REPO_DIR=$(dirname $(dirname "$SCRIPT_DIR"))
-DOCKER_BINARY=${DOCKER_BINARY:-docker}
-DOCKER_OPTS=${DOCKER_OPTS:-}
+CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
+CONTAINER_OPTS=${CONTAINER_OPTS:-}
TEMP_DIR=$(mktemp -d)
-add_to_docker() {
+add_to_container() {
local input
- output_file="$TEMP_DIR/Dockerfile"
+ output_file="$TEMP_DIR/Containerfile"
if [ -t 0 ]; then
printf '%s\n' "$1" >>"$output_file"
else
@@ -53,9 +53,9 @@ add_to_docker() {
}
# Update and install UBI9 components if UBI9 base image is used
-if [[ $docker_base == *"registry.access.redhat.com/ubi9"* ]]; then
- add_to_docker << EOF
-FROM $docker_base
+if [[ $container_base == *"registry.access.redhat.com/ubi9"* ]]; then
+ add_to_container << EOF
+FROM $container_base
WORKDIR /app
RUN microdnf -y update && microdnf install -y iputils net-tools wget \
@@ -64,8 +64,8 @@ RUN microdnf -y update && microdnf install -y iputils net-tools wget \
EOF
else
- add_to_docker << EOF
-FROM $docker_base
+ add_to_container << EOF
+FROM $container_base
WORKDIR /app
RUN apt-get update && apt-get install -y \
@@ -82,7 +82,7 @@ fi
# Add pip dependencies first since llama-stack is what will change most often
# so we can reuse layers.
if [ -n "$pip_dependencies" ]; then
- add_to_docker << EOF
+ add_to_container << EOF
RUN pip install --no-cache $pip_dependencies
EOF
fi
@@ -90,7 +90,7 @@ fi
if [ -n "$special_pip_deps" ]; then
IFS='#' read -ra parts <<<"$special_pip_deps"
for part in "${parts[@]}"; do
- add_to_docker <<EOF
+ add_to_container <<EOF
...
if command -v selinuxenabled &> /dev/null && selinuxenabled; then
# Disable SELinux labels -- we don't want to relabel the llama-stack source dir
- DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
+ CONTAINER_OPTS="$CONTAINER_OPTS --security-opt label=disable"
fi
# Set version tag based on PyPI version
@@ -200,7 +200,7 @@ else
fi
set -x
-$DOCKER_BINARY build $DOCKER_OPTS $PLATFORM -t $image_tag -f "$TEMP_DIR/Dockerfile" "$REPO_DIR" $mounts
+$CONTAINER_BINARY build $CONTAINER_OPTS $PLATFORM -t $image_tag -f "$TEMP_DIR/Containerfile" "$REPO_DIR" $mounts
# clean up tmp/configs
set +x
diff --git a/llama_stack/distribution/configure_container.sh b/llama_stack/distribution/configure_container.sh
index 5f64531eb..b01251e46 100755
--- a/llama_stack/distribution/configure_container.sh
+++ b/llama_stack/distribution/configure_container.sh
@@ -6,8 +6,8 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-DOCKER_BINARY=${DOCKER_BINARY:-docker}
-DOCKER_OPTS=${DOCKER_OPTS:-}
+CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
+CONTAINER_OPTS=${CONTAINER_OPTS:-}
LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
set -euo pipefail
@@ -24,13 +24,13 @@ if [ $# -lt 2 ]; then
exit 1
fi
-docker_image="$1"
+container_image="$1"
host_build_dir="$2"
container_build_dir="/app/builds"
if command -v selinuxenabled &> /dev/null && selinuxenabled; then
# Disable SELinux labels
- DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
+ CONTAINER_OPTS="$CONTAINER_OPTS --security-opt label=disable"
fi
mounts=""
@@ -39,9 +39,9 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
fi
set -x
-$DOCKER_BINARY run $DOCKER_OPTS -it \
+$CONTAINER_BINARY run $CONTAINER_OPTS -it \
--entrypoint "/usr/local/bin/llama" \
-v $host_build_dir:$container_build_dir \
$mounts \
- $docker_image \
+ $container_image \
stack configure ./llamastack-build.yaml --output-dir $container_build_dir
diff --git a/llama_stack/distribution/datatypes.py b/llama_stack/distribution/datatypes.py
index 0a293cbc2..c1a91cf6c 100644
--- a/llama_stack/distribution/datatypes.py
+++ b/llama_stack/distribution/datatypes.py
@@ -73,7 +73,7 @@ class AutoRoutedProviderSpec(ProviderSpec):
provider_type: str = "router"
config_class: str = ""
- docker_image: Optional[str] = None
+ container_image: Optional[str] = None
routing_table_api: Api
module: str
provider_data_validator: Optional[str] = Field(
@@ -89,7 +89,7 @@ class AutoRoutedProviderSpec(ProviderSpec):
class RoutingTableProviderSpec(ProviderSpec):
provider_type: str = "routing_table"
config_class: str = ""
- docker_image: Optional[str] = None
+ container_image: Optional[str] = None
router_api: Api
module: str
@@ -101,7 +101,7 @@ class DistributionSpec(BaseModel):
default="",
description="Description of the distribution",
)
- docker_image: Optional[str] = None
+ container_image: Optional[str] = None
providers: Dict[str, Union[str, List[str]]] = Field(
default_factory=dict,
description="""
@@ -127,9 +127,9 @@ Reference to the distribution this package refers to. For unregistered (adhoc) p
this could be just a hash
""",
)
- docker_image: Optional[str] = Field(
+ container_image: Optional[str] = Field(
default=None,
- description="Reference to the docker image if this package refers to a container",
+ description="Reference to the container image if this package refers to a container",
)
apis: List[str] = Field(
default_factory=list,
@@ -168,5 +168,5 @@ class BuildConfig(BaseModel):
)
image_type: str = Field(
default="conda",
- description="Type of package to build (conda | docker | venv)",
+ description="Type of package to build (conda | container | venv)",
)
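A minimal usage sketch of the renamed fields (hedged: only `container_image` and the `image_type` description come from this diff; the `name` field and the provider value are assumed from the build.yaml templates elsewhere in this change):

```python
# Hypothetical construction; field set is abbreviated.
from llama_stack.distribution.datatypes import BuildConfig, DistributionSpec

build_config = BuildConfig(
    name="my-stack",
    distribution_spec=DistributionSpec(
        description="Example distribution",
        container_image=None,  # renamed from docker_image
        providers={"inference": "remote::ollama"},
    ),
    image_type="container",  # renamed from "docker"
)
print(build_config.image_type)
```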
diff --git a/llama_stack/distribution/start_container.sh b/llama_stack/distribution/start_container.sh
index 3b49a22f8..1a55bf96d 100755
--- a/llama_stack/distribution/start_container.sh
+++ b/llama_stack/distribution/start_container.sh
@@ -6,8 +6,8 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-DOCKER_BINARY=${DOCKER_BINARY:-docker}
-DOCKER_OPTS=${DOCKER_OPTS:-}
+CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
+CONTAINER_OPTS=${CONTAINER_OPTS:-}
LLAMA_CHECKPOINT_DIR=${LLAMA_CHECKPOINT_DIR:-}
LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
@@ -31,7 +31,7 @@ if [ $# -lt 3 ]; then
fi
build_name="$1"
-docker_image="localhost/distribution-$build_name"
+container_image="localhost/distribution-$build_name"
shift
yaml_config="$1"
@@ -64,7 +64,7 @@ set -x
if command -v selinuxenabled &> /dev/null && selinuxenabled; then
# Disable SELinux labels
- DOCKER_OPTS="$DOCKER_OPTS --security-opt label=disable"
+ CONTAINER_OPTS="$CONTAINER_OPTS --security-opt label=disable"
fi
mounts=""
@@ -73,7 +73,7 @@ if [ -n "$LLAMA_STACK_DIR" ]; then
fi
if [ -n "$LLAMA_CHECKPOINT_DIR" ]; then
mounts="$mounts -v $LLAMA_CHECKPOINT_DIR:/root/.llama"
- DOCKER_OPTS="$DOCKER_OPTS --gpus=all"
+ CONTAINER_OPTS="$CONTAINER_OPTS --gpus=all"
fi
version_tag="latest"
@@ -85,11 +85,11 @@ elif [ -n "$TEST_PYPI_VERSION" ]; then
version_tag="test-$TEST_PYPI_VERSION"
fi
-$DOCKER_BINARY run $DOCKER_OPTS -it \
+$CONTAINER_BINARY run $CONTAINER_OPTS -it \
-p $port:$port \
$env_vars \
-v "$yaml_config:/app/config.yaml" \
$mounts \
--env LLAMA_STACK_PORT=$port \
--entrypoint='["python", "-m", "llama_stack.distribution.server.server", "--yaml-config", "/app/config.yaml"]' \
- $docker_image:$version_tag
+ $container_image:$version_tag
diff --git a/llama_stack/providers/datatypes.py b/llama_stack/providers/datatypes.py
index 3e64a62a1..94563879c 100644
--- a/llama_stack/providers/datatypes.py
+++ b/llama_stack/providers/datatypes.py
@@ -150,11 +150,11 @@ class InlineProviderSpec(ProviderSpec):
default_factory=list,
description="The pip dependencies needed for this implementation",
)
- docker_image: Optional[str] = Field(
+ container_image: Optional[str] = Field(
default=None,
description="""
-The docker image to use for this implementation. If one is provided, pip_packages will be ignored.
-If a provider depends on other providers, the dependencies MUST NOT specify a docker image.
+The container image to use for this implementation. If one is provided, pip_packages will be ignored.
+If a provider depends on other providers, the dependencies MUST NOT specify a container image.
""",
)
module: str = Field(
@@ -197,7 +197,7 @@ API responses, specify the adapter here.
)
@property
- def docker_image(self) -> Optional[str]:
+ def container_image(self) -> Optional[str]:
return None
@property
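To show why a uniform attribute matters to callers, a hedged sketch follows (both classes are invented stand-ins; only the `container_image` name comes from this change): remote adapter specs report no image, while inline specs may carry one, and build code reads both through the same attribute.

```python
from typing import Optional


class RemoteSpecSketch:
    """Invented stand-in: remote providers are never container-backed."""

    @property
    def container_image(self) -> Optional[str]:
        return None


class InlineSpecSketch:
    """Invented stand-in: inline providers may name a container image."""

    def __init__(self, container_image: Optional[str] = None) -> None:
        self.container_image = container_image


for spec in (RemoteSpecSketch(), InlineSpecSketch("registry.example/provider:latest")):
    print(spec.container_image)
```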
diff --git a/llama_stack/templates/bedrock/bedrock.py b/llama_stack/templates/bedrock/bedrock.py
index c80625cf6..da792e461 100644
--- a/llama_stack/templates/bedrock/bedrock.py
+++ b/llama_stack/templates/bedrock/bedrock.py
@@ -70,7 +70,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use AWS Bedrock for running LLM inference and safety",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=default_models,
diff --git a/llama_stack/templates/cerebras/cerebras.py b/llama_stack/templates/cerebras/cerebras.py
index df3b55ddd..8f6bd77af 100644
--- a/llama_stack/templates/cerebras/cerebras.py
+++ b/llama_stack/templates/cerebras/cerebras.py
@@ -92,7 +92,7 @@ def get_distribution_template() -> DistributionTemplate:
name="cerebras",
distro_type="self_hosted",
description="Use Cerebras for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=default_models,
diff --git a/llama_stack/templates/experimental-post-training/build.yaml b/llama_stack/templates/experimental-post-training/build.yaml
index 4997ab8a3..618e8ff97 100644
--- a/llama_stack/templates/experimental-post-training/build.yaml
+++ b/llama_stack/templates/experimental-post-training/build.yaml
@@ -2,7 +2,7 @@ version: '2'
name: experimental-post-training
distribution_spec:
description: Experimental template for post training
- docker_image: null
+ container_image: null
providers:
inference:
- inline::meta-reference
diff --git a/llama_stack/templates/experimental-post-training/run.yaml b/llama_stack/templates/experimental-post-training/run.yaml
index 2e0ee029b..87465137f 100644
--- a/llama_stack/templates/experimental-post-training/run.yaml
+++ b/llama_stack/templates/experimental-post-training/run.yaml
@@ -1,6 +1,6 @@
version: '2'
image_name: experimental-post-training
-docker_image: null
+container_image: null
conda_env: experimental-post-training
apis:
- agents
diff --git a/llama_stack/templates/fireworks/fireworks.py b/llama_stack/templates/fireworks/fireworks.py
index 8add75f7d..c94074a70 100644
--- a/llama_stack/templates/fireworks/fireworks.py
+++ b/llama_stack/templates/fireworks/fireworks.py
@@ -98,7 +98,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use Fireworks.AI for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=default_models,
diff --git a/llama_stack/templates/hf-endpoint/hf_endpoint.py b/llama_stack/templates/hf-endpoint/hf_endpoint.py
index 54aaa56ac..04e2a53b5 100644
--- a/llama_stack/templates/hf-endpoint/hf_endpoint.py
+++ b/llama_stack/templates/hf-endpoint/hf_endpoint.py
@@ -88,7 +88,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use (an external) Hugging Face Inference Endpoint for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=None,
providers=providers,
default_models=[inference_model, safety_model],
diff --git a/llama_stack/templates/hf-serverless/hf_serverless.py b/llama_stack/templates/hf-serverless/hf_serverless.py
index 788faa986..af8d77629 100644
--- a/llama_stack/templates/hf-serverless/hf_serverless.py
+++ b/llama_stack/templates/hf-serverless/hf_serverless.py
@@ -89,7 +89,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use (an external) Hugging Face Inference Endpoint for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=None,
providers=providers,
default_models=[inference_model, safety_model],
diff --git a/llama_stack/templates/nvidia/nvidia.py b/llama_stack/templates/nvidia/nvidia.py
index cfa86dbe7..d5518ecc9 100644
--- a/llama_stack/templates/nvidia/nvidia.py
+++ b/llama_stack/templates/nvidia/nvidia.py
@@ -68,7 +68,7 @@ def get_distribution_template() -> DistributionTemplate:
name="nvidia",
distro_type="remote_hosted",
description="Use NVIDIA NIM for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=default_models,
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index 0473f8692..2288ea3a6 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -90,7 +90,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use (an external) Ollama server for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=[inference_model, safety_model],
diff --git a/llama_stack/templates/template.py b/llama_stack/templates/template.py
index 5bb88c821..d9696b23d 100644
--- a/llama_stack/templates/template.py
+++ b/llama_stack/templates/template.py
@@ -37,7 +37,7 @@ class RunConfigSettings(BaseModel):
self,
name: str,
providers: Dict[str, List[str]],
- docker_image: Optional[str] = None,
+ container_image: Optional[str] = None,
) -> StackRunConfig:
provider_registry = get_provider_registry()
@@ -83,7 +83,7 @@ class RunConfigSettings(BaseModel):
return StackRunConfig(
image_name=name,
- docker_image=docker_image,
+ container_image=container_image,
conda_env=name,
apis=apis,
providers=provider_configs,
@@ -113,7 +113,7 @@ class DistributionTemplate(BaseModel):
# Optional configuration
run_config_env_vars: Optional[Dict[str, Tuple[str, str]]] = None
- docker_image: Optional[str] = None
+ container_image: Optional[str] = None
default_models: Optional[List[ModelInput]] = None
@@ -122,7 +122,7 @@ class DistributionTemplate(BaseModel):
name=self.name,
distribution_spec=DistributionSpec(
description=self.description,
- docker_image=self.docker_image,
+ container_image=self.container_image,
providers=self.providers,
),
image_type="conda", # default to conda, can be overridden
@@ -170,7 +170,7 @@ class DistributionTemplate(BaseModel):
for yaml_pth, settings in self.run_configs.items():
run_config = settings.run_config(
- self.name, self.providers, self.docker_image
+ self.name, self.providers, self.container_image
)
with open(yaml_output_dir / yaml_pth, "w") as f:
yaml.safe_dump(
diff --git a/llama_stack/templates/tgi/tgi.py b/llama_stack/templates/tgi/tgi.py
index b62e7719e..02187f986 100644
--- a/llama_stack/templates/tgi/tgi.py
+++ b/llama_stack/templates/tgi/tgi.py
@@ -92,7 +92,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use (an external) TGI server for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=[inference_model, safety_model],
diff --git a/llama_stack/templates/together/together.py b/llama_stack/templates/together/together.py
index b51918a6c..28c01095a 100644
--- a/llama_stack/templates/together/together.py
+++ b/llama_stack/templates/together/together.py
@@ -96,7 +96,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use Together.AI for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=default_models,
diff --git a/llama_stack/templates/vllm-gpu/vllm.py b/llama_stack/templates/vllm-gpu/vllm.py
index dd80c15dc..1f3cf4b35 100644
--- a/llama_stack/templates/vllm-gpu/vllm.py
+++ b/llama_stack/templates/vllm-gpu/vllm.py
@@ -84,7 +84,7 @@ def get_distribution_template() -> DistributionTemplate:
name=name,
distro_type="self_hosted",
description="Use a built-in vLLM engine for running LLM inference",
- docker_image=None,
+ container_image=None,
template_path=None,
providers=providers,
default_models=[inference_model],