Derek Higgins 2025-07-24 12:35:44 -07:00 committed by GitHub
commit 108dfdc516
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 379 additions and 327 deletions


@@ -107,7 +107,7 @@ def build_image(
normal_deps += SERVER_DEPENDENCIES
if build_config.image_type == LlamaStackImageType.CONTAINER.value:
script = str(importlib.resources.files("llama_stack") / "distribution/build_container.sh")
script = str(importlib.resources.files("llama_stack") / "distribution/build_container.py")
args = [
script,
template_or_config,
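
The builder is invoked with the same positional arguments the shell script took (see the usage string in build_container.py below). A minimal sketch of an equivalent direct call, with hypothetical template, image, base-image and dependency values and the script path taken from the diff above:

import subprocess
import sys

subprocess.run(
    [
        sys.executable,
        "llama_stack/distribution/build_container.py",
        "ollama",               # template_or_config (hypothetical template name)
        "distribution-ollama",  # image_name (hypothetical)
        "python:3.12-slim",     # container_base (hypothetical)
        "fastapi uvicorn",      # pip_dependencies (hypothetical)
    ],
    check=True,
)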


llama_stack/distribution/build_container.py (new file)
@@ -0,0 +1,373 @@
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import json
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import urllib.request
import yaml
class Colors:
RED = "\033[0;31m"
NC = "\033[0m" # No Color
def is_command_available(command: str) -> bool:
"""Check if a command is available in the system."""
return shutil.which(command) is not None
class ContainerBuilder:
def __init__(self):
# Environment variables with defaults
self.llama_stack_dir = os.getenv("LLAMA_STACK_DIR", "")
self.llama_stack_client_dir = os.getenv("LLAMA_STACK_CLIENT_DIR", "")
self.test_pypi_version = os.getenv("TEST_PYPI_VERSION", "")
self.pypi_version = os.getenv("PYPI_VERSION", "")
self.build_platform = os.getenv("BUILD_PLATFORM", "")
self.uv_http_timeout = os.getenv("UV_HTTP_TIMEOUT", "500")
self.use_copy_not_mount = os.getenv("USE_COPY_NOT_MOUNT", "")
self.mount_cache = os.getenv("MOUNT_CACHE", "--mount=type=cache,id=llama-stack-cache,target=/root/.cache")
self.container_binary = os.getenv("CONTAINER_BINARY", "docker")
self.container_opts = os.getenv("CONTAINER_OPTS", "--progress=plain")
# Constants
self.run_config_path = "/app/run.yaml"
self.build_context_dir = os.getcwd()
self.stack_mount = "/app/llama-stack-source"
self.client_mount = "/app/llama-stack-client-source"
# Temporary directory and Containerfile
self.temp_dir = tempfile.mkdtemp()
self.containerfile_path = os.path.join(self.temp_dir, "Containerfile")
def cleanup(self):
"""Clean up temporary files."""
if os.path.exists(self.temp_dir):
try:
shutil.rmtree(self.temp_dir)
except Exception as e:
print(f"Warning: Could not clean up temporary directory: {e}", file=sys.stderr)
# Clean up copied files in build context
run_yaml_path = os.path.join(self.build_context_dir, "run.yaml")
if os.path.exists(run_yaml_path):
try:
os.remove(run_yaml_path)
except Exception as e:
print(f"Warning: Could not clean up run.yaml: {e}", file=sys.stderr)
def add_to_container(self, content: str):
"""Add content to the Containerfile."""
with open(self.containerfile_path, "a") as f:
f.write(content + "\n")
def validate_args(self, args):
"""Validate command line arguments."""
if not is_command_available(self.container_binary):
print(
f"{Colors.RED}Error: {self.container_binary} command not found. "
f"Is {self.container_binary} installed and in your PATH?{Colors.NC}",
file=sys.stderr,
)
sys.exit(1)
def generate_base_image_setup(self, container_base: str):
"""Generate the base image setup commands."""
self.add_to_container(f"""FROM {container_base}
WORKDIR /app""")
if "registry.access.redhat.com/ubi9" in container_base:
self.add_to_container("""# We install the Python 3.12 dev headers and build tools so that any
# C-extension wheels (e.g. polyleven, faiss-cpu) can compile successfully.
RUN dnf -y update && dnf install -y iputils git net-tools wget \\
vim-minimal python3.12 python3.12-pip python3.12-wheel \\
python3.12-setuptools python3.12-devel gcc make && \\
ln -s /bin/pip3.12 /bin/pip && ln -s /bin/python3.12 /bin/python && dnf clean all""")
else:
self.add_to_container("""RUN apt-get update && apt-get install -y \\
iputils-ping net-tools iproute2 dnsutils telnet \\
curl wget telnet git\\
procps psmisc lsof \\
traceroute \\
bubblewrap \\
gcc \\
&& rm -rf /var/lib/apt/lists/*""")
self.add_to_container("""ENV UV_SYSTEM_PYTHON=1
RUN pip install uv""")
def add_pip_dependencies(self, pip_dependencies: str, special_pip_deps: str):
"""Add pip dependencies to the container."""
# Set link mode to copy
self.add_to_container("ENV UV_LINK_MODE=copy")
# Add regular pip dependencies
if pip_dependencies:
pip_args = shlex.split(pip_dependencies)
quoted_deps = " ".join(shlex.quote(dep) for dep in pip_args)
self.add_to_container(f"RUN {self.mount_cache} uv pip install {quoted_deps}")
# Add special pip dependencies
if special_pip_deps:
parts = special_pip_deps.split("#")
for part in parts:
if part.strip():
pip_args = shlex.split(part.strip())
quoted_deps = " ".join(shlex.quote(dep) for dep in pip_args)
self.add_to_container(f"RUN {self.mount_cache} uv pip install {quoted_deps}")
def handle_run_config(self, run_config: str):
"""Handle run configuration file."""
if not run_config:
return
# Copy the run config to the build context
run_yaml_dest = os.path.join(self.build_context_dir, "run.yaml")
shutil.copy2(run_config, run_yaml_dest)
# Parse the run.yaml configuration for external providers
try:
with open(run_config) as f:
config = yaml.safe_load(f)
external_providers_dir = config.get("external_providers_dir", "")
if external_providers_dir:
# Expand environment variables in path
external_providers_dir = os.path.expandvars(external_providers_dir)
if os.path.isdir(external_providers_dir):
print(f"Copying external providers directory: {external_providers_dir}")
providers_dest = os.path.join(self.build_context_dir, "providers.d")
shutil.copytree(external_providers_dir, providers_dest, dirs_exist_ok=True)
self.add_to_container("COPY providers.d /.llama/providers.d")
# Update the run.yaml file to change external_providers_dir
with open(run_yaml_dest) as f:
content = f.read()
# Replace external_providers_dir line
content = re.sub(
r"external_providers_dir:.*", "external_providers_dir: /.llama/providers.d", content
)
with open(run_yaml_dest, "w") as f:
f.write(content)
except Exception as e:
print(f"Warning: Could not parse run.yaml: {e}", file=sys.stderr)
# Copy run config into docker image
self.add_to_container(f"COPY run.yaml {self.run_config_path}")
def install_local_package(self, directory: str, mount_point: str, name: str):
"""Install a local package in the container."""
if not os.path.isdir(directory):
print(
f"{Colors.RED}Warning: {name} is set but directory does not exist: {directory}{Colors.NC}",
file=sys.stderr,
)
sys.exit(1)
if self.use_copy_not_mount == "true":
self.add_to_container(f"COPY {directory} {mount_point}")
self.add_to_container(f"RUN {self.mount_cache} uv pip install -e {mount_point}")
def install_llama_stack(self):
"""Install llama-stack package."""
if self.llama_stack_client_dir:
self.install_local_package(self.llama_stack_client_dir, self.client_mount, "LLAMA_STACK_CLIENT_DIR")
if self.llama_stack_dir:
self.install_local_package(self.llama_stack_dir, self.stack_mount, "LLAMA_STACK_DIR")
else:
if self.test_pypi_version:
# Install damaged packages first for test-pypi
self.add_to_container(f"RUN {self.mount_cache} uv pip install fastapi libcst")
self.add_to_container(f"""RUN {self.mount_cache} uv pip install --extra-index-url https://test.pypi.org/simple/ \\
--index-strategy unsafe-best-match \\
llama-stack=={self.test_pypi_version}""")
else:
if self.pypi_version:
spec_version = f"llama-stack=={self.pypi_version}"
else:
spec_version = "llama-stack"
self.add_to_container(f"RUN {self.mount_cache} uv pip install {spec_version}")
def add_entrypoint(self, template_or_config: str, run_config: str):
"""Add the container entrypoint."""
# Remove uv after installation
self.add_to_container("RUN pip uninstall -y uv")
# Set entrypoint based on configuration
if run_config:
self.add_to_container(
f'ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--config", "{self.run_config_path}"]'
)
elif not template_or_config.endswith(".yaml"):
self.add_to_container(
f'ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--template", "{template_or_config}"]'
)
# Add generic container setup
self.add_to_container("""RUN mkdir -p /.llama /.cache && chmod -R g+rw /app /.llama /.cache""")
def get_version_tag(self):
"""Get the version tag for the image."""
if self.pypi_version:
return self.pypi_version
elif self.test_pypi_version:
return f"test-{self.test_pypi_version}"
elif self.llama_stack_dir or self.llama_stack_client_dir:
return "dev"
else:
try:
url = "https://pypi.org/pypi/llama-stack/json"
with urllib.request.urlopen(url) as response:
data = json.loads(response.read())
return data["info"]["version"]
except Exception:
return "latest"
def build_container(self, image_name: str) -> tuple[list[str], str]:
"""Build the container and return CLI arguments."""
cli_args = shlex.split(self.container_opts)
# Add volume mounts if not using copy mode
if self.use_copy_not_mount != "true":
if self.llama_stack_dir:
abs_path = os.path.abspath(self.llama_stack_dir)
cli_args.extend(["-v", f"{abs_path}:{self.stack_mount}"])
if self.llama_stack_client_dir:
abs_path = os.path.abspath(self.llama_stack_client_dir)
cli_args.extend(["-v", f"{abs_path}:{self.client_mount}"])
# Handle SELinux if available
try:
if is_command_available("selinuxenabled"):
result = subprocess.run(["selinuxenabled"], capture_output=True)
if result.returncode == 0:
cli_args.extend(["--security-opt", "label=disable"])
except Exception:
pass
# Set platform
arch = platform.machine()
if self.build_platform:
cli_args.extend(["--platform", self.build_platform])
elif arch in ["arm64", "aarch64"]:
cli_args.extend(["--platform", "linux/arm64"])
elif arch == "x86_64":
cli_args.extend(["--platform", "linux/amd64"])
else:
print(f"Unsupported architecture: {arch}")
sys.exit(1)
# Create image tag
version_tag = self.get_version_tag()
image_tag = f"{image_name}:{version_tag}"
return cli_args, image_tag
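# Example result (hypothetical paths) on an x86_64 host with LLAMA_STACK_DIR set and
# USE_COPY_NOT_MOUNT unset:
#   cli_args  = ["--progress=plain", "-v", "/home/me/llama-stack:/app/llama-stack-source",
#                "--platform", "linux/amd64"]
#   image_tag = "distribution-ollama:dev"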
def run_build(self, cli_args: list[str], image_tag: str):
"""Execute the container build command."""
print(f"PWD: {os.getcwd()}")
print(f"Containerfile: {self.containerfile_path}")
# Print Containerfile content
print(f"Containerfile created successfully in {self.containerfile_path}\n")
with open(self.containerfile_path) as f:
print(f.read())
print()
# Build the container
cmd = [
self.container_binary,
"build",
*cli_args,
"-t",
image_tag,
"-f",
self.containerfile_path,
self.build_context_dir,
]
print("Running command:")
print(" ".join(shlex.quote(arg) for arg in cmd))
try:
subprocess.run(cmd, check=True)
print("Success!")
except subprocess.CalledProcessError as e:
print(f"Build failed with exit code {e.returncode}")
sys.exit(e.returncode)
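# The assembled command has the shape (hypothetical values):
#   docker build --progress=plain --platform linux/amd64 \
#       -t distribution-ollama:dev -f /tmp/tmpXXXXXXXX/Containerfile <build-context-dir>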
def main():
if len(sys.argv) < 5:
print(
"Usage: build_container.py <template_or_config> <image_name> <container_base> <pip_dependencies> [<run_config>] [<special_pip_deps>]",
file=sys.stderr,
)
sys.exit(1)
parser = argparse.ArgumentParser(
description="Build container images for llama-stack", formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("template_or_config", help="Template name or path to config file")
parser.add_argument("image_name", help="Name for the container image")
parser.add_argument("container_base", help="Base container image")
parser.add_argument("pip_dependencies", help="Pip dependencies to install")
parser.add_argument("run_config", nargs="?", default="", help="Optional path to run.yaml config file")
parser.add_argument("special_pip_deps", nargs="?", default="", help="Optional special pip dependencies")
args = parser.parse_args()
# Handle the complex argument parsing logic from the bash script
# If we have 5+ args and the 5th arg doesn't end with .yaml, it's special_pip_deps
if len(sys.argv) >= 6:
if not sys.argv[5].endswith(".yaml"):
args.special_pip_deps = args.run_config
args.run_config = ""
builder = ContainerBuilder()
try:
builder.validate_args(args)
# Generate Containerfile
builder.generate_base_image_setup(args.container_base)
builder.add_pip_dependencies(args.pip_dependencies, args.special_pip_deps)
builder.handle_run_config(args.run_config)
builder.install_llama_stack()
builder.add_entrypoint(args.template_or_config, args.run_config)
# Build container
cli_args, image_tag = builder.build_container(args.image_name)
builder.run_build(cli_args, image_tag)
finally:
builder.cleanup()
if __name__ == "__main__":
main()
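
For local development the same environment variables the shell script honoured still apply. A sketch of a source build, with hypothetical values, that forces COPY instead of bind mounts (the original script's comments note that docker buildx does not support mounts):

import os
import subprocess
import sys

env = dict(
    os.environ,
    LLAMA_STACK_DIR=".",            # hypothetical: repo checkout used as build context
    USE_COPY_NOT_MOUNT="true",      # COPY the sources instead of bind-mounting them
    CONTAINER_BINARY="podman",      # hypothetical: defaults to docker
)
subprocess.run(
    [
        sys.executable,
        "llama_stack/distribution/build_container.py",
        "ollama", "distribution-ollama", "python:3.12-slim", "fastapi uvicorn",
    ],
    env=env,
    check=True,
)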


llama_stack/distribution/build_container.sh
@@ -6,154 +6,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR:-}
TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
PYPI_VERSION=${PYPI_VERSION:-}
BUILD_PLATFORM=${BUILD_PLATFORM:-}
# This timeout (in seconds) is necessary when installing PyTorch via uv since it's likely to time out
# Reference: https://github.com/astral-sh/uv/pull/1694
UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500}
# mounting is not supported by docker buildx, so we use COPY instead
USE_COPY_NOT_MOUNT=${USE_COPY_NOT_MOUNT:-}
# Mount command for cache container .cache, can be overridden by the user if needed
MOUNT_CACHE=${MOUNT_CACHE:-"--mount=type=cache,id=llama-stack-cache,target=/root/.cache"}
# Path to the run.yaml file in the container
RUN_CONFIG_PATH=/app/run.yaml
BUILD_CONTEXT_DIR=$(pwd)
if [ "$#" -lt 4 ]; then
# This only works for templates
echo "Usage: $0 <template_or_config> <image_name> <container_base> <pip_dependencies> [<run_config>] [<special_pip_deps>]" >&2
exit 1
fi
set -euo pipefail
template_or_config="$1"
shift
image_name="$1"
shift
container_base="$1"
shift
pip_dependencies="$1"
shift
# Handle optional arguments
run_config=""
special_pip_deps=""
# Check if there are more arguments
# The logic is becoming cumbersome, we should refactor it if we can do better
if [ $# -gt 0 ]; then
# Check if the argument ends with .yaml
if [[ "$1" == *.yaml ]]; then
run_config="$1"
shift
# If there's another argument after .yaml, it must be special_pip_deps
if [ $# -gt 0 ]; then
special_pip_deps="$1"
fi
else
# If it's not .yaml, it must be special_pip_deps
special_pip_deps="$1"
fi
fi
# Define color codes
RED='\033[0;31m'
NC='\033[0m' # No Color
CONTAINER_BINARY=${CONTAINER_BINARY:-docker}
CONTAINER_OPTS=${CONTAINER_OPTS:---progress=plain}
TEMP_DIR=$(mktemp -d)
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
source "$SCRIPT_DIR/common.sh"
add_to_container() {
output_file="$TEMP_DIR/Containerfile"
if [ -t 0 ]; then
printf '%s\n' "$1" >>"$output_file"
else
# If stdin is not a terminal, read from it (heredoc)
cat >>"$output_file"
fi
}
# Check if container command is available
if ! is_command_available "$CONTAINER_BINARY"; then
printf "${RED}Error: ${CONTAINER_BINARY} command not found. Is ${CONTAINER_BINARY} installed and in your PATH?${NC}" >&2
exit 1
fi
# Update and install UBI9 components if UBI9 base image is used
if [[ $container_base == *"registry.access.redhat.com/ubi9"* ]]; then
add_to_container << EOF
FROM $container_base
WORKDIR /app
# We install the Python 3.12 dev headers and build tools so that any
# C-extension wheels (e.g. polyleven, faiss-cpu) can compile successfully.
RUN dnf -y update && dnf install -y iputils git net-tools wget \
vim-minimal python3.12 python3.12-pip python3.12-wheel \
python3.12-setuptools python3.12-devel gcc make && \
ln -s /bin/pip3.12 /bin/pip && ln -s /bin/python3.12 /bin/python && dnf clean all
ENV UV_SYSTEM_PYTHON=1
RUN pip install uv
EOF
else
add_to_container << EOF
FROM $container_base
WORKDIR /app
RUN apt-get update && apt-get install -y \
iputils-ping net-tools iproute2 dnsutils telnet \
curl wget telnet git\
procps psmisc lsof \
traceroute \
bubblewrap \
gcc \
&& rm -rf /var/lib/apt/lists/*
ENV UV_SYSTEM_PYTHON=1
RUN pip install uv
EOF
fi
# Set the link mode to copy so that uv doesn't attempt to symlink to the cache directory
add_to_container << EOF
ENV UV_LINK_MODE=copy
EOF
# Add pip dependencies first since llama-stack is what will change most often
# so we can reuse layers.
if [ -n "$pip_dependencies" ]; then
read -ra pip_args <<< "$pip_dependencies"
quoted_deps=$(printf " %q" "${pip_args[@]}")
add_to_container << EOF
RUN $MOUNT_CACHE uv pip install $quoted_deps
EOF
fi
if [ -n "$special_pip_deps" ]; then
IFS='#' read -ra parts <<<"$special_pip_deps"
for part in "${parts[@]}"; do
read -ra pip_args <<< "$part"
quoted_deps=$(printf " %q" "${pip_args[@]}")
add_to_container <<EOF
RUN $MOUNT_CACHE uv pip install $quoted_deps
EOF
done
fi
# Function to get Python command
get_python_cmd() {
if is_command_available python; then
@@ -165,182 +17,9 @@ get_python_cmd() {
exit 1
fi
}
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
source "$SCRIPT_DIR/common.sh"
if [ -n "$run_config" ]; then
# Copy the run config to the build context since it's an absolute path
cp "$run_config" "$BUILD_CONTEXT_DIR/run.yaml"
# Parse the run.yaml configuration to identify external provider directories
# If external providers are specified, copy their directory to the container
# and update the configuration to reference the new container path
python_cmd=$(get_python_cmd)
external_providers_dir=$($python_cmd -c "import yaml; config = yaml.safe_load(open('$run_config')); print(config.get('external_providers_dir') or '')")
external_providers_dir=$(eval echo "$external_providers_dir")
if [ -n "$external_providers_dir" ]; then
if [ -d "$external_providers_dir" ]; then
echo "Copying external providers directory: $external_providers_dir"
cp -r "$external_providers_dir" "$BUILD_CONTEXT_DIR/providers.d"
add_to_container << EOF
COPY providers.d /.llama/providers.d
EOF
fi
# Edit the run.yaml file to change the external_providers_dir to /.llama/providers.d
if [ "$(uname)" = "Darwin" ]; then
sed -i.bak -e 's|external_providers_dir:.*|external_providers_dir: /.llama/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml"
rm -f "$BUILD_CONTEXT_DIR/run.yaml.bak"
else
sed -i 's|external_providers_dir:.*|external_providers_dir: /.llama/providers.d|' "$BUILD_CONTEXT_DIR/run.yaml"
fi
fi
# Copy run config into docker image
add_to_container << EOF
COPY run.yaml $RUN_CONFIG_PATH
EOF
fi
stack_mount="/app/llama-stack-source"
client_mount="/app/llama-stack-client-source"
install_local_package() {
local dir="$1"
local mount_point="$2"
local name="$3"
if [ ! -d "$dir" ]; then
echo "${RED}Warning: $name is set but directory does not exist: $dir${NC}" >&2
exit 1
fi
if [ "$USE_COPY_NOT_MOUNT" = "true" ]; then
add_to_container << EOF
COPY $dir $mount_point
EOF
fi
add_to_container << EOF
RUN $MOUNT_CACHE uv pip install -e $mount_point
EOF
}
if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then
install_local_package "$LLAMA_STACK_CLIENT_DIR" "$client_mount" "LLAMA_STACK_CLIENT_DIR"
fi
if [ -n "$LLAMA_STACK_DIR" ]; then
install_local_package "$LLAMA_STACK_DIR" "$stack_mount" "LLAMA_STACK_DIR"
else
if [ -n "$TEST_PYPI_VERSION" ]; then
# these packages are damaged in test-pypi, so install them first
add_to_container << EOF
RUN $MOUNT_CACHE uv pip install fastapi libcst
EOF
add_to_container << EOF
RUN $MOUNT_CACHE uv pip install --extra-index-url https://test.pypi.org/simple/ \
--index-strategy unsafe-best-match \
llama-stack==$TEST_PYPI_VERSION
EOF
else
if [ -n "$PYPI_VERSION" ]; then
SPEC_VERSION="llama-stack==${PYPI_VERSION}"
else
SPEC_VERSION="llama-stack"
fi
add_to_container << EOF
RUN $MOUNT_CACHE uv pip install $SPEC_VERSION
EOF
fi
fi
# remove uv after installation
add_to_container << EOF
RUN pip uninstall -y uv
EOF
# If a run config is provided, we use the --config flag
if [[ -n "$run_config" ]]; then
add_to_container << EOF
ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--config", "$RUN_CONFIG_PATH"]
EOF
# If a template is provided (not a yaml file), we use the --template flag
elif [[ "$template_or_config" != *.yaml ]]; then
add_to_container << EOF
ENTRYPOINT ["python", "-m", "llama_stack.distribution.server.server", "--template", "$template_or_config"]
EOF
fi
# Add other required setup commands generic to all containers
add_to_container << EOF
RUN mkdir -p /.llama /.cache && chmod -R g+rw /app /.llama /.cache
EOF
printf "Containerfile created successfully in %s/Containerfile\n\n" "$TEMP_DIR"
cat "$TEMP_DIR"/Containerfile
printf "\n"
# Start building the CLI arguments
CLI_ARGS=()
# Read CONTAINER_OPTS and put it in an array
read -ra CLI_ARGS <<< "$CONTAINER_OPTS"
if [ "$USE_COPY_NOT_MOUNT" != "true" ]; then
if [ -n "$LLAMA_STACK_DIR" ]; then
CLI_ARGS+=("-v" "$(readlink -f "$LLAMA_STACK_DIR"):$stack_mount")
fi
if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then
CLI_ARGS+=("-v" "$(readlink -f "$LLAMA_STACK_CLIENT_DIR"):$client_mount")
fi
fi
if is_command_available selinuxenabled && selinuxenabled; then
# Disable SELinux labels -- we don't want to relabel the llama-stack source dir
CLI_ARGS+=("--security-opt" "label=disable")
fi
# Set version tag based on PyPI version
if [ -n "$PYPI_VERSION" ]; then
version_tag="$PYPI_VERSION"
elif [ -n "$TEST_PYPI_VERSION" ]; then
version_tag="test-$TEST_PYPI_VERSION"
elif [[ -n "$LLAMA_STACK_DIR" || -n "$LLAMA_STACK_CLIENT_DIR" ]]; then
version_tag="dev"
else
URL="https://pypi.org/pypi/llama-stack/json"
version_tag=$(curl -s $URL | jq -r '.info.version')
fi
# Add version tag to image name
image_tag="$image_name:$version_tag"
# Detect platform architecture
ARCH=$(uname -m)
if [ -n "$BUILD_PLATFORM" ]; then
CLI_ARGS+=("--platform" "$BUILD_PLATFORM")
elif [ "$ARCH" = "arm64" ] || [ "$ARCH" = "aarch64" ]; then
CLI_ARGS+=("--platform" "linux/arm64")
elif [ "$ARCH" = "x86_64" ]; then
CLI_ARGS+=("--platform" "linux/amd64")
else
echo "Unsupported architecture: $ARCH"
exit 1
fi
echo "PWD: $(pwd)"
echo "Containerfile: $TEMP_DIR/Containerfile"
set -x
$CONTAINER_BINARY build \
"${CLI_ARGS[@]}" \
-t "$image_tag" \
-f "$TEMP_DIR/Containerfile" \
"$BUILD_CONTEXT_DIR"
# clean up tmp/configs
rm -rf "$BUILD_CONTEXT_DIR/run.yaml" "$TEMP_DIR"
set +x
echo "Success!"
# call the python script
CMD=$(get_python_cmd)
$CMD $SCRIPT_DIR/build_container.py "$@"
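# Example (hypothetical values): an existing call such as
#   build_container.sh ollama distribution-ollama python:3.12-slim "fastapi uvicorn" run.yaml
# is forwarded verbatim to build_container.py via "$@", so existing callers of the
# shell script keep working unchanged.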