From 3fe9ca7540d9a7c396b8f549db15b09142f33449 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Sun, 15 Sep 2024 00:08:33 -0700
Subject: [PATCH] configure from docker image name

---
 llama_toolchain/cli/stack/configure.py      | 40 ++++++++++++++++++---
 llama_toolchain/core/configure_container.sh | 26 ++++++++++++++
 2 files changed, 61 insertions(+), 5 deletions(-)
 create mode 100755 llama_toolchain/core/configure_container.sh

diff --git a/llama_toolchain/cli/stack/configure.py b/llama_toolchain/cli/stack/configure.py
index d48ba47a4..afb2dd9fe 100644
--- a/llama_toolchain/cli/stack/configure.py
+++ b/llama_toolchain/cli/stack/configure.py
@@ -8,10 +8,13 @@ import argparse
 import json
 from pathlib import Path
 
-import yaml
+import pkg_resources
+import yaml
 
 from llama_toolchain.cli.subcommand import Subcommand
 from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR
+
+from llama_toolchain.common.exec import run_with_pty
 from termcolor import cprint
 from llama_toolchain.core.datatypes import *  # noqa: F403
 import os
@@ -35,7 +38,7 @@ class StackConfigure(Subcommand):
         self.parser.add_argument(
             "config",
             type=str,
-            help="Path to the build config file (e.g. ~/.llama/builds/<image_type>/<name>-build.yaml)",
+            help="Path to the build config file (e.g. ~/.llama/builds/<image_type>/<name>-build.yaml). For docker, this could also be the name of the docker image. ",
         )
 
     def _run_stack_configure_cmd(self, args: argparse.Namespace) -> None:
@@ -43,10 +46,37 @@
 
         build_config_file = Path(args.config)
         if not build_config_file.exists():
-            self.parser.error(
-                f"Could not find {build_config_file}. Please run `llama stack build` first"
+            cprint(
+                f"Could not find {build_config_file}. Trying docker image name instead...",
+                color="green",
             )
-            return
+
+            build_dir = (
+                Path(os.path.expanduser("./.llama/distributions"))
+                / ImageType.docker.value
+            )
+            build_config_file = build_dir / f"{args.config}-build.yaml"
+
+            os.makedirs(build_dir, exist_ok=True)
+
+            script = pkg_resources.resource_filename(
+                "llama_toolchain", "core/configure_container.sh"
+            )
+            script_args = [
+                script,
+                args.config,
+                str(build_config_file),
+            ]
+
+            return_code = run_with_pty(script_args)
+
+            # we have regenerated the build config file with script, now check if it exists
+            build_config_file = Path(str(build_config_file))
+            if return_code != 0 or not build_config_file.exists():
+                self.parser.error(
+                    f"Can not find {build_config_file}. Please run llama stack build first or check if docker image exists"
+                )
+                return
 
         with open(build_config_file, "r") as f:
             build_config = BuildConfig(**yaml.safe_load(f))
diff --git a/llama_toolchain/core/configure_container.sh b/llama_toolchain/core/configure_container.sh
new file mode 100755
index 000000000..cbcc263d9
--- /dev/null
+++ b/llama_toolchain/core/configure_container.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+set -euo pipefail
+
+error_handler() {
+  echo "Error occurred in script at line: ${1}" >&2
+  exit 1
+}
+
+trap 'error_handler ${LINENO}' ERR
+
+if [ $# -lt 2 ]; then
+  echo "Usage: $0 <docker_image> <build_file_path>"
+  exit 1
+fi
+
+docker_image="$1"
+build_file_path="$2"
+
+podman run -it $docker_image cat build.yaml >> ./$build_file_path
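
Usage sketch (not part of the patch): with this change, `llama stack configure` still accepts a build config path, but when no file exists at that path it falls back to treating the argument as a docker image name and regenerates the build config by extracting build.yaml from the image via configure_container.sh. The image name below is a hypothetical placeholder, not taken from this patch.

    # Fall back to a docker image when the given build config path does not exist.
    # <distribution-image> is a placeholder; substitute a real image name.
    llama stack configure <distribution-image>
    # The container script copies build.yaml out of the image into the docker
    # image-type subdirectory under ./.llama/distributions as
    # <distribution-image>-build.yaml, and configuration then proceeds from
    # that regenerated build config file.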