From 1128f69674d81b503f54a0993e5f87cee02c9e2d Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Wed, 18 Sep 2024 14:25:53 -0700
Subject: [PATCH] CLI: add build templates support, move imports (#77)

* list templates implementation
* relative path
* finalize templates
* remove imports
* remove templates from name, name templates
* fix docker
* fix docker
---
 llama_stack/cli/stack/build.py                  | 118 ++++++++++++++++--
 llama_stack/cli/stack/configure.py              |  22 ++--
 llama_stack/cli/stack/run.py                    |   7 +-
 .../docker/local-docker-example-build.yaml      |  10 --
 .../local-build.yaml}                           |   2 +-
 .../local-fireworks-build.yaml}                 |   2 +-
 .../local-ollama-build.yaml}                    |   2 +-
 .../local-tgi-build.yaml}                       |   2 +-
 .../local-together-build.yaml}                  |   2 +-
 9 files changed, 130 insertions(+), 37 deletions(-)
 delete mode 100644 llama_stack/distribution/example_configs/docker/local-docker-example-build.yaml
 rename llama_stack/distribution/{example_configs/conda/local-conda-example-build.yaml => templates/local-build.yaml} (91%)
 rename llama_stack/distribution/{example_configs/conda/local-fireworks-conda-example-build.yaml => templates/local-fireworks-build.yaml} (87%)
 rename llama_stack/distribution/{example_configs/conda/local-ollama-conda-example-build.yaml => templates/local-ollama-build.yaml} (88%)
 rename llama_stack/distribution/{example_configs/conda/local-tgi-conda-example-build.yaml => templates/local-tgi-build.yaml} (92%)
 rename llama_stack/distribution/{example_configs/conda/local-together-conda-example-build.yaml => templates/local-together-build.yaml} (89%)

diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py
index e49da58f0..f35c6ceab 100644
--- a/llama_stack/cli/stack/build.py
+++ b/llama_stack/cli/stack/build.py
@@ -8,9 +8,26 @@ import argparse
 
 from llama_stack.cli.subcommand import Subcommand
 from llama_stack.distribution.datatypes import *  # noqa: F403
+import os
+from functools import lru_cache
 from pathlib import Path
 
-import yaml
+TEMPLATES_PATH = (
+    Path(os.path.relpath(__file__)).parent.parent.parent / "distribution" / "templates"
+)
+
+
+@lru_cache()
+def available_templates_specs() -> List[BuildConfig]:
+    import yaml
+
+    template_specs = []
+    for p in TEMPLATES_PATH.rglob("*.yaml"):
+        with open(p, "r") as f:
+            build_config = BuildConfig(**yaml.safe_load(f))
+            template_specs.append(build_config)
+
+    return template_specs
 
 
 class StackBuild(Subcommand):
@@ -27,17 +44,38 @@ class StackBuild(Subcommand):
 
     def _add_arguments(self):
         self.parser.add_argument(
-            "config",
+            "--config",
             type=str,
            default=None,
-            nargs="*",
             help="Path to a config file to use for the build. You can find example configs in llama_stack/distribution/example_configs. If this argument is not provided, you will be prompted to enter information interactively",
         )
 
+        self.parser.add_argument(
+            "--template",
+            type=str,
+            default=None,
+            help="Name of the example template config to use for the build. Run `llama stack build --list-templates` to see the available templates",
+        )
+
+        self.parser.add_argument(
+            "--list-templates",
+            type=bool,
+            default=False,
+            action=argparse.BooleanOptionalAction,
+            help="Show the available templates for building a Llama Stack distribution",
+        )
+
         self.parser.add_argument(
             "--name",
             type=str,
-            help="Name of the llama stack build to override from template config",
+            help="Name of the Llama Stack build to override from template config. This name is used as the path for storing configuration files and for building conda environments/docker images. If not specified, the name from the template config is used",
+        )
+
+        self.parser.add_argument(
+            "--image-type",
+            type=str,
+            help="Image type to use for the build. This can be either conda or docker. If not specified, conda is used by default",
+            default="conda",
         )
 
     def _run_stack_build_command_from_build_config(
@@ -46,6 +84,8 @@ class StackBuild(Subcommand):
         import json
         import os
 
+        import yaml
+
         from llama_stack.distribution.build import ApiInput, build_image, ImageType
         from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
 
@@ -78,22 +118,80 @@ class StackBuild(Subcommand):
             f"Build spec configuration saved at {str(build_file_path)}",
             color="blue",
         )
+
+        configure_name = (
+            build_config.name
+            if build_config.image_type == "conda"
+            else (f"llamastack-{build_config.name}")
+        )
         cprint(
-            f"You may now run `llama stack configure {build_config.name}` or `llama stack configure {str(build_file_path)}`",
+            f"You may now run `llama stack configure {configure_name}` or `llama stack configure {str(build_file_path)}`",
             color="green",
         )
 
+    def _run_template_list_cmd(self, args: argparse.Namespace) -> None:
+        import json
+
+        import yaml
+
+        from llama_stack.cli.table import print_table
+
+        # eventually, this should query a registry at llama.meta.com/llamastack/distributions
+        headers = [
+            "Template Name",
+            "Providers",
+            "Description",
+        ]
+
+        rows = []
+        for spec in available_templates_specs():
+            rows.append(
+                [
+                    spec.name,
+                    json.dumps(spec.distribution_spec.providers, indent=2),
+                    spec.distribution_spec.description,
+                ]
+            )
+        print_table(
+            rows,
+            headers,
+            separate_rows=True,
+        )
+
     def _run_stack_build_command(self, args: argparse.Namespace) -> None:
+        import yaml
         from llama_stack.distribution.distribution import Api, api_providers
         from llama_stack.distribution.utils.dynamic import instantiate_class_type
         from prompt_toolkit import prompt
         from prompt_toolkit.validation import Validator
         from termcolor import cprint
 
-        if not args.config:
-            name = prompt(
-                "> Enter a unique name for identifying your Llama Stack build (e.g. my-local-stack): "
-            )
+        if args.list_templates:
+            self._run_template_list_cmd(args)
+            return
+
+        if args.template:
+            if not args.name:
+                self.parser.error(
+                    "You must specify a name for the build using --name when using a template"
+                )
+                return
+            build_path = TEMPLATES_PATH / f"{args.template}-build.yaml"
+            if not build_path.exists():
+                self.parser.error(
+                    f"Could not find template {args.template}. Please run `llama stack build --list-templates` to see the available templates"
+                )
+                return
+            with open(build_path, "r") as f:
+                build_config = BuildConfig(**yaml.safe_load(f))
+                build_config.name = args.name
+                build_config.image_type = args.image_type
+                self._run_stack_build_command_from_build_config(build_config)
+
+            return
+
+        if not args.config and not args.template:
+            name = prompt("> Enter a name for your Llama Stack (e.g. my-local-stack): ")
my-local-stack): ") image_type = prompt( "> Enter the image type you want your Llama Stack to be built as (docker or conda): ", validator=Validator.from_callable( @@ -154,6 +252,4 @@ class StackBuild(Subcommand): except Exception as e: self.parser.error(f"Could not parse config file {args.config}: {e}") return - if args.name: - build_config.name = args.name self._run_stack_build_command_from_build_config(build_config) diff --git a/llama_stack/cli/stack/configure.py b/llama_stack/cli/stack/configure.py index 6b3b53f3e..b0aa1c3ab 100644 --- a/llama_stack/cli/stack/configure.py +++ b/llama_stack/cli/stack/configure.py @@ -5,17 +5,10 @@ # the root directory of this source tree. import argparse -import json -from pathlib import Path - -import yaml from llama_stack.cli.subcommand import Subcommand from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR - -from llama_stack.distribution.utils.exec import run_with_pty from llama_stack.distribution.datatypes import * # noqa: F403 -import os class StackConfigure(Subcommand): @@ -46,9 +39,17 @@ class StackConfigure(Subcommand): ) def _run_stack_configure_cmd(self, args: argparse.Namespace) -> None: + import json + import os + from pathlib import Path + import pkg_resources + import yaml + from llama_stack.distribution.build import ImageType + + from llama_stack.distribution.utils.exec import run_with_pty from termcolor import cprint docker_image = None @@ -98,7 +99,14 @@ class StackConfigure(Subcommand): build_config: BuildConfig, output_dir: Optional[str] = None, ): + import json + import os + from pathlib import Path + + import yaml from llama_stack.distribution.configure import configure_api_providers + + from llama_stack.distribution.utils.exec import run_with_pty from llama_stack.distribution.utils.serialize import EnumEncoder from termcolor import cprint diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py index 95d91aa05..4e2009ee2 100644 --- a/llama_stack/cli/stack/run.py +++ b/llama_stack/cli/stack/run.py @@ -6,10 +6,6 @@ import argparse -from pathlib import Path - -import yaml - from llama_stack.cli.subcommand import Subcommand from llama_stack.distribution.datatypes import * # noqa: F403 @@ -46,7 +42,10 @@ class StackRun(Subcommand): ) def _run_stack_run_cmd(self, args: argparse.Namespace) -> None: + from pathlib import Path + import pkg_resources + import yaml from llama_stack.distribution.build import ImageType from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR diff --git a/llama_stack/distribution/example_configs/docker/local-docker-example-build.yaml b/llama_stack/distribution/example_configs/docker/local-docker-example-build.yaml deleted file mode 100644 index 885b6e58c..000000000 --- a/llama_stack/distribution/example_configs/docker/local-docker-example-build.yaml +++ /dev/null @@ -1,10 +0,0 @@ -name: local-docker-example -distribution_spec: - description: Use code from `llama_stack` itself to serve all llama stack APIs - providers: - inference: meta-reference - memory: meta-reference - safety: meta-reference - agents: meta-reference - telemetry: meta-reference -image_type: docker diff --git a/llama_stack/distribution/example_configs/conda/local-conda-example-build.yaml b/llama_stack/distribution/templates/local-build.yaml similarity index 91% rename from llama_stack/distribution/example_configs/conda/local-conda-example-build.yaml rename to llama_stack/distribution/templates/local-build.yaml index f98e48570..f10461256 100644 --- 
+++ b/llama_stack/distribution/templates/local-build.yaml
@@ -1,4 +1,4 @@
-name: local-conda-example
+name: local
 distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:
diff --git a/llama_stack/distribution/example_configs/conda/local-fireworks-conda-example-build.yaml b/llama_stack/distribution/templates/local-fireworks-build.yaml
similarity index 87%
rename from llama_stack/distribution/example_configs/conda/local-fireworks-conda-example-build.yaml
rename to llama_stack/distribution/templates/local-fireworks-build.yaml
index 5e17f83c0..33bdee3b5 100644
--- a/llama_stack/distribution/example_configs/conda/local-fireworks-conda-example-build.yaml
+++ b/llama_stack/distribution/templates/local-fireworks-build.yaml
@@ -1,4 +1,4 @@
-name: local-fireworks-conda-example
+name: local-fireworks
 distribution_spec:
   description: Use Fireworks.ai for running LLM inference
   providers:
diff --git a/llama_stack/distribution/example_configs/conda/local-ollama-conda-example-build.yaml b/llama_stack/distribution/templates/local-ollama-build.yaml
similarity index 88%
rename from llama_stack/distribution/example_configs/conda/local-ollama-conda-example-build.yaml
rename to llama_stack/distribution/templates/local-ollama-build.yaml
index 1c43e5998..d9116b4b1 100644
--- a/llama_stack/distribution/example_configs/conda/local-ollama-conda-example-build.yaml
+++ b/llama_stack/distribution/templates/local-ollama-build.yaml
@@ -1,4 +1,4 @@
-name: local-ollama-conda-example
+name: local-ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
diff --git a/llama_stack/distribution/example_configs/conda/local-tgi-conda-example-build.yaml b/llama_stack/distribution/templates/local-tgi-build.yaml
similarity index 92%
rename from llama_stack/distribution/example_configs/conda/local-tgi-conda-example-build.yaml
rename to llama_stack/distribution/templates/local-tgi-build.yaml
index 07848b130..e764aef8c 100644
--- a/llama_stack/distribution/example_configs/conda/local-tgi-conda-example-build.yaml
+++ b/llama_stack/distribution/templates/local-tgi-build.yaml
@@ -1,4 +1,4 @@
-name: local-tgi-conda-example
+name: local-tgi
 distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
   providers:
diff --git a/llama_stack/distribution/example_configs/conda/local-together-conda-example-build.yaml b/llama_stack/distribution/templates/local-together-build.yaml
similarity index 89%
rename from llama_stack/distribution/example_configs/conda/local-together-conda-example-build.yaml
rename to llama_stack/distribution/templates/local-together-build.yaml
index df11bd0ed..1ab891518 100644
--- a/llama_stack/distribution/example_configs/conda/local-together-conda-example-build.yaml
+++ b/llama_stack/distribution/templates/local-together-build.yaml
@@ -1,4 +1,4 @@
-name: local-tgi-conda-example
+name: local-together
 distribution_spec:
   description: Use Together.ai for running LLM inference
   providers:
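
Example usage (a sketch based on the flags this patch adds; `local-ollama`
comes from the renamed template files above, and `my-local-stack` is an
illustrative stack name):

  # list the available templates in a table
  llama stack build --list-templates

  # build from a template; --name is required when --template is used,
  # and --image-type defaults to conda
  llama stack build --template local-ollama --name my-local-stack

  # afterwards, as the build output suggests for conda builds:
  llama stack configure my-local-stack

For docker builds, the configure target is prefixed accordingly
(`llamastack-my-local-stack`), per the configure_name logic above.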