diff --git a/llama_toolchain/cli/stack/build.py b/llama_toolchain/cli/stack/build.py
index 3801637c0..e13cd10fb 100644
--- a/llama_toolchain/cli/stack/build.py
+++ b/llama_toolchain/cli/stack/build.py
@@ -27,7 +27,7 @@ class StackBuild(Subcommand):
 
     def _add_arguments(self):
         self.parser.add_argument(
-            "--config",
+            "config",
             type=str,
             help="Path to a config file to use for the build",
         )
@@ -77,17 +77,20 @@ class StackBuild(Subcommand):
         from llama_toolchain.common.prompt_for_config import prompt_for_config
         from llama_toolchain.core.dynamic import instantiate_class_type
 
-        if args.config:
-            with open(args.config, "r") as f:
-                try:
-                    build_config = BuildConfig(**yaml.safe_load(f))
-                except Exception as e:
-                    self.parser.error(f"Could not parse config file {args.config}: {e}")
-                    return
-            if args.name:
-                build_config.name = args.name
-            self._run_stack_build_command_from_build_config(build_config)
+        if not args.config:
+            self.parser.error(
+                "No config file specified. Please use `llama stack build /path/to/*-build.yaml`. Example config files can be found in llama_toolchain/configs/distributions"
+            )
             return
 
-        build_config = prompt_for_config(BuildConfig, None)
-        self._run_stack_build_command_from_build_config(build_config)
+        with open(args.config, "r") as f:
+            try:
+                build_config = BuildConfig(**yaml.safe_load(f))
+            except Exception as e:
+                self.parser.error(f"Could not parse config file {args.config}: {e}")
+                return
+        if args.name:
+            build_config.name = args.name
+        self._run_stack_build_command_from_build_config(build_config)
+
+        return
diff --git a/llama_toolchain/cli/stack/configure.py b/llama_toolchain/cli/stack/configure.py
index 485200397..5fa105048 100644
--- a/llama_toolchain/cli/stack/configure.py
+++ b/llama_toolchain/cli/stack/configure.py
@@ -128,7 +128,6 @@ class StackConfigure(Subcommand):
         )
 
         config.providers = configure_api_providers(config.providers)
-        config.distribution_type = build_config.distribution_spec.distribution_type
         config.docker_image = (
             package_name if build_config.image_type == "docker" else None
         )
diff --git a/llama_toolchain/cli/stack/list_distributions.py b/llama_toolchain/cli/stack/list_distributions.py
deleted file mode 100644
index 6a4283f74..000000000
--- a/llama_toolchain/cli/stack/list_distributions.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import argparse
-import json
-
-from llama_toolchain.cli.subcommand import Subcommand
-
-
-class StackListDistributions(Subcommand):
-    def __init__(self, subparsers: argparse._SubParsersAction):
-        super().__init__()
-        self.parser = subparsers.add_parser(
-            "list-distributions",
-            prog="llama stack list-distributions",
-            description="Show available Llama Stack Distributions",
-            formatter_class=argparse.RawTextHelpFormatter,
-        )
-        self._add_arguments()
-        self.parser.set_defaults(func=self._run_distribution_list_cmd)
-
-    def _add_arguments(self):
-        pass
-
-    def _run_distribution_list_cmd(self, args: argparse.Namespace) -> None:
-        from llama_toolchain.cli.table import print_table
-        from llama_toolchain.core.distribution_registry import (
-            available_distribution_specs,
-        )
-
-        # eventually, this should query a registry at llama.meta.com/llamastack/distributions
-        headers = [
-            "Distribution Type",
-            "Providers",
-            "Description",
-        ]
-
-        rows = []
-        for spec in available_distribution_specs():
-            providers = {k: v for k, v in spec.providers.items()}
-            rows.append(
-                [
-                    spec.distribution_type,
-                    json.dumps(providers, indent=2),
-                    spec.description,
-                ]
-            )
-        print_table(
-            rows,
-            headers,
-            separate_rows=True,
-        )
diff --git a/llama_toolchain/cli/stack/run.py b/llama_toolchain/cli/stack/run.py
index d040cb1f7..091cc50a9 100644
--- a/llama_toolchain/cli/stack/run.py
+++ b/llama_toolchain/cli/stack/run.py
@@ -69,9 +69,6 @@ class StackRun(Subcommand):
         with open(config_file, "r") as f:
             config = PackageConfig(**yaml.safe_load(f))
 
-        if not config.distribution_type:
-            raise ValueError("Build config appears to be corrupt.")
-
         if config.docker_image:
             script = pkg_resources.resource_filename(
                 "llama_toolchain",
diff --git a/llama_toolchain/cli/stack/stack.py b/llama_toolchain/cli/stack/stack.py
index e41f10633..0e4abb5a2 100644
--- a/llama_toolchain/cli/stack/stack.py
+++ b/llama_toolchain/cli/stack/stack.py
@@ -11,7 +11,6 @@ from llama_toolchain.cli.subcommand import Subcommand
 from .build import StackBuild
 from .configure import StackConfigure
 from .list_apis import StackListApis
-from .list_distributions import StackListDistributions
 from .list_providers import StackListProviders
 from .run import StackRun
 
@@ -31,6 +30,5 @@ class StackParser(Subcommand):
         StackBuild.create(subparsers)
         StackConfigure.create(subparsers)
         StackListApis.create(subparsers)
-        StackListDistributions.create(subparsers)
         StackListProviders.create(subparsers)
         StackRun.create(subparsers)
diff --git a/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml
index ce8ebe1be..cdb5243ca 100644
--- a/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml
@@ -1,6 +1,5 @@
 name: local-conda-example
 distribution_spec:
-  distribution_type: local
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
   docker_image: null
   providers:
diff --git a/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml
index 4531fcebb..4843c5c55 100644
--- a/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml
@@ -1,6 +1,5 @@
 name: local-fireworks-conda-example
 distribution_spec:
-  distribution_type: local-plus-fireworks-inference
   description: Use Fireworks.ai for running LLM inference
   docker_image: null
   providers:
diff --git a/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml
index dbc2c7936..b464a7151 100644
--- a/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml
@@ -1,6 +1,5 @@
 name: local-ollama-conda-example
 distribution_spec:
-  distribution_type: local-plus-ollama-inference
   description: Like local, but use ollama for running LLM inference
   docker_image: null
   providers:
diff --git a/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml
index c366adda7..0f5490192 100644
--- a/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml
@@ -1,6 +1,5 @@
 name: local-tgi-conda-example
 distribution_spec:
-  distribution_type: local-plus-tgi-inference
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
   docker_image: null
   providers:
diff --git a/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml
index 0d69369ef..2cefb55dc 100644
--- a/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml
@@ -1,6 +1,5 @@
 name: local-tgi-conda-example
 distribution_spec:
-  distribution_type: local-plus-together-inference
   description: Use Together.ai for running LLM inference
   docker_image: null
   providers:
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml
index 315fe5291..134dc538c 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml
@@ -1,4 +1,3 @@
-distribution_type: local-ollama
 description: Like local, but use ollama for running LLM inference
 docker_image: null
 providers:
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml
index d68c0015c..0dc8ae19b 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml
@@ -1,4 +1,3 @@
-distribution_type: local-plus-fireworks-inference
 description: Use Fireworks.ai for running LLM inference
 docker_image: null
 providers:
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml
index 27caa84e2..37eff0cc9 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml
@@ -1,4 +1,3 @@
-distribution_type: local-plus-tgi-inference
 description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
 docker_image: null
 providers:
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml
index 507119e1e..198d7e2fd 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml
@@ -1,4 +1,3 @@
-distribution_type: local-plus-together-inference
 description: Use Together.ai for running LLM inference
 docker_image: null
 providers:
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local.yaml b/llama_toolchain/configs/distributions/distribution_registry/local.yaml
index ca8aa1fe3..3c5f57d69 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local.yaml
@@ -1,4 +1,3 @@
-distribution_type: local
 description: Use code from `llama_toolchain` itself to serve all llama stack APIs
 docker_image: null
 providers:
diff --git a/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml b/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml
index 8aacdd76b..200fab607 100644
--- a/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml
+++ b/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml
@@ -1,6 +1,5 @@
 name: local-docker-example
 distribution_spec:
-  distribution_type: local
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
   docker_image: null
   providers:
diff --git a/llama_toolchain/core/datatypes.py b/llama_toolchain/core/datatypes.py
index 359ffe2ce..4549e1819 100644
--- a/llama_toolchain/core/datatypes.py
+++ b/llama_toolchain/core/datatypes.py
@@ -151,23 +151,13 @@ def remote_provider_spec(
 
 @json_schema_type
 class DistributionSpec(BaseModel):
-    distribution_type: str = Field(
-        default="local",
-        description="Name of the distribution type. This can used to identify the distribution",
-    )
     description: Optional[str] = Field(
         default="",
         description="Description of the distribution",
     )
    docker_image: Optional[str] = None
     providers: Dict[str, str] = Field(
-        default={
-            "inference": "meta-reference",
-            "memory": "meta-reference-faiss",
-            "safety": "meta-reference",
-            "agentic_system": "meta-reference",
-            "telemetry": "console",
-        },
+        default_factory=dict,
         description="Provider Types for each of the APIs provided by this distribution",
     )
 
@@ -183,8 +173,6 @@ Reference to the distribution this package refers to. For unregistered (adhoc) p
 this could be just a hash
 """,
     )
-    distribution_type: Optional[str] = None
-
     docker_image: Optional[str] = Field(
         default=None,
         description="Reference to the docker image if this package refers to a container",
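Not part of the patch, but for orientation: after this change DistributionSpec no longer carries a distribution_type field, and its providers mapping defaults to an empty dict instead of the old hard-coded meta-reference set. Below is a minimal sketch of the slimmed-down model as a caller now sees it, assuming standard pydantic imports and omitting the real module's @json_schema_type decorator; the providers value in the usage line is illustrative only.

from typing import Dict, Optional

from pydantic import BaseModel, Field


class DistributionSpec(BaseModel):
    description: Optional[str] = Field(
        default="",
        description="Description of the distribution",
    )
    docker_image: Optional[str] = None
    # No distribution_type field anymore; providers starts empty rather than
    # defaulting to the meta-reference provider set.
    providers: Dict[str, str] = Field(
        default_factory=dict,
        description="Provider Types for each of the APIs provided by this distribution",
    )


# Illustrative usage: providers must now be supplied explicitly.
spec = DistributionSpec(
    description="Use code from `llama_toolchain` itself to serve all llama stack APIs",
    providers={"inference": "meta-reference"},
)
print(spec.providers)  # {'inference': 'meta-reference'}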