Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 15:23:51 +00:00)

Commit: remove distribution types!

This commit is contained in:
  parent e466ec389b
  commit c0c5839361

17 changed files with 17 additions and 98 deletions

@@ -27,7 +27,7 @@ class StackBuild(Subcommand):
     def _add_arguments(self):
         self.parser.add_argument(
-            "--config",
+            "config",
             type=str,
             help="Path to a config file to use for the build",
         )
 
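
Note: the hunk above switches `--config` from an optional flag to a positional argument, which is why the error message later in this commit says to invoke `llama stack build /path/to/*-build.yaml`. A minimal standalone argparse sketch of the difference (not the actual StackBuild code):

import argparse

parser = argparse.ArgumentParser(prog="llama stack build")
# Before: the path was passed as a flag, e.g. `llama stack build --config my-build.yaml`
# parser.add_argument("--config", type=str, help="Path to a config file to use for the build")
# After: a positional argument, e.g. `llama stack build my-build.yaml`
parser.add_argument("config", type=str, help="Path to a config file to use for the build")

args = parser.parse_args(["my-build.yaml"])
print(args.config)  # -> my-build.yaml (the attribute name `args.config` is unchanged)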

@@ -77,17 +77,20 @@ class StackBuild(Subcommand):
         from llama_toolchain.common.prompt_for_config import prompt_for_config
         from llama_toolchain.core.dynamic import instantiate_class_type
 
-        if args.config:
-            with open(args.config, "r") as f:
-                try:
-                    build_config = BuildConfig(**yaml.safe_load(f))
-                except Exception as e:
-                    self.parser.error(f"Could not parse config file {args.config}: {e}")
-                    return
-            if args.name:
-                build_config.name = args.name
-            self._run_stack_build_command_from_build_config(build_config)
+        if not args.config:
+            self.parser.error(
+                "No config file specified. Please use `llama stack build /path/to/*-build.yaml`. Example config files can be found in llama_toolchain/configs/distributions"
+            )
+            return
 
-        build_config = prompt_for_config(BuildConfig, None)
-        self._run_stack_build_command_from_build_config(build_config)
+        with open(args.config, "r") as f:
+            try:
+                build_config = BuildConfig(**yaml.safe_load(f))
+            except Exception as e:
+                self.parser.error(f"Could not parse config file {args.config}: {e}")
+                return
+            if args.name:
+                build_config.name = args.name
+            self._run_stack_build_command_from_build_config(build_config)
+
         return
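
Note: the hunk above drops the interactive prompt_for_config fallback, so a build file is now mandatory, while the `--name` override survives via the retained `if args.name:` branch. A standalone sketch of the resulting flow, with BuildConfig reduced to the one field this path touches (a stand-in, not the real model):

import io
import yaml
from pydantic import BaseModel

class BuildConfig(BaseModel):  # stand-in; the real model has more fields
    name: str

f = io.StringIO("name: local-conda-example\n")  # stands in for open(args.config)
build_config = BuildConfig(**yaml.safe_load(f))
args_name = "my-stack"  # stands in for a `--name my-stack` argument
if args_name:
    build_config.name = args_name
print(build_config.name)  # -> my-stack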

@@ -128,7 +128,6 @@ class StackConfigure(Subcommand):
         )
 
         config.providers = configure_api_providers(config.providers)
-        config.distribution_type = build_config.distribution_spec.distribution_type
         config.docker_image = (
             package_name if build_config.image_type == "docker" else None
         )

@@ -1,55 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import argparse
-import json
-
-from llama_toolchain.cli.subcommand import Subcommand
-
-
-class StackListDistributions(Subcommand):
-    def __init__(self, subparsers: argparse._SubParsersAction):
-        super().__init__()
-        self.parser = subparsers.add_parser(
-            "list-distributions",
-            prog="llama stack list-distributions",
-            description="Show available Llama Stack Distributions",
-            formatter_class=argparse.RawTextHelpFormatter,
-        )
-        self._add_arguments()
-        self.parser.set_defaults(func=self._run_distribution_list_cmd)
-
-    def _add_arguments(self):
-        pass
-
-    def _run_distribution_list_cmd(self, args: argparse.Namespace) -> None:
-        from llama_toolchain.cli.table import print_table
-        from llama_toolchain.core.distribution_registry import (
-            available_distribution_specs,
-        )
-
-        # eventually, this should query a registry at llama.meta.com/llamastack/distributions
-        headers = [
-            "Distribution Type",
-            "Providers",
-            "Description",
-        ]
-
-        rows = []
-        for spec in available_distribution_specs():
-            providers = {k: v for k, v in spec.providers.items()}
-            rows.append(
-                [
-                    spec.distribution_type,
-                    json.dumps(providers, indent=2),
-                    spec.description,
-                ]
-            )
-        print_table(
-            rows,
-            headers,
-            separate_rows=True,
-        )
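
Note: the deleted `list-distributions` subcommand rendered a table whose first column was spec.distribution_type, the very field this commit removes, so the whole command goes with it. Its only real logic was row construction; as a standalone sketch (a plain dict stands in for one entry from available_distribution_specs()):

import json

spec = {
    "distribution_type": "local",  # the column that loses its source field
    "providers": {"inference": "meta-reference"},
    "description": "Use code from `llama_toolchain` itself to serve all llama stack APIs",
}
row = [spec["distribution_type"], json.dumps(spec["providers"], indent=2), spec["description"]]
print(row)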

@@ -69,9 +69,6 @@ class StackRun(Subcommand):
         with open(config_file, "r") as f:
             config = PackageConfig(**yaml.safe_load(f))
 
-        if not config.distribution_type:
-            raise ValueError("Build config appears to be corrupt.")
-
         if config.docker_image:
             script = pkg_resources.resource_filename(
                 "llama_toolchain",

@@ -11,7 +11,6 @@ from llama_toolchain.cli.subcommand import Subcommand
 from .build import StackBuild
 from .configure import StackConfigure
 from .list_apis import StackListApis
-from .list_distributions import StackListDistributions
 from .list_providers import StackListProviders
 from .run import StackRun
 

@@ -31,6 +30,5 @@ class StackParser(Subcommand):
         StackBuild.create(subparsers)
         StackConfigure.create(subparsers)
         StackListApis.create(subparsers)
-        StackListDistributions.create(subparsers)
         StackListProviders.create(subparsers)
         StackRun.create(subparsers)

@@ -1,6 +1,5 @@
 name: local-conda-example
 distribution_spec:
-  distribution_type: local
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-fireworks-conda-example
 distribution_spec:
-  distribution_type: local-plus-fireworks-inference
   description: Use Fireworks.ai for running LLM inference
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-ollama-conda-example
 distribution_spec:
-  distribution_type: local-plus-ollama-inference
   description: Like local, but use ollama for running LLM inference
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-tgi-conda-example
 distribution_spec:
-  distribution_type: local-plus-tgi-inference
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-tgi-conda-example
 distribution_spec:
-  distribution_type: local-plus-together-inference
   description: Use Together.ai for running LLM inference
   docker_image: null
   providers:

@@ -1,4 +1,3 @@
-distribution_type: local-ollama
 description: Like local, but use ollama for running LLM inference
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local-plus-fireworks-inference
 description: Use Fireworks.ai for running LLM inference
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local-plus-tgi-inference
 description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local-plus-together-inference
 description: Use Together.ai for running LLM inference
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local
 description: Use code from `llama_toolchain` itself to serve all llama stack APIs
 docker_image: null
 providers:

@@ -1,6 +1,5 @@
 name: local-docker-example
 distribution_spec:
-  distribution_type: local
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
   docker_image: null
   providers:
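
Note: all ten YAML hunks above make the same one-line change, deleting the distribution_type key and leaving everything else intact. A sketch that checks one of the trimmed files still validates against the new model (DistributionSpec here is reduced to the fields visible in the next hunk; the real files list provider entries that these hunks truncate, so an explicit empty mapping stands in):

import yaml
from typing import Dict, Optional
from pydantic import BaseModel, Field

class DistributionSpec(BaseModel):  # reduced stand-in, not the repo's full model
    description: Optional[str] = ""
    docker_image: Optional[str] = None
    providers: Dict[str, str] = Field(default_factory=dict)

text = """
description: Use code from `llama_toolchain` itself to serve all llama stack APIs
docker_image: null
providers: {}
"""
spec = DistributionSpec(**yaml.safe_load(text))
print(spec.description)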

@@ -151,23 +151,13 @@ def remote_provider_spec(
 
 @json_schema_type
 class DistributionSpec(BaseModel):
-    distribution_type: str = Field(
-        default="local",
-        description="Name of the distribution type. This can used to identify the distribution",
-    )
     description: Optional[str] = Field(
         default="",
         description="Description of the distribution",
     )
     docker_image: Optional[str] = None
     providers: Dict[str, str] = Field(
-        default={
-            "inference": "meta-reference",
-            "memory": "meta-reference-faiss",
-            "safety": "meta-reference",
-            "agentic_system": "meta-reference",
-            "telemetry": "console",
-        },
+        default_factory=dict,
         description="Provider Types for each of the APIs provided by this distribution",
     )
 
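
Note: besides deleting the distribution_type field, the hunk above replaces a literal default dict of meta-reference providers with default_factory=dict. The observable difference, in isolation (both classes are abbreviated sketches, not the repo's code):

from typing import Dict
from pydantic import BaseModel, Field

class OldSpec(BaseModel):  # pre-commit shape, abbreviated
    providers: Dict[str, str] = Field(
        default={"inference": "meta-reference", "safety": "meta-reference"}
    )

class NewSpec(BaseModel):  # post-commit shape
    providers: Dict[str, str] = Field(default_factory=dict)

print(OldSpec().providers)  # -> {'inference': 'meta-reference', 'safety': 'meta-reference'}
print(NewSpec().providers)  # -> {} (providers must now come from the build file)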

@@ -183,8 +173,6 @@ Reference to the distribution this package refers to. For unregistered (adhoc) packages,
 this could be just a hash
 """,
     )
-    distribution_type: Optional[str] = None
-
     docker_image: Optional[str] = Field(
         default=None,
         description="Reference to the docker image if this package refers to a container",
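
Note: with `distribution_type: Optional[str] = None` gone from PackageConfig (and the corresponding corruption check gone from StackRun above), older run configs that still contain the key should keep loading, since pydantic ignores unknown fields by default. A sketch under that assumption, with PackageConfig reduced to one field from this hunk:

import yaml
from typing import Optional
from pydantic import BaseModel

class PackageConfig(BaseModel):  # stand-in; the real model has more fields
    docker_image: Optional[str] = None

old_yaml = "distribution_type: local\ndocker_image: null\n"
config = PackageConfig(**yaml.safe_load(old_yaml))
print(config.docker_image)  # -> None; the stale distribution_type key is ignored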