* API Keys passed from Client instead of distro configuration
* delete distribution registry
* Rename the "package" word away
* Introduce a "Router" layer for providers

  Some providers need to be factorized and considered as thin routing layers on top of other providers. Consider two examples:

  - The inference API should be a routing layer over inference providers, routed using the "model" key
  - The memory banks API is another instance where various memory bank types will be provided by independent providers (e.g., a vector store is served by Chroma while a key-value memory can be served by Redis or PGVector)

  This commit introduces a generalized routing layer for this purpose (see the sketch after this message).
* update `apis_to_serve`
* llama_toolchain -> llama_stack
* Codemod from llama_toolchain -> llama_stack
  - added providers/registry
  - cleaned up api/ subdirectories and moved impls away
  - restructured api/api.py
  - `from llama_stack.apis.<api> import foo` should work now
  - update imports to do llama_stack.apis.<api>
  - update many other imports
  - added __init__, fixed some registry imports
  - updated registry imports
  - create_agentic_system -> create_agent
  - AgenticSystem -> Agent
* Moved some stuff out of common/; re-generated OpenAPI spec
* llama-toolchain -> llama-stack (hyphens)
* add control plane API
* add redis adapter + sqlite provider
* move core -> distribution
* Some more toolchain -> stack changes
* small naming shenanigans
* Removing custom tool and agent utilities and moving them client side
* Move control plane to distribution server for now
* Remove control plane from API list
* no codeshield dependency randomly plzzzzz
* Add "fire" as a dependency
* add back event loggers
* stack configure fixes
* use brave instead of bing in the example client
* add init file so it gets packaged
* add init files so it gets packaged
* Update MANIFEST
* bug fix

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Xi Yan <xiyan@meta.com>
Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
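To make the "Router" idea above concrete, here is a minimal sketch of a routing layer that dispatches inference calls to a provider selected by the "model" key. The names (InferenceProvider, InferenceRouter) are illustrative stand-ins, not the actual llama_stack classes.

# Minimal sketch of the routing-layer idea described above. All class names
# here are hypothetical, not the actual llama_stack implementation.
from typing import Dict, Protocol


class InferenceProvider(Protocol):
    def completion(self, model: str, prompt: str) -> str: ...


class InferenceRouter:
    """Thin layer that routes inference requests by the "model" key."""

    def __init__(self, routes: Dict[str, InferenceProvider]) -> None:
        # routes maps a model identifier to the provider that serves it
        self.routes = routes

    def completion(self, model: str, prompt: str) -> str:
        provider = self.routes.get(model)
        if provider is None:
            raise ValueError(f"no provider registered for model {model!r}")
        return provider.completion(model, prompt)

The same shape would apply to memory banks, with the routing key being the bank type instead of the model name.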
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import json
import os
from datetime import datetime
from pathlib import Path
from typing import Optional

import pkg_resources
import yaml
from termcolor import cprint

from llama_stack.cli.subcommand import Subcommand
from llama_stack.distribution.datatypes import *  # noqa: F403
from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
from llama_stack.distribution.utils.exec import run_with_pty


class StackConfigure(Subcommand):
    """Llama CLI for configuring llama stack configs"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "configure",
            prog="llama stack configure",
            description="configure a llama stack distribution",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_stack_configure_cmd)

    def _add_arguments(self):
        self.parser.add_argument(
            "config",
            type=str,
            help="Path to the build config file (e.g. ~/.llama/builds/<image_type>/<name>-build.yaml). For docker, this could also be the name of the docker image.",
        )

        self.parser.add_argument(
            "--output-dir",
            type=str,
            help="Path to the output directory to store the generated run.yaml config file. If not specified, will use ~/.llama/builds/<image_type>/<name>-run.yaml",
        )
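
    # Example invocations (hypothetical build/image names, following the help
    # text above; not taken from upstream docs):
    #   llama stack configure ~/.llama/builds/conda/my-stack-build.yaml
    #   llama stack configure llamastack-local-gpu --output-dir ~/.llama/builds/docker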

    def _run_stack_configure_cmd(self, args: argparse.Namespace) -> None:
        from llama_stack.distribution.build import ImageType

        docker_image = None
        build_config_file = Path(args.config)
        if not build_config_file.exists():
            cprint(
                f"Could not find {build_config_file}. Trying docker image name instead...",
                color="green",
            )
            docker_image = args.config

            builds_dir = BUILDS_BASE_DIR / ImageType.docker.value
            if args.output_dir:
                builds_dir = Path(args.output_dir)
            os.makedirs(builds_dir, exist_ok=True)

            script = pkg_resources.resource_filename(
                "llama_stack", "distribution/configure_container.sh"
            )
            script_args = [script, docker_image, str(builds_dir)]

            return_code = run_with_pty(script_args)

            # the script above regenerates the run config inside the container;
            # a non-zero exit means the docker image could not be found or configured
            if return_code != 0:
                self.parser.error(
                    f"Cannot find {build_config_file}. Please run llama stack build first or check if the docker image exists"
                )

            build_name = docker_image.removeprefix("llamastack-")
            cprint(
                f"YAML configuration has been written to {builds_dir / f'{build_name}-run.yaml'}",
                color="green",
            )
            return

        with open(build_config_file, "r") as f:
            build_config = BuildConfig(**yaml.safe_load(f))

        self._configure_llama_distribution(build_config, args.output_dir)

    def _configure_llama_distribution(
        self,
        build_config: BuildConfig,
        output_dir: Optional[str] = None,
    ):
        from llama_stack.distribution.configure import configure_api_providers
        from llama_stack.distribution.utils.serialize import EnumEncoder

        builds_dir = BUILDS_BASE_DIR / build_config.image_type
        if output_dir:
            builds_dir = Path(output_dir)
        os.makedirs(builds_dir, exist_ok=True)
        image_name = build_config.name.replace("::", "-")
        run_config_file = builds_dir / f"{image_name}-run.yaml"

        # reuse an existing run config if present, otherwise start from scratch
        if run_config_file.exists():
            cprint(
                f"Configuration already exists at `{str(run_config_file)}`. Will overwrite...",
                "yellow",
                attrs=["bold"],
            )
            config = StackRunConfig(**yaml.safe_load(run_config_file.read_text()))
        else:
            config = StackRunConfig(
                built_at=datetime.now(),
                image_name=image_name,
                apis_to_serve=[],
                provider_map={},
            )

        config = configure_api_providers(config, build_config.distribution_spec)

        config.docker_image = (
            image_name if build_config.image_type == "docker" else None
        )
        config.conda_env = image_name if build_config.image_type == "conda" else None

        # round-trip through JSON so enum values serialize cleanly before the YAML dump
        with open(run_config_file, "w") as f:
            to_write = json.loads(json.dumps(config.dict(), cls=EnumEncoder))
            f.write(yaml.dump(to_write, sort_keys=False))

        cprint(
            f"> YAML configuration has been written to {run_config_file}",
            color="blue",
        )
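
For orientation, here is a sketch of the shape of the run config that `_configure_llama_distribution` writes out. The field names mirror the `StackRunConfig` construction above; all values are made up for illustration, and the authoritative schema is `StackRunConfig` in `llama_stack.distribution.datatypes`.

# Illustrative only: this dict mirrors the StackRunConfig fields set above
# before the config is dumped to <name>-run.yaml. Values are hypothetical.
from datetime import datetime

example_run_config = {
    "built_at": datetime(2024, 9, 25, 12, 0),   # set via built_at=datetime.now()
    "image_name": "local-gpu",                   # build name with "::" replaced by "-"
    "docker_image": None,                        # only set when image_type == "docker"
    "conda_env": "local-gpu",                    # only set when image_type == "conda"
    "apis_to_serve": ["inference", "memory"],    # filled in by configure_api_providers
    "provider_map": {},                          # per-API provider config, also filled in
}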