forked from phoenix-oss/llama-stack-mirror
API Updates: fleshing out RAG APIs, introduce "llama stack" CLI command (#51)
* add tools to chat completion request
* use templates for generating system prompts
* Moved ToolPromptFormat and jinja templates to llama_models.llama3.api
* <WIP> memory changes
  - inlined AgenticSystemInstanceConfig so API feels more ergonomic
  - renamed it to AgentConfig, AgentInstance -> Agent
  - added a MemoryConfig and `memory` parameter
  - added `attachments` to input and `output_attachments` to the response
  - some naming changes
* InterleavedTextAttachment -> InterleavedTextMedia, introduce memory tool
* flesh out memory banks API
* agentic loop has a RAG implementation
* faiss provider implementation
* memory client works
* re-work tool definitions, fix FastAPI issues, fix tool regressions
* fix agentic_system utils
* basic RAG seems to work
* small bug fixes for inline attachments
* Refactor custom tool execution utilities
* Bug fix, show memory retrieval steps in EventLogger
* No need for api_key for Remote providers
* add special unicode character ↵ to showcase newlines in model prompt templates
* remove api.endpoints imports
* combine datatypes.py and endpoints.py into api.py
* Attachment / add TTL api
* split batch_inference from inference
* minor import fixes
* use a single impl for ChatFormat.decode_assistant_message
* use interleaved_text_media_as_str() utility
* Fix api.datatypes imports
* Add blobfile for tiktoken
* Add ToolPromptFormat to ChatFormat.encode_message so that tools are encoded properly
* templates take optional --format={json,function_tag}
* RAG updates
* Add `api build` subcommand -- WIP
* fix
* build + run image seems to work
* <WIP> adapters
* bunch more work to make adapters work
* api build works for conda now
* ollama remote adapter works
* Several smaller fixes to make adapters work. Also, reorganized the pattern of __init__ inside providers so configuration can stay lightweight
* llama distribution -> llama stack + containers (WIP)
* All the new CLI for api + stack work
* Make Fireworks and Together into the Adapter format
* Some quick fixes to the CLI behavior to make it consistent
* Updated README, phew
* Update cli_reference.md
* llama_toolchain/distribution -> llama_toolchain/core
* Add termcolor
* update paths
* Add a log just for consistency
* chmod +x scripts
* Fix api dependencies not getting added to configuration
* missing import lol
* Delete utils.py; move to agentic system
* Support downloading of URLs for attachments for code interpreter
* Simplify and generalize `llama api build`, yay
* Update `llama stack configure` to be very simple also
* Fix stack start
* Allow building an "adhoc" distribution
* Remove `llama api []` subcommands
* Fixes to llama stack commands and update docs
* Update documentation again and add error messages to llama stack start
* llama stack start -> llama stack run
* Change name of build for less confusion
* Add pyopenapi fork to the repository, update RFC assets
* Remove conflicting annotation
* Added a "--raw" option for model template printing

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
Co-authored-by: Dalton Flanagan <6599399+dltn@users.noreply.github.com>
Parent: 35093c0b6f
Commit: 7bc7785b0d
141 changed files with 8252 additions and 4032 deletions
llama_toolchain/cli/stack/__init__.py (new file, 7 lines)
@@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .stack import StackParser  # noqa
llama_toolchain/cli/stack/build.py (new file, 133 lines)
@@ -0,0 +1,133 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse

from llama_toolchain.cli.subcommand import Subcommand
from llama_toolchain.core.datatypes import *  # noqa: F403


def parse_api_provider_tuples(
    tuples: str, parser: argparse.ArgumentParser
) -> Dict[str, ProviderSpec]:
    from llama_toolchain.core.distribution import api_providers

    all_providers = api_providers()

    deps = {}
    for dep in tuples.split(","):
        dep = dep.strip()
        if not dep:
            continue
        api_str, provider = dep.split("=")
        api = Api(api_str)

        provider = provider.strip()
        if provider not in all_providers[api]:
            parser.error(f"Provider `{provider}` is not available for API `{api}`")
            return
        deps[api] = all_providers[api][provider]

    return deps


class StackBuild(Subcommand):
    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "build",
            prog="llama stack build",
            description="Build a Llama stack container",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_stack_build_command)

    def _add_arguments(self):
        from llama_toolchain.core.distribution_registry import available_distribution_specs
        from llama_toolchain.core.package import (
            BuildType,
        )

        allowed_ids = [d.distribution_id for d in available_distribution_specs()]
        self.parser.add_argument(
            "distribution",
            type=str,
            help="Distribution to build (either \"adhoc\" OR one of: {})".format(allowed_ids),
        )
        self.parser.add_argument(
            "api_providers",
            nargs='?',
            help="Comma separated list of (api=provider) tuples",
        )

        self.parser.add_argument(
            "--name",
            type=str,
            help="Name of the build target (image, conda env)",
            required=True,
        )
        self.parser.add_argument(
            "--type",
            type=str,
            default="conda_env",
            choices=[v.value for v in BuildType],
        )

    def _run_stack_build_command(self, args: argparse.Namespace) -> None:
        from llama_toolchain.core.distribution_registry import resolve_distribution_spec
        from llama_toolchain.core.package import (
            ApiInput,
            BuildType,
            build_package,
        )

        api_inputs = []
        if args.distribution == "adhoc":
            if not args.api_providers:
                self.parser.error("You must specify API providers with (api=provider,...) for building an adhoc distribution")
                return

            parsed = parse_api_provider_tuples(args.api_providers, self.parser)
            for api, provider_spec in parsed.items():
                for dep in provider_spec.api_dependencies:
                    if dep not in parsed:
                        self.parser.error(f"API {api} needs dependency {dep} provided also")
                        return

                api_inputs.append(
                    ApiInput(
                        api=api,
                        provider=provider_spec.provider_id,
                    )
                )
            docker_image = None
        else:
            if args.api_providers:
                self.parser.error("You cannot specify API providers for pre-registered distributions")
                return

            dist = resolve_distribution_spec(args.distribution)
            if dist is None:
                self.parser.error(f"Could not find distribution {args.distribution}")
                return

            for api, provider_id in dist.providers.items():
                api_inputs.append(
                    ApiInput(
                        api=api,
                        provider=provider_id,
                    )
                )
            docker_image = dist.docker_image

        build_package(
            api_inputs,
            build_type=BuildType(args.type),
            name=args.name,
            distribution_id=args.distribution,
            docker_image=docker_image,
        )
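For adhoc builds, the `api_providers` positional takes a comma-separated list of `api=provider` pairs. Below is a minimal standalone sketch of that parsing with a hypothetical provider registry; the real table comes from `api_providers()` in `llama_toolchain.core.distribution`, and the API/provider names here are illustrative only.

from enum import Enum


class Api(Enum):
    inference = "inference"
    safety = "safety"


# Hypothetical registry: api -> {provider_id: provider spec}. The CLI
# resolves the real one via api_providers().
ALL_PROVIDERS = {
    Api.inference: {"meta-reference": "inference-spec"},
    Api.safety: {"meta-reference": "safety-spec"},
}


def parse(tuples: str) -> dict:
    deps = {}
    for dep in tuples.split(","):
        dep = dep.strip()
        if not dep:
            continue
        api_str, provider = dep.split("=")
        api = Api(api_str)
        provider = provider.strip()
        if provider not in ALL_PROVIDERS[api]:
            raise ValueError(f"Provider `{provider}` is not available for API `{api}`")
        deps[api] = ALL_PROVIDERS[api][provider]
    return deps


print(parse("inference=meta-reference, safety=meta-reference"))
# {<Api.inference: 'inference'>: 'inference-spec', <Api.safety: 'safety'>: 'safety-spec'}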
llama_toolchain/cli/stack/configure.py (new file, 106 lines)
@@ -0,0 +1,106 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import json
from pathlib import Path

import yaml
from termcolor import cprint

from llama_toolchain.cli.subcommand import Subcommand
from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR
from llama_toolchain.core.datatypes import *  # noqa: F403


class StackConfigure(Subcommand):
    """Llama cli for configuring llama toolchain configs"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "configure",
            prog="llama stack configure",
            description="configure a llama stack distribution",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_stack_configure_cmd)

    def _add_arguments(self):
        from llama_toolchain.core.distribution_registry import (
            available_distribution_specs,
        )
        from llama_toolchain.core.package import BuildType

        allowed_ids = [d.distribution_id for d in available_distribution_specs()]
        self.parser.add_argument(
            "distribution",
            type=str,
            choices=allowed_ids,
            help="Distribution (one of: {})".format(allowed_ids),
        )
        self.parser.add_argument(
            "--name",
            type=str,
            help="Name of the build",
            required=True,
        )
        self.parser.add_argument(
            "--type",
            type=str,
            default="conda_env",
            choices=[v.value for v in BuildType],
        )

    def _run_stack_configure_cmd(self, args: argparse.Namespace) -> None:
        from llama_toolchain.core.package import BuildType

        build_type = BuildType(args.type)
        name = args.name
        config_file = (
            BUILDS_BASE_DIR
            / args.distribution
            / build_type.descriptor()
            / f"{name}.yaml"
        )
        if not config_file.exists():
            self.parser.error(
                f"Could not find {config_file}. Please run `llama stack build` first"
            )
            return

        configure_llama_distribution(config_file)


def configure_llama_distribution(config_file: Path) -> None:
    from llama_toolchain.common.serialize import EnumEncoder
    from llama_toolchain.core.configure import configure_api_providers
    from llama_toolchain.core.distribution_registry import resolve_distribution_spec

    with open(config_file, "r") as f:
        config = PackageConfig(**yaml.safe_load(f))

    dist = resolve_distribution_spec(config.distribution_id)
    if dist is None:
        raise ValueError(
            f"Could not find any registered distribution `{config.distribution_id}`"
        )

    if config.providers:
        cprint(
            f"Configuration already exists for {config.distribution_id}. Will overwrite...",
            "yellow",
            attrs=["bold"],
        )

    config.providers = configure_api_providers(config.providers)

    with open(config_file, "w") as fp:
        to_write = json.loads(json.dumps(config.dict(), cls=EnumEncoder))
        fp.write(yaml.dump(to_write, sort_keys=False))

    print(f"YAML configuration has been written to {config_file}")
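One detail worth calling out in `configure_llama_distribution` is the `json.dumps(..., cls=EnumEncoder)` round trip: it flattens enum members into plain strings before the dict reaches `yaml.dump`, which would otherwise emit Python object tags. A minimal sketch of the same trick follows, with a stand-in encoder and toy enum; the real `EnumEncoder` lives in `llama_toolchain.common.serialize`.

import json
from enum import Enum

import yaml


# Stand-in for llama_toolchain.common.serialize.EnumEncoder.
class EnumEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Enum):
            return obj.value
        return super().default(obj)


# Toy enum and config dict; the real dict comes from PackageConfig(...).dict().
class BuildType(Enum):
    conda_env = "conda_env"
    container = "container"


config = {"name": "local", "build_type": BuildType.conda_env}

# The json round trip turns BuildType.conda_env into the plain string
# "conda_env", so the YAML stays readable and portable.
to_write = json.loads(json.dumps(config, cls=EnumEncoder))
print(yaml.dump(to_write, sort_keys=False))
# name: local
# build_type: conda_env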
llama_toolchain/cli/stack/list.py (new file, 55 lines)
@@ -0,0 +1,55 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import json

from llama_toolchain.cli.subcommand import Subcommand


class StackList(Subcommand):
    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "list-distributions",
            prog="llama stack list-distributions",
            description="Show available Llama Stack Distributions",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_distribution_list_cmd)

    def _add_arguments(self):
        pass

    def _run_distribution_list_cmd(self, args: argparse.Namespace) -> None:
        from llama_toolchain.cli.table import print_table
        from llama_toolchain.core.distribution_registry import (
            available_distribution_specs,
        )

        # eventually, this should query a registry at llama.meta.com/llamastack/distributions
        headers = [
            "Distribution ID",
            "Providers",
            "Description",
        ]

        rows = []
        for spec in available_distribution_specs():
            providers = {k.value: v for k, v in spec.providers.items()}
            rows.append(
                [
                    spec.distribution_id,
                    json.dumps(providers, indent=2),
                    spec.description,
                ]
            )
        print_table(
            rows,
            headers,
            separate_rows=True,
        )
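The `{k.value: v for ...}` comprehension converts the enum-keyed provider map into plain string keys so the JSON table cell renders cleanly. A small sketch with toy data; the real specs come from `available_distribution_specs()`, and the provider IDs below are made up.

import json
from enum import Enum


class Api(Enum):
    inference = "inference"
    memory = "memory"


# Made-up provider map; the real one lives on each DistributionSpec.
providers = {Api.inference: "meta-reference", Api.memory: "meta-reference-faiss"}

# Flatten enum keys to their string values before serializing the cell.
cell = json.dumps({k.value: v for k, v in providers.items()}, indent=2)
print(cell)
# {
#   "inference": "meta-reference",
#   "memory": "meta-reference-faiss"
# }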
llama_toolchain/cli/stack/run.py (new file, 106 lines)
@@ -0,0 +1,106 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse

from pathlib import Path

import pkg_resources
import yaml

from llama_toolchain.cli.subcommand import Subcommand
from llama_toolchain.core.datatypes import *  # noqa: F403
from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR


class StackRun(Subcommand):
    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "run",
            prog="llama stack run",
            description="""start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.""",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_stack_run_cmd)

    def _add_arguments(self):
        from llama_toolchain.core.package import BuildType

        self.parser.add_argument(
            "distribution",
            type=str,
            help="Distribution whose build you want to start",
        )
        self.parser.add_argument(
            "--name",
            type=str,
            help="Name of the build you want to start",
            required=True,
        )
        self.parser.add_argument(
            "--type",
            type=str,
            default="conda_env",
            choices=[v.value for v in BuildType],
        )
        self.parser.add_argument(
            "--port",
            type=int,
            help="Port to run the server on. Defaults to 5000",
            default=5000,
        )
        self.parser.add_argument(
            "--disable-ipv6",
            action="store_true",
            help="Disable IPv6 support",
            default=False,
        )

    def _run_stack_run_cmd(self, args: argparse.Namespace) -> None:
        from llama_toolchain.common.exec import run_with_pty
        from llama_toolchain.core.package import BuildType

        build_type = BuildType(args.type)
        build_dir = BUILDS_BASE_DIR / args.distribution / build_type.descriptor()
        path = build_dir / f"{args.name}.yaml"

        config_file = Path(path)

        if not config_file.exists():
            self.parser.error(
                f"File {str(config_file)} does not exist. Did you run `llama stack build`?"
            )
            return

        with open(config_file, "r") as f:
            config = PackageConfig(**yaml.safe_load(f))

        if not config.distribution_id:
            raise ValueError("Build config appears to be corrupt.")

        if config.docker_image:
            script = pkg_resources.resource_filename(
                "llama_toolchain",
                "core/start_container.sh",
            )
            run_args = [script, config.docker_image]
        else:
            script = pkg_resources.resource_filename(
                "llama_toolchain",
                "core/start_conda_env.sh",
            )
            run_args = [
                script,
                config.conda_env,
            ]

        run_args.extend([str(config_file), str(args.port)])
        if args.disable_ipv6:
            run_args.append("--disable-ipv6")

        run_with_pty(run_args)
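`llama stack run` does no serving itself: it resolves the build's YAML and hands one of two packaged launcher scripts to `run_with_pty` with positional arguments. A sketch of the argument vector it assembles; the env name, config path, and port below are hypothetical examples, and the script paths are resolved via pkg_resources at runtime.

# Sketch of the argv handed to run_with_pty. All concrete values here are
# hypothetical; the real ones come from the parsed PackageConfig and args.
docker_image = None          # set when the build produced a container image
conda_env = "local-stack"    # hypothetical conda env name from the build

if docker_image:
    run_args = ["core/start_container.sh", docker_image]
else:
    run_args = ["core/start_conda_env.sh", conda_env]

# The config file and port are appended as positional arguments...
run_args.extend(["builds/local/conda_env/local-stack.yaml", "5000"])
# ...and --disable-ipv6 is passed through as an optional flag.
disable_ipv6 = True
if disable_ipv6:
    run_args.append("--disable-ipv6")
print(run_args)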
llama_toolchain/cli/stack/stack.py (new file, 32 lines)
@@ -0,0 +1,32 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse

from llama_toolchain.cli.subcommand import Subcommand

from .build import StackBuild
from .configure import StackConfigure
from .list import StackList
from .run import StackRun


class StackParser(Subcommand):
    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "stack",
            prog="llama stack",
            description="Operations for the Llama Stack / Distributions",
        )

        subparsers = self.parser.add_subparsers(title="stack_subcommands")

        # Add sub-commands
        StackBuild.create(subparsers)
        StackConfigure.create(subparsers)
        StackList.create(subparsers)
        StackRun.create(subparsers)
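All five files rely on the same argparse idiom: each subcommand registers its handler with `set_defaults(func=...)`, so the top-level entry point (not part of this diff) presumably only has to call `args.func(args)`. A minimal self-contained sketch of that dispatch pattern:

import argparse


def run_build(args: argparse.Namespace) -> None:
    print(f"building {args.distribution!r}")


parser = argparse.ArgumentParser(prog="llama")
subparsers = parser.add_subparsers(title="subcommands")

# Each subcommand wires its handler in via set_defaults, exactly as the
# Stack* classes above do.
build = subparsers.add_parser("build")
build.add_argument("distribution")
build.set_defaults(func=run_build)

args = parser.parse_args(["build", "adhoc"])
args.func(args)  # -> building 'adhoc'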