llama_toolchain/distribution -> llama_toolchain/core

Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 15:23:51 +00:00)
Commit 3cb67f1f58, parent 81540e6ce8

31 changed files with 49 additions and 45 deletions
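Most of the diff below is a mechanical rename of the llama_toolchain.distribution package to llama_toolchain.core. The one exception is the registry module, which becomes core.distribution_registry rather than core.registry. A minimal sketch of how such a rewrite could be automated follows; the MIGRATIONS table and rewrite_imports helper are illustrative only, not part of this commit.

import re
from pathlib import Path

# Order matters: the registry rename must run before the generic
# distribution -> core rewrite, or the longer pattern would never match.
MIGRATIONS = [
    (r"llama_toolchain\.distribution\.registry", "llama_toolchain.core.distribution_registry"),
    (r"llama_toolchain\.distribution", "llama_toolchain.core"),
    (r"llama_toolchain/distribution", "llama_toolchain/core"),  # paths in shell scripts and MANIFEST globs
]

def rewrite_imports(root: Path) -> None:
    """Apply the rename to every Python, shell, and manifest file under root."""
    for path in root.rglob("*"):
        if not path.is_file() or path.suffix not in {".py", ".sh", ".in", ".yaml"}:
            continue
        text = path.read_text()
        for pattern, replacement in MIGRATIONS:
            text = re.sub(pattern, replacement, text)
        path.write_text(text)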
@@ -1,4 +1,4 @@
 include requirements.txt
 include llama_toolchain/data/*.yaml
-include llama_toolchain/distribution/*.sh
+include llama_toolchain/core/*.sh
 include llama_toolchain/cli/scripts/*.sh
@@ -16,7 +16,7 @@ from pydantic import BaseModel
 from termcolor import cprint

 from llama_models.llama3.api.datatypes import *  # noqa: F403
-from llama_toolchain.distribution.datatypes import RemoteProviderConfig
+from llama_toolchain.core.datatypes import RemoteProviderConfig

 from .api import *  # noqa: F403
 from .event_logger import EventLogger
@@ -6,7 +6,7 @@

 from typing import Dict

-from llama_toolchain.distribution.datatypes import Api, ProviderSpec
+from llama_toolchain.core.datatypes import Api, ProviderSpec

 from .config import MetaReferenceImplConfig

@@ -6,7 +6,7 @@

 from typing import List

-from llama_toolchain.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec
+from llama_toolchain.core.datatypes import Api, InlineProviderSpec, ProviderSpec


 def available_agentic_system_providers() -> List[ProviderSpec]:
@@ -8,13 +8,13 @@ import argparse
 from typing import Dict

 from llama_toolchain.cli.subcommand import Subcommand
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 def parse_dependencies(
     dependencies: str, parser: argparse.ArgumentParser
 ) -> Dict[str, ProviderSpec]:
-    from llama_toolchain.distribution.distribution import api_providers
+    from llama_toolchain.core.distribution import api_providers

     all_providers = api_providers()

@@ -48,8 +48,8 @@ class ApiBuild(Subcommand):
         self.parser.set_defaults(func=self._run_api_build_command)

     def _add_arguments(self):
-        from llama_toolchain.distribution.distribution import stack_apis
-        from llama_toolchain.distribution.package import (
+        from llama_toolchain.core.distribution import stack_apis
+        from llama_toolchain.core.package import (
             BuildType,
         )

@@ -86,7 +86,7 @@ class ApiBuild(Subcommand):
         )

     def _run_api_build_command(self, args: argparse.Namespace) -> None:
-        from llama_toolchain.distribution.package import (
+        from llama_toolchain.core.package import (
             ApiInput,
             BuildType,
             build_package,
@@ -13,7 +13,7 @@ import yaml

 from llama_toolchain.cli.subcommand import Subcommand
 from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 class ApiConfigure(Subcommand):
@@ -31,8 +31,8 @@ class ApiConfigure(Subcommand):
         self.parser.set_defaults(func=self._run_api_configure_cmd)

     def _add_arguments(self):
-        from llama_toolchain.distribution.distribution import stack_apis
-        from llama_toolchain.distribution.package import BuildType
+        from llama_toolchain.core.distribution import stack_apis
+        from llama_toolchain.core.package import BuildType

         allowed_args = [a.name for a in stack_apis()]
         self.parser.add_argument(
@@ -67,7 +67,7 @@ class ApiConfigure(Subcommand):
         )

     def _run_api_configure_cmd(self, args: argparse.Namespace) -> None:
-        from llama_toolchain.distribution.package import BuildType
+        from llama_toolchain.core.package import BuildType

         if args.build_name:
             name = args.build_name
@@ -89,7 +89,7 @@ class ApiConfigure(Subcommand):

 def configure_llama_provider(config_file: Path) -> None:
     from llama_toolchain.common.serialize import EnumEncoder
-    from llama_toolchain.distribution.configure import configure_api_providers
+    from llama_toolchain.core.configure import configure_api_providers

     with open(config_file, "r") as f:
         config = PackageConfig(**yaml.safe_load(f))
@@ -12,7 +12,7 @@ import pkg_resources
 import yaml

 from llama_toolchain.cli.subcommand import Subcommand
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 class ApiStart(Subcommand):
@@ -7,7 +7,7 @@
 import argparse

 from llama_toolchain.cli.subcommand import Subcommand
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 class StackBuild(Subcommand):
@@ -23,8 +23,8 @@ class StackBuild(Subcommand):
         self.parser.set_defaults(func=self._run_stack_build_command)

     def _add_arguments(self):
-        from llama_toolchain.distribution.registry import available_distribution_specs
-        from llama_toolchain.distribution.package import (
+        from llama_toolchain.core.distribution_registry import available_distribution_specs
+        from llama_toolchain.core.package import (
             BuildType,
         )

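This hunk shows the one place the rename is not one-to-one: the registry module becomes distribution_registry. If out-of-tree code needed the old import path to keep working during a transition, a thin shim module could re-export the moved symbols. The following is purely illustrative and does not exist in this commit.

# llama_toolchain/distribution/registry.py -- hypothetical compatibility shim
# Re-export relocated symbols so stale imports keep working during migration.
from llama_toolchain.core.distribution_registry import (  # noqa: F401
    available_distribution_specs,
    resolve_distribution_spec,
)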
@@ -50,8 +50,8 @@ class StackBuild(Subcommand):
         )

     def _run_stack_build_command(self, args: argparse.Namespace) -> None:
-        from llama_toolchain.distribution.registry import resolve_distribution_spec
-        from llama_toolchain.distribution.package import (
+        from llama_toolchain.core.distribution_registry import resolve_distribution_spec
+        from llama_toolchain.core.package import (
             ApiInput,
             BuildType,
             build_package,
@@ -13,7 +13,7 @@ from termcolor import cprint

 from llama_toolchain.cli.subcommand import Subcommand
 from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 class StackConfigure(Subcommand):
@@ -31,8 +31,10 @@ class StackConfigure(Subcommand):
         self.parser.set_defaults(func=self._run_stack_configure_cmd)

     def _add_arguments(self):
-        from llama_toolchain.distribution.package import BuildType
-        from llama_toolchain.distribution.registry import available_distribution_specs
+        from llama_toolchain.core.distribution_registry import (
+            available_distribution_specs,
+        )
+        from llama_toolchain.core.package import BuildType

         self.parser.add_argument(
             "--build-name",
@@ -62,7 +64,7 @@ class StackConfigure(Subcommand):
         )

     def _run_stack_configure_cmd(self, args: argparse.Namespace) -> None:
-        from llama_toolchain.distribution.package import BuildType
+        from llama_toolchain.core.package import BuildType

         if args.build_name:
             name = args.build_name
@@ -84,8 +86,8 @@ class StackConfigure(Subcommand):

 def configure_llama_distribution(config_file: Path) -> None:
     from llama_toolchain.common.serialize import EnumEncoder
-    from llama_toolchain.distribution.configure import configure_api_providers
-    from llama_toolchain.distribution.registry import resolve_distribution_spec
+    from llama_toolchain.core.configure import configure_api_providers
+    from llama_toolchain.core.distribution_registry import resolve_distribution_spec

     with open(config_file, "r") as f:
         config = PackageConfig(**yaml.safe_load(f))
@@ -27,7 +27,9 @@ class StackList(Subcommand):

     def _run_distribution_list_cmd(self, args: argparse.Namespace) -> None:
         from llama_toolchain.cli.table import print_table
-        from llama_toolchain.distribution.registry import available_distribution_specs
+        from llama_toolchain.core.distribution_registry import (
+            available_distribution_specs,
+        )

         # eventually, this should query a registry at llama.meta.com/llamastack/distributions
         headers = [
@@ -12,7 +12,7 @@ import pkg_resources
 import yaml

 from llama_toolchain.cli.subcommand import Subcommand
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 class StackStart(Subcommand):
@@ -88,7 +88,7 @@ add_to_docker <<EOF
 # This would be good in production but for debugging flexibility lets not add it right now
 # We need a more solid production ready entrypoint.sh anyway
 #
-# ENTRYPOINT ["python", "-m", "llama_toolchain.distribution.server"]
+# ENTRYPOINT ["python", "-m", "llama_toolchain.core.server"]

 EOF

@@ -6,12 +6,12 @@

 from typing import Any, Dict

-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403
 from termcolor import cprint

 from llama_toolchain.common.prompt_for_config import prompt_for_config
-from llama_toolchain.distribution.distribution import api_providers
-from llama_toolchain.distribution.dynamic import instantiate_class_type
+from llama_toolchain.core.distribution import api_providers
+from llama_toolchain.core.dynamic import instantiate_class_type


 def configure_api_providers(existing_configs: Dict[str, Any]) -> None:
@@ -139,7 +139,7 @@ def remote_provider_spec(
    config_class = (
        adapter.config_class
        if adapter and adapter.config_class
-       else "llama_toolchain.distribution.datatypes.RemoteProviderConfig"
+       else "llama_toolchain.core.datatypes.RemoteProviderConfig"
    )
    provider_id = remote_provider_id(adapter.adapter_id) if adapter else "remote"

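The default config_class here is a dotted string rather than a class object; it is resolved at runtime by instantiate_class_type from llama_toolchain.core.dynamic (renamed in the configure.py hunk above). A minimal sketch of how such dotted-path resolution typically works follows; it is an assumption about the helper's behavior, not its actual source.

import importlib

def instantiate_class_type(fully_qualified_name: str):
    # Split "pkg.module.ClassName" into module path and class name,
    # import the module, and return the class object.
    module_name, class_name = fully_qualified_name.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)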
@@ -17,11 +17,11 @@ from pydantic import BaseModel
 from termcolor import cprint

 from llama_toolchain.common.config_dirs import BUILDS_BASE_DIR
-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403

 from llama_toolchain.common.exec import run_with_pty
 from llama_toolchain.common.serialize import EnumEncoder
-from llama_toolchain.distribution.distribution import api_providers
+from llama_toolchain.core.distribution import api_providers


 class BuildType(Enum):
@@ -40,7 +40,7 @@ class Dependencies(BaseModel):
 def get_dependencies(
     provider: ProviderSpec, dependencies: Dict[str, ProviderSpec]
 ) -> Dependencies:
-    from llama_toolchain.distribution.distribution import SERVER_DEPENDENCIES
+    from llama_toolchain.core.distribution import SERVER_DEPENDENCIES

     pip_packages = provider.pip_packages
     for dep in dependencies.values():
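The hunk cuts off mid-function, but the shape is clear: start from the provider's own pip_packages and fold in each dependency's packages alongside SERVER_DEPENDENCIES. A hedged reconstruction of that aggregation pattern follows; everything beyond the lines shown above is a guess, and the real function returns a Dependencies model rather than a bare list.

from typing import Dict, List

def aggregate_pip_packages(provider, dependencies: Dict) -> List[str]:
    # Provider-specific packages first, then each dependency's packages,
    # assuming every spec exposes a pip_packages list as shown in the hunk.
    packages = list(provider.pip_packages)
    for dep in dependencies.values():
        packages.extend(dep.pip_packages)
    return packages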
@@ -36,6 +36,6 @@ eval "$(conda shell.bash hook)"
 conda deactivate && conda activate "$env_name"

 $CONDA_PREFIX/bin/python \
-  -m llama_toolchain.distribution.server \
+  -m llama_toolchain.core.server \
   --yaml_config "$yaml_config" \
   --port "$port" "$@"
@@ -37,6 +37,6 @@ podman run -it \
   -p $port:$port \
   -v "$yaml_config:/app/config.yaml" \
   $docker_image \
-  python -m llama_toolchain.distribution.server \
+  python -m llama_toolchain.core.server \
   --yaml_config /app/config.yaml \
   --port $port "$@"
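Both launch paths, conda and podman, now start the server as the module llama_toolchain.core.server. For reference, an equivalent invocation from Python; the config path and port below are placeholder values, not defaults from this repository.

import subprocess
import sys

# Launch the stack server under the renamed module path.
subprocess.run(
    [
        sys.executable, "-m", "llama_toolchain.core.server",
        "--yaml_config", "/app/config.yaml",  # placeholder config path
        "--port", "5000",  # placeholder port
    ],
    check=True,
)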
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_toolchain.distribution.datatypes import RemoteProviderConfig
+from llama_toolchain.core.datatypes import RemoteProviderConfig


 async def get_adapter_impl(config: RemoteProviderConfig, _deps):
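The adapter entry points across these client packages share one signature: an async factory that receives a RemoteProviderConfig plus resolved dependencies and returns the client implementation. A generic sketch of that shape is below; the stub client class, the url attribute on the config, and the initialize step are assumptions for illustration, not code from this commit.

from llama_toolchain.core.datatypes import RemoteProviderConfig

class _StubClient:
    # Stand-in for a per-API client class (e.g. an inference or safety client).
    def __init__(self, base_url: str) -> None:
        self.base_url = base_url

    async def initialize(self) -> None:
        pass  # a real client would verify the remote endpoint here

async def get_adapter_impl(config: RemoteProviderConfig, _deps):
    impl = _StubClient(config.url)  # assumes the config carries the remote URL
    await impl.initialize()
    return impl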
@@ -13,7 +13,7 @@ import httpx
 from pydantic import BaseModel
 from termcolor import cprint

-from llama_toolchain.distribution.datatypes import RemoteProviderConfig
+from llama_toolchain.core.datatypes import RemoteProviderConfig

 from .api import (
     ChatCompletionRequest,
@@ -6,7 +6,7 @@

 from typing import List

-from llama_toolchain.distribution.datatypes import *  # noqa: F403
+from llama_toolchain.core.datatypes import *  # noqa: F403


 def available_inference_providers() -> List[ProviderSpec]:
@@ -11,7 +11,7 @@ from typing import Any, Dict, List, Optional
 import fire
 import httpx

-from llama_toolchain.distribution.datatypes import RemoteProviderConfig
+from llama_toolchain.core.datatypes import RemoteProviderConfig

 from .api import *  # noqa: F403

@@ -6,7 +6,7 @@

 from typing import List

-from llama_toolchain.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec
+from llama_toolchain.core.datatypes import Api, InlineProviderSpec, ProviderSpec


 def available_memory_providers() -> List[ProviderSpec]:
@@ -15,7 +15,7 @@ from llama_models.llama3.api.datatypes import UserMessage
 from pydantic import BaseModel
 from termcolor import cprint

-from llama_toolchain.distribution.datatypes import RemoteProviderConfig
+from llama_toolchain.core.datatypes import RemoteProviderConfig

 from .api import *  # noqa: F403

@@ -6,7 +6,7 @@

 from typing import List

-from llama_toolchain.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec
+from llama_toolchain.core.datatypes import Api, InlineProviderSpec, ProviderSpec


 def available_safety_providers() -> List[ProviderSpec]: