move core -> distribution

Ashwin Bharambe 2024-09-17 11:29:07 -07:00
parent bbf0b59ae4
commit 17172a8bf9
46 changed files with 70 additions and 57 deletions
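Nearly every hunk below is the same mechanical rename: llama_stack.core becomes llama_stack.distribution, the llama_stack.common helpers move under llama_stack.distribution.utils (with core.package renamed to distribution.build and core.dynamic to distribution.utils.dynamic), and the packaged core/*.sh script paths become distribution/*.sh; the one outlier is llama_stack.stack, which moves to llama_stack.apis.stack. A hypothetical codemod expressing that mapping — the rename table is read off the hunks below, but the script itself is illustrative and not part of the commit:

    # Illustrative sketch only; this script is not part of the commit.
    import pathlib
    import re

    RENAMES = [
        # Specific renames first, so the generic core -> distribution rule
        # does not rewrite core.package to distribution.package.
        (r"llama_stack\.core\.package", "llama_stack.distribution.build"),
        (r"llama_stack\.core\.dynamic", "llama_stack.distribution.utils.dynamic"),
        (r"llama_stack\.common\.", "llama_stack.distribution.utils."),
        (r"llama_stack\.core\.", "llama_stack.distribution."),
        (r"llama_stack\.stack", "llama_stack.apis.stack"),
        (r'"core/', '"distribution/'),  # packaged shell-script resource paths
    ]

    def apply_renames(text: str) -> str:
        for pattern, replacement in RENAMES:
            text = re.sub(pattern, replacement, text)
        return text

    for path in pathlib.Path("llama_stack").rglob("*.py"):
        path.write_text(apply_renames(path.read_text()))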

@@ -16,7 +16,7 @@ from pydantic import BaseModel
 from termcolor import cprint
 from llama_models.llama3.api.datatypes import * # noqa: F403
-from llama_stack.core.datatypes import RemoteProviderConfig
+from llama_stack.distribution.datatypes import RemoteProviderConfig
 from .agents import * # noqa: F403
 from .event_logger import EventLogger

@@ -11,7 +11,7 @@ from typing import Any, AsyncGenerator
 import fire
 import httpx
-from llama_stack.core.datatypes import RemoteProviderConfig
+from llama_stack.distribution.datatypes import RemoteProviderConfig
 from pydantic import BaseModel
 from termcolor import cprint

@@ -14,7 +14,7 @@ from typing import Any, Dict, List, Optional
 import fire
 import httpx
-from llama_stack.core.datatypes import RemoteProviderConfig
+from llama_stack.distribution.datatypes import RemoteProviderConfig
 from termcolor import cprint
 from .memory import * # noqa: F403

@@ -14,7 +14,7 @@ import httpx
 from llama_models.llama3.api.datatypes import UserMessage
-from llama_stack.core.datatypes import RemoteProviderConfig
+from llama_stack.distribution.datatypes import RemoteProviderConfig
 from pydantic import BaseModel
 from termcolor import cprint

@@ -92,7 +92,7 @@ def _hf_download(
     from huggingface_hub import snapshot_download
     from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
-    from llama_stack.common.model_utils import model_local_dir
+    from llama_stack.distribution.utils.model_utils import model_local_dir

     repo_id = model.huggingface_repo
     if repo_id is None:
@@ -126,7 +126,7 @@ def _hf_download(
 def _meta_download(model: "Model", meta_url: str):
     from llama_models.sku_list import llama_meta_net_info
-    from llama_stack.common.model_utils import model_local_dir
+    from llama_stack.distribution.utils.model_utils import model_local_dir

     output_dir = Path(model_local_dir(model.descriptor()))
     os.makedirs(output_dir, exist_ok=True)
@@ -188,7 +188,7 @@ class Manifest(BaseModel):
 def _download_from_manifest(manifest_file: str):
-    from llama_stack.common.model_utils import model_local_dir
+    from llama_stack.distribution.utils.model_utils import model_local_dir

     with open(manifest_file, "r") as f:
         d = json.load(f)
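All three hunks above touch model_local_dir, which maps a model descriptor to the local directory its checkpoint is downloaded into; only its import path changes in this commit. A plausible sketch of the helper, assuming the conventional ~/.llama checkpoint location (the body is not shown in this diff):

    # Plausible sketch of model_local_dir; the ~/.llama/checkpoints location is
    # an assumption, only the helper's new import path appears in this diff.
    import os

    def model_local_dir(descriptor: str) -> str:
        return os.path.expanduser(os.path.join("~/.llama", "checkpoints", descriptor))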

@@ -13,7 +13,7 @@ from termcolor import colored
 from llama_stack.cli.subcommand import Subcommand
 from llama_stack.cli.table import print_table
-from llama_stack.common.serialize import EnumEncoder
+from llama_stack.distribution.utils.serialize import EnumEncoder

 class ModelDescribe(Subcommand):

@@ -7,7 +7,7 @@
 import argparse

 from llama_stack.cli.subcommand import Subcommand
-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403
 from pathlib import Path

 import yaml
@@ -44,9 +44,9 @@ class StackBuild(Subcommand):
         import json
         import os

-        from llama_stack.common.config_dirs import DISTRIBS_BASE_DIR
-        from llama_stack.common.serialize import EnumEncoder
-        from llama_stack.core.package import ApiInput, build_image, ImageType
+        from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
+        from llama_stack.distribution.utils.serialize import EnumEncoder
+        from llama_stack.distribution.build import ApiInput, build_image, ImageType
         from termcolor import cprint

         # save build.yaml spec for building same distribution again
@@ -74,8 +74,8 @@ class StackBuild(Subcommand):
         )

     def _run_stack_build_command(self, args: argparse.Namespace) -> None:
-        from llama_stack.common.prompt_for_config import prompt_for_config
-        from llama_stack.core.dynamic import instantiate_class_type
+        from llama_stack.distribution.utils.prompt_for_config import prompt_for_config
+        from llama_stack.distribution.utils.dynamic import instantiate_class_type

         if not args.config:
             self.parser.error(

@@ -14,10 +14,10 @@ import yaml
 from termcolor import cprint

 from llama_stack.cli.subcommand import Subcommand
-from llama_stack.common.config_dirs import BUILDS_BASE_DIR
-from llama_stack.common.exec import run_with_pty
-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
+from llama_stack.distribution.utils.exec import run_with_pty
+from llama_stack.distribution.datatypes import * # noqa: F403

 import os
@@ -49,7 +49,7 @@ class StackConfigure(Subcommand):
         )

     def _run_stack_configure_cmd(self, args: argparse.Namespace) -> None:
-        from llama_stack.core.package import ImageType
+        from llama_stack.distribution.build import ImageType

         docker_image = None
         build_config_file = Path(args.config)
@@ -66,7 +66,7 @@ class StackConfigure(Subcommand):
         os.makedirs(builds_dir, exist_ok=True)

         script = pkg_resources.resource_filename(
-            "llama_stack", "core/configure_container.sh"
+            "llama_stack", "distribution/configure_container.sh"
         )
         script_args = [script, docker_image, str(builds_dir)]
@@ -95,8 +95,8 @@ class StackConfigure(Subcommand):
         build_config: BuildConfig,
         output_dir: Optional[str] = None,
     ):
-        from llama_stack.common.serialize import EnumEncoder
-        from llama_stack.core.configure import configure_api_providers
+        from llama_stack.distribution.configure import configure_api_providers
+        from llama_stack.distribution.utils.serialize import EnumEncoder

         builds_dir = BUILDS_BASE_DIR / build_config.image_type
         if output_dir:

@@ -26,7 +26,7 @@ class StackListApis(Subcommand):
     def _run_apis_list_cmd(self, args: argparse.Namespace) -> None:
         from llama_stack.cli.table import print_table
-        from llama_stack.core.distribution import stack_apis
+        from llama_stack.distribution.distribution import stack_apis

         # eventually, this should query a registry at llama.meta.com/llamastack/distributions
         headers = [

@@ -22,7 +22,7 @@ class StackListProviders(Subcommand):
         self.parser.set_defaults(func=self._run_providers_list_cmd)

     def _add_arguments(self):
-        from llama_stack.core.distribution import stack_apis
+        from llama_stack.distribution.distribution import stack_apis

         api_values = [a.value for a in stack_apis()]
         self.parser.add_argument(
@@ -34,7 +34,7 @@ class StackListProviders(Subcommand):
     def _run_providers_list_cmd(self, args: argparse.Namespace) -> None:
         from llama_stack.cli.table import print_table
-        from llama_stack.core.distribution import Api, api_providers
+        from llama_stack.distribution.distribution import Api, api_providers

         all_providers = api_providers()
         providers_for_api = all_providers[Api(args.api)]

@@ -12,7 +12,7 @@ import pkg_resources
 import yaml

 from llama_stack.cli.subcommand import Subcommand
-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 class StackRun(Subcommand):
@@ -47,7 +47,7 @@ class StackRun(Subcommand):
         )

     def _run_stack_run_cmd(self, args: argparse.Namespace) -> None:
-        from llama_stack.common.exec import run_with_pty
+        from llama_stack.distribution.utils.exec import run_with_pty

         if not args.config:
             self.parser.error("Must specify a config file to run")
@@ -68,13 +68,13 @@ class StackRun(Subcommand):
         if config.docker_image:
             script = pkg_resources.resource_filename(
                 "llama_stack",
-                "core/start_container.sh",
+                "distribution/start_container.sh",
             )
             run_args = [script, config.docker_image]
         else:
             script = pkg_resources.resource_filename(
                 "llama_stack",
-                "core/start_conda_env.sh",
+                "distribution/start_conda_env.sh",
             )
             run_args = [
                 script,

@@ -12,12 +12,12 @@ from pydantic import BaseModel
 from termcolor import cprint

-from llama_stack.common.exec import run_with_pty
-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.utils.exec import run_with_pty
+from llama_stack.distribution.datatypes import * # noqa: F403

 from pathlib import Path

-from llama_stack.core.distribution import api_providers, SERVER_DEPENDENCIES
+from llama_stack.distribution.distribution import api_providers, SERVER_DEPENDENCIES

 class ImageType(Enum):
@@ -68,7 +68,7 @@ def build_image(build_config: BuildConfig, build_file_path: Path):
     if build_config.image_type == ImageType.docker.value:
         script = pkg_resources.resource_filename(
-            "llama_stack", "core/build_container.sh"
+            "llama_stack", "distribution/build_container.sh"
         )
         args = [
             script,
@@ -79,7 +79,7 @@ def build_image(build_config: BuildConfig, build_file_path: Path):
         ]
     else:
         script = pkg_resources.resource_filename(
-            "llama_stack", "core/build_conda_env.sh"
+            "llama_stack", "distribution/build_conda_env.sh"
         )
         args = [
             script,
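Both branches of build_image locate their shell script with pkg_resources.resource_filename, which resolves a path relative to an installed package into an absolute filesystem path; that is why the core/*.sh resource strings in these hunks have to track the directory rename. A quick illustration (the printed path depends on the install location):

    # resource_filename resolves a package-relative resource to a real path.
    import pkg_resources

    script = pkg_resources.resource_filename(
        "llama_stack", "distribution/build_conda_env.sh"
    )
    print(script)  # e.g. .../site-packages/llama_stack/distribution/build_conda_env.sh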

@@ -90,7 +90,7 @@ add_to_docker <<EOF
 # This would be good in production but for debugging flexibility lets not add it right now
 # We need a more solid production ready entrypoint.sh anyway
 #
-# ENTRYPOINT ["python", "-m", "llama_stack.core.server"]
+# ENTRYPOINT ["python", "-m", "llama_stack.distribution.server"]
 EOF

@@ -8,12 +8,13 @@ from typing import Any
 from pydantic import BaseModel

-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 from termcolor import cprint

-from llama_stack.common.prompt_for_config import prompt_for_config
-from llama_stack.core.distribution import api_providers, stack_apis
-from llama_stack.core.dynamic import instantiate_class_type
+from llama_stack.distribution.distribution import api_providers, stack_apis
+from llama_stack.distribution.utils.dynamic import instantiate_class_type
+
+from llama_stack.distribution.utils.prompt_for_config import prompt_for_config

 # These are hacks so we can re-use the `prompt_for_config` utility

@@ -168,7 +168,7 @@ def remote_provider_spec(
     config_class = (
         adapter.config_class
         if adapter and adapter.config_class
-        else "llama_stack.core.datatypes.RemoteProviderConfig"
+        else "llama_stack.distribution.datatypes.RemoteProviderConfig"
     )
     provider_id = remote_provider_id(adapter.adapter_id) if adapter else "remote"

@@ -0,0 +1,12 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import fire
+
+from .server import main
+
+if __name__ == "__main__":
+    fire.Fire(main)
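The new __main__.py hands main to fire.Fire, which exposes the function's parameters as command-line flags. Judging from the launch scripts later in this diff, which pass --yaml_config and --port, the wiring behaves roughly like this sketch (main's signature and default port are inferred, not shown here):

    # Sketch of the fire entrypoint; main's parameters are inferred from the
    # --yaml_config/--port flags used by the launch scripts in this diff.
    import fire

    def main(yaml_config: str, port: int = 5000):
        ...  # load the YAML config and start the server (body elided)

    if __name__ == "__main__":
        # `python -m <module> --yaml_config run.yaml --port 5000` maps each
        # flag onto the matching parameter of main().
        fire.Fire(main)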

@@ -45,7 +45,7 @@ from llama_stack.providers.utils.telemetry.tracing import (
     SpanStatus,
     start_trace,
 )
-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 from .distribution import api_endpoints, api_providers
 from .dynamic import instantiate_provider

@@ -37,6 +37,6 @@ eval "$(conda shell.bash hook)"
 conda deactivate && conda activate "$env_name"

 $CONDA_PREFIX/bin/python \
-  -m llama_stack.core.server \
+  -m llama_stack.distribution.server \
   --yaml_config "$yaml_config" \
   --port "$port" "$@"

@@ -38,6 +38,6 @@ podman run -it \
   -p $port:$port \
   -v "$yaml_config:/app/config.yaml" \
   $docker_image \
-  python -m llama_stack.core.server \
+  python -m llama_stack.distribution.server \
   --yaml_config /app/config.yaml \
   --port $port "$@"

@@ -7,7 +7,7 @@
 import importlib
 from typing import Any, Dict

-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 def instantiate_class_type(fully_qualified_name):
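This helper is why the rename also has to touch string literals: instantiate_class_type loads classes from fully-qualified dotted names, such as the "llama_stack.distribution.datatypes.RemoteProviderConfig" fallback a few hunks up, and no import checker flags a stale string. A minimal sketch of what such a helper typically does (only the signature appears in this diff; the importlib-based body is an assumption):

    # Minimal sketch; only the signature of instantiate_class_type appears in
    # the diff, the body shown here is an assumption.
    import importlib

    def instantiate_class_type(fully_qualified_name: str):
        # "llama_stack.distribution.datatypes.RemoteProviderConfig" splits into
        # module "llama_stack.distribution.datatypes" and an attribute name.
        module_name, class_name = fully_qualified_name.rsplit(".", 1)
        return getattr(importlib.import_module(module_name), class_name)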

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_stack.core.datatypes import RemoteProviderConfig
+from llama_stack.distribution.datatypes import RemoteProviderConfig

 async def get_adapter_impl(config: RemoteProviderConfig, _deps):

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_stack.core.datatypes import RemoteProviderConfig
+from llama_stack.distribution.datatypes import RemoteProviderConfig

 async def get_adapter_impl(config: RemoteProviderConfig, _deps):

@@ -6,7 +6,7 @@
 from typing import Dict

-from llama_stack.core.datatypes import Api, ProviderSpec
+from llama_stack.distribution.datatypes import Api, ProviderSpec

 from .config import MetaReferenceImplConfig

@@ -30,7 +30,7 @@ from llama_models.llama3.reference_impl.model import Transformer
 from llama_models.sku_list import resolve_model
 from llama_stack.apis.inference import QuantizationType
-from llama_stack.common.model_utils import model_local_dir
+from llama_stack.distribution.utils.model_utils import model_local_dir
 from termcolor import cprint

 from .config import MetaReferenceImplConfig

@@ -8,7 +8,7 @@ import asyncio
 from llama_models.sku_list import resolve_model
-from llama_stack.common.model_utils import model_local_dir
+from llama_stack.distribution.utils.model_utils import model_local_dir
 from llama_stack.apis.safety import * # noqa

 from .config import SafetyConfig

@@ -6,7 +6,7 @@
 from typing import List

-from llama_stack.core.datatypes import Api, InlineProviderSpec, ProviderSpec
+from llama_stack.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec

 def available_providers() -> List[ProviderSpec]:

@@ -6,7 +6,7 @@
 from typing import List

-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 def available_providers() -> List[ProviderSpec]:

@@ -6,7 +6,7 @@
 from typing import List

-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 def available_providers() -> List[ProviderSpec]:

@@ -6,7 +6,7 @@
 from typing import List

-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 EMBEDDING_DEPS = [
     "blobfile",

@@ -6,7 +6,7 @@
 from typing import List

-from llama_stack.core.datatypes import Api, InlineProviderSpec, ProviderSpec
+from llama_stack.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec

 def available_providers() -> List[ProviderSpec]:

@@ -6,7 +6,7 @@
 from typing import List

-from llama_stack.core.datatypes import * # noqa: F403
+from llama_stack.distribution.datatypes import * # noqa: F403

 def available_providers() -> List[ProviderSpec]:

@@ -6,7 +6,7 @@
 from typing import Any, List, Tuple

-from llama_stack.core.datatypes import Api
+from llama_stack.distribution.datatypes import Api

 async def get_router_impl(inner_impls: List[Tuple[str, Any]], deps: List[Api]):

@@ -6,7 +6,7 @@
 from typing import Any, Dict, List, Tuple

-from llama_stack.core.datatypes import Api
+from llama_stack.distribution.datatypes import Api
 from llama_stack.apis.memory import * # noqa: F403

@@ -31,7 +31,7 @@ from .pyopenapi.utility import Specification
 schema_utils.json_schema_type = json_schema_type

-from llama_stack.stack import LlamaStack
+from llama_stack.apis.stack import LlamaStack

 # TODO: this should be fixed in the generator itself so it reads appropriate annotations