From cee3816609cb7687f7a26d52b53df08c2ee953db Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Thu, 16 Jan 2025 13:44:53 -0800
Subject: [PATCH] Make llama stack build not create a new conda by default
 (#788)

## What does this PR do?

So far `llama stack build` has always created a separate conda environment
for packaging the dependencies of a distribution. The main reason to do so is
isolation -- distributions are composed of providers which can have a variety
of potentially conflicting dependencies. That said, this has created
significant annoyance for new users since it is not at all transparent. The
fact that `llama stack run` is actually running the code in some other conda
is very surprising.

This PR tries to make things better.

- Both `llama stack build` and `llama stack run` now accept an `--image-name`
  argument, which represents the (conda, docker, virtualenv) image you want to
  operate upon.
- For the default (conda) mode, the script checks whether a conda environment
  is currently active and, if so, uses it.
- If `--image-name` is provided, that option is used. In this case, an
  environment is created if needed.
- Environment names are no longer automatically prefixed with `llamastack-`.

## Test Plan

Started in a conda environment, ran `llama stack build --template fireworks`;
verified that it built successfully into the current environment and stored the
build file at `$CONDA_PREFIX/llamastack-build.yaml`. Ran
`llama stack run fireworks`, which started correctly in the current
environment.

Ran the same build command outside of conda. It failed, asking for
`--image-name`. Ran it with
`llama stack build --template fireworks --image-name foo`. This successfully
created a conda environment called `foo` and installed the dependencies.

Ran `llama stack run fireworks` outside conda, which failed. Activated a
different conda environment and ran again; it failed, saying it could not find
the `llamastack-build.yaml` file. Then used the `--image-name foo` option and
it ran successfully.
---
 llama_stack/cli/stack/_build.py             | 307 ++++++++++++++++++
 llama_stack/cli/stack/build.py              | 292 +----------------
 llama_stack/cli/stack/run.py                |  63 +++-
 llama_stack/distribution/build.py           |  20 +-
 llama_stack/distribution/build_conda_env.sh |  11 +-
 llama_stack/distribution/datatypes.py       |   6 +-
 llama_stack/distribution/start_conda_env.sh |   3 +-
 .../quantization/scripts/build_conda.sh     |  36 --
 8 files changed, 400 insertions(+), 338 deletions(-)
 create mode 100644 llama_stack/cli/stack/_build.py
 delete mode 100644 llama_stack/providers/inline/inference/meta_reference/quantization/scripts/build_conda.sh

diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py
new file mode 100644
index 000000000..08c987a50
--- /dev/null
+++ b/llama_stack/cli/stack/_build.py
@@ -0,0 +1,307 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
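+
+# NOTE: this module holds the actual implementation of `llama stack build`;
+# the CLI entry point in build.py imports it lazily so the CLI stays fast to
+# load with minimal dependencies (see the comment in build.py below).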
+
+import argparse
+import importlib.resources
+import json
+import os
+import shutil
+import textwrap
+from functools import lru_cache
+from pathlib import Path
+from typing import Dict, Optional
+
+import yaml
+from prompt_toolkit import prompt
+from prompt_toolkit.completion import WordCompleter
+from prompt_toolkit.validation import Validator
+from termcolor import cprint
+
+from llama_stack.cli.table import print_table
+
+from llama_stack.distribution.build import build_image, ImageType
+from llama_stack.distribution.datatypes import (
+    BuildConfig,
+    DistributionSpec,
+    Provider,
+    StackRunConfig,
+)
+from llama_stack.distribution.distribution import get_provider_registry
+from llama_stack.distribution.resolver import InvalidProviderError
+from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
+from llama_stack.distribution.utils.dynamic import instantiate_class_type
+from llama_stack.providers.datatypes import Api
+
+
+TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates"
+
+
+@lru_cache()
+def available_templates_specs() -> Dict[str, BuildConfig]:
+    import yaml
+
+    template_specs = {}
+    for p in TEMPLATES_PATH.rglob("*build.yaml"):
+        template_name = p.parent.name
+        with open(p, "r") as f:
+            build_config = BuildConfig(**yaml.safe_load(f))
+        template_specs[template_name] = build_config
+    return template_specs
+
+
+def run_stack_build_command(
+    parser: argparse.ArgumentParser, args: argparse.Namespace
+) -> None:
+    if args.list_templates:
+        return _run_template_list_cmd()
+
+    current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
+    image_name = args.image_name or current_conda_env
+
+    if args.template:
+        available_templates = available_templates_specs()
+        if args.template not in available_templates:
+            cprint(
+                f"Could not find template {args.template}. Please run `llama stack build --list-templates` to check out the available templates",
+                color="red",
+            )
+            return
+        build_config = available_templates[args.template]
+        if args.image_type:
+            build_config.image_type = args.image_type
+        else:
+            cprint(
+                f"Please specify an image-type (docker | conda | venv) for {args.template}",
+                color="red",
+            )
+            return
+        _run_stack_build_command_from_build_config(
+            build_config,
+            image_name=image_name,
+            template_name=args.template,
+        )
+        return
+
+    if not args.config and not args.template:
+        name = prompt(
+            "> Enter a name for your Llama Stack (e.g. my-local-stack): ",
+            validator=Validator.from_callable(
+                lambda x: len(x) > 0,
+                error_message="Name cannot be empty, please enter a name",
+            ),
+        )
+
+        image_type = prompt(
+            "> Enter the image type you want your Llama Stack to be built as (docker or conda or venv): ",
+            validator=Validator.from_callable(
+                lambda x: x in ["docker", "conda", "venv"],
+                error_message="Invalid image type, please enter conda or docker or venv",
+            ),
+            default="conda",
+        )
+
+        if image_type == "conda":
+            if not image_name:
+                cprint(
+                    f"No current conda environment detected or specified, will create a new conda environment with the name `llamastack-{name}`",
+                    color="yellow",
+                )
+                image_name = f"llamastack-{name}"
+            else:
+                cprint(
+                    f"Using conda environment {image_name}",
+                    color="green",
+                )
+
+        cprint(
+            textwrap.dedent(
+                """
+                Llama Stack is composed of several APIs working together. Let's select
+                the provider types (implementations) you want to use for these APIs.
+                """,
+            ),
+            color="green",
+        )
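+
+        # Interactive provider selection: for each API in the provider
+        # registry, prompt the user to pick exactly one provider type;
+        # the "remote" placeholder entries are excluded from the choices.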
+        print("Tip: use <TAB> to see options for the providers.\n")
+
+        providers = dict()
+        for api, providers_for_api in get_provider_registry().items():
+            available_providers = [
+                x
+                for x in providers_for_api.keys()
+                if x not in ("remote", "remote::sample")
+            ]
+            api_provider = prompt(
+                "> Enter provider for API {}: ".format(api.value),
+                completer=WordCompleter(available_providers),
+                complete_while_typing=True,
+                validator=Validator.from_callable(
+                    lambda x: x in available_providers,
+                    error_message="Invalid provider, use <TAB> to see options",
+                ),
+            )
+
+            providers[api.value] = api_provider
+
+        description = prompt(
+            "\n > (Optional) Enter a short description for your Llama Stack: ",
+            default="",
+        )
+
+        distribution_spec = DistributionSpec(
+            providers=providers,
+            description=description,
+        )
+
+        build_config = BuildConfig(
+            image_type=image_type, distribution_spec=distribution_spec
+        )
+    else:
+        with open(args.config, "r") as f:
+            try:
+                build_config = BuildConfig(**yaml.safe_load(f))
+            except Exception as e:
+                cprint(
+                    f"Could not parse config file {args.config}: {e}",
+                    color="red",
+                )
+                return
+
+    _run_stack_build_command_from_build_config(build_config, image_name=image_name)
+
+
+def _generate_run_config(
+    build_config: BuildConfig, build_dir: Path, image_name: str
+) -> None:
+    """
+    Generate a run.yaml template file for user to edit from a build.yaml file
+    """
+    apis = list(build_config.distribution_spec.providers.keys())
+    run_config = StackRunConfig(
+        docker_image=(
+            image_name if build_config.image_type == ImageType.docker.value else None
+        ),
+        image_name=image_name,
+        apis=apis,
+        providers={},
+    )
+    # build providers dict
+    provider_registry = get_provider_registry()
+    for api in apis:
+        run_config.providers[api] = []
+        provider_types = build_config.distribution_spec.providers[api]
+        if isinstance(provider_types, str):
+            provider_types = [provider_types]
+
+        for i, provider_type in enumerate(provider_types):
+            pid = provider_type.split("::")[-1]
+
+            p = provider_registry[Api(api)][provider_type]
+            if p.deprecation_error:
+                raise InvalidProviderError(p.deprecation_error)
+
+            config_type = instantiate_class_type(
+                provider_registry[Api(api)][provider_type].config_class
+            )
+            if hasattr(config_type, "sample_run_config"):
+                config = config_type.sample_run_config(
+                    __distro_dir__=f"distributions/{image_name}"
+                )
+            else:
+                config = {}
+
+            p_spec = Provider(
+                provider_id=f"{pid}-{i}" if len(provider_types) > 1 else pid,
+                provider_type=provider_type,
+                config=config,
+            )
+            run_config.providers[api].append(p_spec)
+
+    run_config_file = build_dir / f"{image_name}-run.yaml"
+
+    with open(run_config_file, "w") as f:
+        to_write = json.loads(run_config.model_dump_json())
+        f.write(yaml.dump(to_write, sort_keys=False))
+
+    cprint(
+        f"You can now edit {run_config_file} and run `llama stack run {image_name}`",
+        color="green",
+    )
+
+
+def _run_stack_build_command_from_build_config(
+    build_config: BuildConfig,
+    image_name: Optional[str] = None,
+    template_name: Optional[str] = None,
+) -> None:
+    if build_config.image_type == ImageType.docker.value:
+        if template_name:
+            image_name = f"distribution-{template_name}"
+        else:
+            if not image_name:
+                raise ValueError(
+                    "Please specify an image name when building a docker image without a template"
+                )
+    elif build_config.image_type == ImageType.conda.value:
+        if not image_name:
+            raise ValueError("Please specify an image name when building a conda image")
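+
+    # The build spec is persisted on disk so the same distribution can be
+    # rebuilt later: under DISTRIBS_BASE_DIR/<template> when building from a
+    # template, otherwise under DISTRIBS_BASE_DIR/<image_name>.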
+    if template_name:
+        build_dir = DISTRIBS_BASE_DIR / template_name
+        build_file_path = build_dir / f"{template_name}-build.yaml"
+    else:
+        build_dir = DISTRIBS_BASE_DIR / image_name
+        build_file_path = build_dir / f"{image_name}-build.yaml"
+
+    os.makedirs(build_dir, exist_ok=True)
+    with open(build_file_path, "w") as f:
+        to_write = json.loads(build_config.model_dump_json())
+        f.write(yaml.dump(to_write, sort_keys=False))
+
+    return_code = build_image(
+        build_config, build_file_path, image_name, template_name=template_name
+    )
+    if return_code != 0:
+        return
+
+    if template_name:
+        # copy run.yaml from template to build_dir instead of generating it again
+        template_path = (
+            importlib.resources.files("llama_stack")
+            / f"templates/{template_name}/run.yaml"
+        )
+        with importlib.resources.as_file(template_path) as path:
+            run_config_file = build_dir / f"{template_name}-run.yaml"
+            shutil.copy(path, run_config_file)
+        # Find all ${env.VARIABLE} patterns
+        cprint("Build Successful!", color="green")
+    else:
+        _generate_run_config(build_config, build_dir, image_name)
+
+
+def _run_template_list_cmd() -> None:
+    # eventually, this should query a registry at llama.meta.com/llamastack/distributions
+    headers = [
+        "Template Name",
+        # "Providers",
+        "Description",
+    ]
+
+    rows = []
+    for template_name, spec in available_templates_specs().items():
+        rows.append(
+            [
+                template_name,
+                # json.dumps(spec.distribution_spec.providers, indent=2),
+                spec.distribution_spec.description,
+            ]
+        )
+    print_table(
+        rows,
+        headers,
+        separate_rows=True,
+    )
diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py
index 38994bebf..d00157710 100644
--- a/llama_stack/cli/stack/build.py
+++ b/llama_stack/cli/stack/build.py
@@ -4,38 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import argparse
-import importlib.resources
-import os
-import shutil
-from functools import lru_cache
-from pathlib import Path
-from typing import List, Optional
+import textwrap
 
 from llama_stack.cli.subcommand import Subcommand
-from llama_stack.distribution.datatypes import (
-    BuildConfig,
-    DistributionSpec,
-    Provider,
-    StackRunConfig,
-)
-from llama_stack.distribution.distribution import get_provider_registry
-from llama_stack.distribution.resolver import InvalidProviderError
-from llama_stack.distribution.utils.dynamic import instantiate_class_type
-from llama_stack.providers.datatypes import Api
-
-TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates"
-
-
-@lru_cache()
-def available_templates_specs() -> List[BuildConfig]:
-    import yaml
-
-    template_specs = []
-    for p in TEMPLATES_PATH.rglob("*build.yaml"):
-        with open(p, "r") as f:
-            build_config = BuildConfig(**yaml.safe_load(f))
-        template_specs.append(build_config)
-    return template_specs
 
 
 class StackBuild(Subcommand):
@@ -81,250 +52,21 @@ class StackBuild(Subcommand):
             default="conda",
         )
 
+        self.parser.add_argument(
+            "--image-name",
+            type=str,
+            help=textwrap.dedent(
+                """[for image-type=conda] Name of the conda environment to use for the build. If
+not specified, the currently active Conda environment will be used. If no Conda
+environment is active, you must specify a name.
+                """
+            ),
+            default=None,
+        )
+
     def _run_stack_build_command(self, args: argparse.Namespace) -> None:
-        import textwrap
+        # always keep the implementation completely silo-ed away from the CLI
+        # so the CLI is fast to load and has minimal dependencies
+        from ._build import run_stack_build_command
 
-        import yaml
-        from prompt_toolkit import prompt
-        from prompt_toolkit.completion import WordCompleter
-        from prompt_toolkit.validation import Validator
-        from termcolor import cprint
-
-        from llama_stack.distribution.distribution import get_provider_registry
-
-        if args.list_templates:
-            self._run_template_list_cmd(args)
-            return
-
-        if args.template:
-            available_templates = available_templates_specs()
-            for build_config in available_templates:
-                if build_config.name == args.template:
-                    if args.image_type:
-                        build_config.image_type = args.image_type
-                    else:
-                        self.parser.error(
-                            f"Please specify a image-type (docker | conda | venv) for {args.template}"
-                        )
-                    self._run_stack_build_command_from_build_config(
-                        build_config,
-                        template_name=args.template,
-                    )
-                    return
-
-            self.parser.error(
-                f"Could not find template {args.template}. Please run `llama stack build --list-templates` to check out the available templates"
-            )
-            return
-
-        if not args.config and not args.template:
-            name = prompt(
-                "> Enter a name for your Llama Stack (e.g. my-local-stack): ",
-                validator=Validator.from_callable(
-                    lambda x: len(x) > 0,
-                    error_message="Name cannot be empty, please enter a name",
-                ),
-            )
-
-            image_type = prompt(
-                "> Enter the image type you want your Llama Stack to be built as (docker or conda or venv): ",
-                validator=Validator.from_callable(
-                    lambda x: x in ["docker", "conda", "venv"],
-                    error_message="Invalid image type, please enter conda or docker or venv",
-                ),
-                default="conda",
-            )
-
-            cprint(
-                textwrap.dedent(
-                    """
-                    Llama Stack is composed of several APIs working together. Let's select
-                    the provider types (implementations) you want to use for these APIs.
-                    """,
-                ),
-                color="green",
-            )
-
-            print("Tip: use <TAB> to see options for the providers.\n")
-
-            providers = dict()
-            for api, providers_for_api in get_provider_registry().items():
-                available_providers = [
-                    x
-                    for x in providers_for_api.keys()
-                    if x not in ("remote", "remote::sample")
-                ]
-                api_provider = prompt(
-                    "> Enter provider for API {}: ".format(api.value),
-                    completer=WordCompleter(available_providers),
-                    complete_while_typing=True,
-                    validator=Validator.from_callable(
-                        lambda x: x in available_providers,
-                        error_message="Invalid provider, use <TAB> to see options",
-                    ),
-                )
-
-                providers[api.value] = api_provider
-
-            description = prompt(
-                "\n > (Optional) Enter a short description for your Llama Stack: ",
-                default="",
-            )
-
-            distribution_spec = DistributionSpec(
-                providers=providers,
-                description=description,
-            )
-
-            build_config = BuildConfig(
-                name=name, image_type=image_type, distribution_spec=distribution_spec
-            )
-            self._run_stack_build_command_from_build_config(build_config)
-            return
-
-        with open(args.config, "r") as f:
-            try:
-                build_config = BuildConfig(**yaml.safe_load(f))
-            except Exception as e:
-                self.parser.error(f"Could not parse config file {args.config}: {e}")
-                return
-        self._run_stack_build_command_from_build_config(build_config)
-
-    def _generate_run_config(self, build_config: BuildConfig, build_dir: Path) -> None:
-        """
-        Generate a run.yaml template file for user to edit from a build.yaml file
-        """
-        import json
-
-        import yaml
-        from termcolor import cprint
-
-        from llama_stack.distribution.build import ImageType
-
-        apis = list(build_config.distribution_spec.providers.keys())
-        run_config = StackRunConfig(
-            docker_image=(
-                build_config.name
-                if build_config.image_type == ImageType.docker.value
-                else None
-            ),
-            image_name=build_config.name,
-            conda_env=(
-                build_config.name
-                if build_config.image_type == ImageType.conda.value
-                else None
-            ),
-            apis=apis,
-            providers={},
-        )
-        # build providers dict
-        provider_registry = get_provider_registry()
-        for api in apis:
-            run_config.providers[api] = []
-            provider_types = build_config.distribution_spec.providers[api]
-            if isinstance(provider_types, str):
-                provider_types = [provider_types]
-
-            for i, provider_type in enumerate(provider_types):
-                pid = provider_type.split("::")[-1]
-
-                p = provider_registry[Api(api)][provider_type]
-                if p.deprecation_error:
-                    raise InvalidProviderError(p.deprecation_error)
-
-                config_type = instantiate_class_type(
-                    provider_registry[Api(api)][provider_type].config_class
-                )
-                if hasattr(config_type, "sample_run_config"):
-                    config = config_type.sample_run_config(
-                        __distro_dir__=f"distributions/{build_config.name}"
-                    )
-                else:
-                    config = {}
-
-                p_spec = Provider(
-                    provider_id=f"{pid}-{i}" if len(provider_types) > 1 else pid,
-                    provider_type=provider_type,
-                    config=config,
-                )
-                run_config.providers[api].append(p_spec)
-
-        os.makedirs(build_dir, exist_ok=True)
-        run_config_file = build_dir / f"{build_config.name}-run.yaml"
-
-        with open(run_config_file, "w") as f:
-            to_write = json.loads(run_config.model_dump_json())
-            f.write(yaml.dump(to_write, sort_keys=False))
-
-        cprint(
-            f"You can now edit {run_config_file} and run `llama stack run {run_config_file}`",
-            color="green",
-        )
-
-    def _run_stack_build_command_from_build_config(
-        self,
-        build_config: BuildConfig,
-        template_name: Optional[str] = None,
-    ) -> None:
-        import json
-        import os
-
-        import yaml
-        from termcolor import cprint
-
-        from llama_stack.distribution.build import build_image
-        from
llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR - - # save build.yaml spec for building same distribution again - build_dir = DISTRIBS_BASE_DIR / f"llamastack-{build_config.name}" - os.makedirs(build_dir, exist_ok=True) - build_file_path = build_dir / f"{build_config.name}-build.yaml" - - with open(build_file_path, "w") as f: - to_write = json.loads(build_config.model_dump_json()) - f.write(yaml.dump(to_write, sort_keys=False)) - - return_code = build_image(build_config, build_file_path) - if return_code != 0: - return - - if template_name: - # copy run.yaml from template to build_dir instead of generating it again - template_path = ( - importlib.resources.files("llama_stack") - / f"templates/{template_name}/run.yaml" - ) - with importlib.resources.as_file(template_path) as path: - run_config_file = build_dir / f"{build_config.name}-run.yaml" - shutil.copy(path, run_config_file) - # Find all ${env.VARIABLE} patterns - cprint("Build Successful!", color="green") - else: - self._generate_run_config(build_config, build_dir) - - def _run_template_list_cmd(self, args: argparse.Namespace) -> None: - import json - - from llama_stack.cli.table import print_table - - # eventually, this should query a registry at llama.meta.com/llamastack/distributions - headers = [ - "Template Name", - "Providers", - "Description", - ] - - rows = [] - for spec in available_templates_specs(): - rows.append( - [ - spec.name, - json.dumps(spec.distribution_spec.providers, indent=2), - spec.distribution_spec.description, - ] - ) - print_table( - rows, - headers, - separate_rows=True, - ) + return run_stack_build_command(self.parser, args) diff --git a/llama_stack/cli/stack/run.py b/llama_stack/cli/stack/run.py index 90b2ecf6d..7942f603a 100644 --- a/llama_stack/cli/stack/run.py +++ b/llama_stack/cli/stack/run.py @@ -37,6 +37,11 @@ class StackRun(Subcommand): help="Port to run the server on. Defaults to 5000", default=int(os.getenv("LLAMA_STACK_PORT", 5000)), ) + self.parser.add_argument( + "--image-name", + type=str, + help="Name of the image to run. Defaults to the current conda environment", + ) self.parser.add_argument( "--disable-ipv6", action="store_true", @@ -53,8 +58,11 @@ class StackRun(Subcommand): def _run_stack_run_cmd(self, args: argparse.Namespace) -> None: import importlib.resources + import json + import subprocess import yaml + from termcolor import cprint from llama_stack.distribution.build import ImageType from llama_stack.distribution.configure import parse_and_maybe_upgrade_config @@ -99,11 +107,11 @@ class StackRun(Subcommand): if not config_file.exists(): self.parser.error( - f"File {str(config_file)} does not exist. 
Please run `llama stack build` to generate (and optionally edit) a run.yaml file" + f"File {str(config_file)} does not exist.\n\nPlease run `llama stack build` to generate (and optionally edit) a run.yaml file" ) return - print(f"Using config file: {config_file}") + print(f"Using run configuration: {config_file}") config_dict = yaml.safe_load(config_file.read_text()) config = parse_and_maybe_upgrade_config(config_dict) @@ -114,13 +122,52 @@ class StackRun(Subcommand): ) run_args = [script, config.docker_image] else: + current_conda_env = os.environ.get("CONDA_DEFAULT_ENV") + image_name = args.image_name or current_conda_env + if not image_name: + cprint( + "No current conda environment detected, please specify a conda environment name with --image-name", + color="red", + ) + return + + def get_conda_prefix(env_name): + # Get conda environments info + conda_env_info = json.loads( + subprocess.check_output( + ["conda", "info", "--envs", "--json"] + ).decode() + ) + envs = conda_env_info["envs"] + for envpath in envs: + if envpath.endswith(env_name): + return envpath + return None + + print(f"Using conda environment: {image_name}") + conda_prefix = get_conda_prefix(image_name) + if not conda_prefix: + cprint( + f"Conda environment {image_name} does not exist.", + color="red", + ) + return + + build_file = Path(conda_prefix) / "llamastack-build.yaml" + if not build_file.exists(): + cprint( + f"Build file {build_file} does not exist.\n\nPlease run `llama stack build` or specify the correct conda environment name with --image-name", + color="red", + ) + return + script = ( importlib.resources.files("llama_stack") / "distribution/start_conda_env.sh" ) run_args = [ script, - config.conda_env, + image_name, ] run_args.extend([str(config_file), str(args.port)]) @@ -129,13 +176,17 @@ class StackRun(Subcommand): for env_var in args.env: if "=" not in env_var: - self.parser.error( - f"Environment variable '{env_var}' must be in KEY=VALUE format" + cprint( + f"Environment variable '{env_var}' must be in KEY=VALUE format", + color="red", ) return key, value = env_var.split("=", 1) # split on first = only if not key: - self.parser.error(f"Environment variable '{env_var}' has empty key") + cprint( + f"Environment variable '{env_var}' has empty key", + color="red", + ) return run_args.extend(["--env", f"{key}={value}"]) diff --git a/llama_stack/distribution/build.py b/llama_stack/distribution/build.py index a8b2342af..b8b4188ac 100644 --- a/llama_stack/distribution/build.py +++ b/llama_stack/distribution/build.py @@ -10,7 +10,7 @@ import sys from enum import Enum from pathlib import Path -from typing import Dict, List +from typing import Dict, List, Optional from pydantic import BaseModel from termcolor import cprint @@ -106,6 +106,8 @@ def print_pip_install_help(providers: Dict[str, List[Provider]]): def build_image( build_config: BuildConfig, build_file_path: Path, + image_name: str, + template_name: Optional[str] = None, ): docker_image = build_config.distribution_spec.docker_image or "python:3.10-slim" @@ -115,32 +117,34 @@ def build_image( normal_deps += SERVER_DEPENDENCIES if build_config.image_type == ImageType.docker.value: - script = ( + script = str( importlib.resources.files("llama_stack") / "distribution/build_container.sh" ) args = [ script, - build_config.name, + image_name, docker_image, str(build_file_path), str(BUILDS_BASE_DIR / ImageType.docker.value), " ".join(normal_deps), ] elif build_config.image_type == ImageType.conda.value: - script = ( + script = str( 
             importlib.resources.files("llama_stack") / "distribution/build_conda_env.sh"
         )
         args = [
             script,
-            build_config.name,
+            str(image_name),
             str(build_file_path),
             " ".join(normal_deps),
         ]
     elif build_config.image_type == ImageType.venv.value:
-        script = importlib.resources.files("llama_stack") / "distribution/build_venv.sh"
+        script = str(
+            importlib.resources.files("llama_stack") / "distribution/build_venv.sh"
+        )
         args = [
             script,
-            build_config.name,
+            str(image_name),
             str(build_file_path),
             " ".join(normal_deps),
         ]
@@ -156,7 +160,7 @@ def build_image(
 
     if return_code != 0:
         log.error(
-            f"Failed to build target {build_config.name} with return code {return_code}",
+            f"Failed to build target {image_name} with return code {return_code}",
         )
 
     return return_code
diff --git a/llama_stack/distribution/build_conda_env.sh b/llama_stack/distribution/build_conda_env.sh
index 461f27baa..606fbf19d 100755
--- a/llama_stack/distribution/build_conda_env.sh
+++ b/llama_stack/distribution/build_conda_env.sh
@@ -18,8 +18,8 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then
 fi
 
 if [ "$#" -lt 3 ]; then
-  echo "Usage: $0 <build_name> <build_file_path> <pip_dependencies> [<special_pip_deps>]" >&2
-  echo "Example: $0 mybuild ./my-stack-build.yaml 'numpy pandas scipy'" >&2
+  echo "Usage: $0 <env_name> <build_file_path> <pip_dependencies> [<special_pip_deps>]" >&2
+  echo "Example: $0 my-conda-env ./my-stack-build.yaml 'numpy pandas scipy'" >&2
   exit 1
 fi
 
@@ -27,8 +27,7 @@ special_pip_deps="$4"
 
 set -euo pipefail
 
-build_name="$1"
-env_name="llamastack-$build_name"
+env_name="$1"
 build_file_path="$2"
 pip_dependencies="$3"
 
@@ -137,8 +136,8 @@ ensure_conda_env_python310() {
     fi
   fi
 
-  mv $build_file_path $CONDA_PREFIX/
-  echo "Build spec configuration saved at $CONDA_PREFIX/$build_name-build.yaml"
+  mv $build_file_path $CONDA_PREFIX/llamastack-build.yaml
+  echo "Build spec configuration saved at $CONDA_PREFIX/llamastack-build.yaml"
 }
 
 ensure_conda_env_python310 "$env_name" "$pip_dependencies" "$special_pip_deps"
diff --git a/llama_stack/distribution/datatypes.py b/llama_stack/distribution/datatypes.py
index d0ccd6cd1..0a293cbc2 100644
--- a/llama_stack/distribution/datatypes.py
+++ b/llama_stack/distribution/datatypes.py
@@ -131,10 +131,6 @@ this could be just a hash
         default=None,
         description="Reference to the docker image if this package refers to a container",
     )
-    conda_env: Optional[str] = Field(
-        default=None,
-        description="Reference to the conda environment if this package refers to a conda environment",
-    )
     apis: List[str] = Field(
         default_factory=list,
         description="""
@@ -166,7 +162,7 @@ a default SQLite store will be used.""",
 
 class BuildConfig(BaseModel):
     version: str = LLAMA_STACK_BUILD_CONFIG_VERSION
-    name: str
+
     distribution_spec: DistributionSpec = Field(
         description="The distribution spec to build including API providers. "
     )
diff --git a/llama_stack/distribution/start_conda_env.sh b/llama_stack/distribution/start_conda_env.sh
index f478a8bd8..c37f30ef0 100755
--- a/llama_stack/distribution/start_conda_env.sh
+++ b/llama_stack/distribution/start_conda_env.sh
@@ -23,8 +23,7 @@ if [ $# -lt 3 ]; then
   exit 1
 fi
 
-build_name="$1"
-env_name="llamastack-$build_name"
+env_name="$1"
 shift
 
 yaml_config="$1"
diff --git a/llama_stack/providers/inline/inference/meta_reference/quantization/scripts/build_conda.sh b/llama_stack/providers/inline/inference/meta_reference/quantization/scripts/build_conda.sh
deleted file mode 100644
index ae0ed0bac..000000000
--- a/llama_stack/providers/inline/inference/meta_reference/quantization/scripts/build_conda.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -if [[ $# -ne 1 ]]; then - echo "Error: Please provide the name of CONDA environment you wish to create" - exit 1 -fi - -ENV_NAME=$1 - -set -eu -eval "$(conda shell.bash hook)" - -echo "Will build env (or overwrite) named '$ENV_NAME'" - -set -x - -run_build() { - # Set up the conda environment - yes | conda remove --name $ENV_NAME --all - yes | conda create -n $ENV_NAME python=3.10 - conda activate $ENV_NAME - - # PT nightly - pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu121 - - # install dependencies for `llama-agentic-system` - pip install -r fp8_requirements.txt -} - -run_build
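
--
Usage sketch (commands drawn from the Test Plan above; `fireworks` is the
template used there and `foo` is an arbitrary environment name):

    # inside an activated conda environment: build into, and run from, that env
    llama stack build --template fireworks
    llama stack run fireworks

    # outside conda, or to target a specific environment: name it explicitly
    llama stack build --template fireworks --image-name foo
    llama stack run fireworks --image-name foo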