Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 02:53:30 +00:00
feat: add --run to llama stack build (#1156)
# What does this PR do?

`--run` runs the stack that was just built, using the same arguments supplied during the build process (image name, image type, etc.). This simplifies the workflow considerably and improves the UX for most local users trying to get started, since they no longer have to match the flags of the two commands (build and then run).

Also, moved `ImageType` to `distribution.utils` since there were circular import errors with its old location.

## Test Plan

Tested locally using the following command:

`llama stack build --run --template ollama --image-type venv`

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
parent 6227e1e3b9
commit 34e3faa4e8

6 changed files with 129 additions and 87 deletions
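Before the file diffs, a minimal sketch of the flow the new flag enables. `do_build` and `do_run` are hypothetical stand-ins used only for illustration, not the actual CLI functions in this commit:

```python
# Hypothetical sketch of the --run flow; do_build/do_run are illustrative
# stand-ins, not real llama_stack functions.
from argparse import Namespace


def do_build(template, image_type, image_name):
    """Stand-in for the existing build step (container/conda/venv image)."""
    print(f"building template={template} image_type={image_type} image_name={image_name}")


def do_run(template, image_type, image_name):
    """Stand-in for launching the stack that was just built."""
    print(f"running template={template} image_type={image_type} image_name={image_name}")


def build_and_maybe_run(args: Namespace) -> None:
    # Build the distribution exactly as before.
    do_build(args.template, args.image_type, args.image_name)
    # With --run, immediately launch the stack using the same arguments,
    # so the user never has to repeat them in a separate `llama stack run`.
    if args.run:
        do_run(args.template, args.image_type, args.image_name)


if __name__ == "__main__":
    # Mirrors the Test Plan invocation:
    # llama stack build --run --template ollama --image-type venv
    build_and_maybe_run(Namespace(template="ollama", image_type="venv", image_name=None, run=True))
```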
@@ -7,7 +7,6 @@
 import importlib.resources
 import logging
 import sys
-from enum import Enum
 from pathlib import Path
 from typing import Dict, List
 
@@ -18,6 +17,7 @@ from llama_stack.distribution.datatypes import BuildConfig, Provider
 from llama_stack.distribution.distribution import get_provider_registry
 from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
 from llama_stack.distribution.utils.exec import run_command, run_with_pty
+from llama_stack.distribution.utils.image_types import ImageType
 from llama_stack.providers.datatypes import Api
 
 log = logging.getLogger(__name__)
@@ -33,12 +33,6 @@ SERVER_DEPENDENCIES = [
 ]
 
 
-class ImageType(Enum):
-    container = "container"
-    conda = "conda"
-    venv = "venv"
-
-
 class ApiInput(BaseModel):
     api: Api
     provider: str

@@ -12,8 +12,78 @@ import signal
 import subprocess
 import sys
 
 from termcolor import cprint
 
 log = logging.getLogger(__name__)
 
+import importlib
+import json
+from pathlib import Path
+
+from llama_stack.distribution.utils.image_types import ImageType
+
+
+def formulate_run_args(image_type, image_name, config, template_name) -> list:
+    if image_type == ImageType.container.value or config.container_image:
+        script = importlib.resources.files("llama_stack") / "distribution/start_container.sh"
+        image_name = f"distribution-{template_name}" if template_name else config.container_image
+        run_args = [script, image_name]
+    elif image_type == ImageType.conda.value:
+        current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
+        image_name = image_name or current_conda_env
+        if not image_name:
+            cprint(
+                "No current conda environment detected, please specify a conda environment name with --image-name",
+                color="red",
+            )
+            return
+
+        def get_conda_prefix(env_name):
+            # Conda "base" environment does not end with "base" in the
+            # prefix, so should be handled separately.
+            if env_name == "base":
+                return os.environ.get("CONDA_PREFIX")
+            # Get conda environments info
+            conda_env_info = json.loads(subprocess.check_output(["conda", "info", "--envs", "--json"]).decode())
+            envs = conda_env_info["envs"]
+            for envpath in envs:
+                if envpath.endswith(env_name):
+                    return envpath
+            return None
+
+        print(f"Using conda environment: {image_name}")
+        conda_prefix = get_conda_prefix(image_name)
+        if not conda_prefix:
+            cprint(
+                f"Conda environment {image_name} does not exist.",
+                color="red",
+            )
+            return
+
+        build_file = Path(conda_prefix) / "llamastack-build.yaml"
+        if not build_file.exists():
+            cprint(
+                f"Build file {build_file} does not exist.\n\nPlease run `llama stack build` or specify the correct conda environment name with --image-name",
+                color="red",
+            )
+            return
+
+        script = importlib.resources.files("llama_stack") / "distribution/start_conda_env.sh"
+        run_args = [
+            script,
+            image_name,
+        ]
+    else:
+        # else must be venv since that is the only valid option left.
+        current_venv = os.environ.get("VIRTUAL_ENV")
+        venv = image_name or current_venv
+        script = importlib.resources.files("llama_stack") / "distribution/start_venv.sh"
+        run_args = [
+            script,
+            venv,
+        ]
+    return run_args
+
+
 def run_with_pty(command):
     if sys.platform.startswith("win"):

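A short usage sketch of how the run arguments produced above could be handed to `run_with_pty`. The config object here is a hypothetical stand-in exposing only the attribute `formulate_run_args` touches (not the real config types), the venv name is assumed, and in the actual CLI further arguments (such as the run-config path) would be appended before launch:

```python
# Hypothetical usage sketch for the helpers above (venv case).
from types import SimpleNamespace

from llama_stack.distribution.utils.exec import formulate_run_args, run_with_pty

# Stand-in config: only container_image is consulted, in the container branch.
config = SimpleNamespace(container_image=None)

# Resolve the start script and environment name for a venv-based stack.
run_args = formulate_run_args(
    image_type="venv",
    image_name="my-llama-venv",  # assumed venv name; falls back to $VIRTUAL_ENV if None
    config=config,
    template_name="ollama",
)

if run_args:
    # Show the resolved command; the real CLI would append additional
    # arguments here before launching the server under a PTY.
    print([str(arg) for arg in run_args])
    run_with_pty(run_args)
```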
llama_stack/distribution/utils/image_types.py (new file, 13 lines)

@@ -0,0 +1,13 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from enum import Enum
+
+
+class ImageType(Enum):
+    container = "container"
+    conda = "conda"
+    venv = "venv"
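For reference, a small sketch of how the relocated enum resolves `--image-type` strings; the comparison mirrors the `.value` checks used in `formulate_run_args`, and the direct `ImageType("venv")` lookup is shown only as an alternative, not something this commit uses:

```python
# Minimal sketch: resolving a CLI-style string against ImageType.
from llama_stack.distribution.utils.image_types import ImageType

requested = "venv"  # e.g. the value passed via --image-type

# formulate_run_args compares against the .value of each member:
if requested == ImageType.venv.value:
    print("starting from a virtual environment")

# Alternatively, the string can be converted to an enum member directly:
image_type = ImageType(requested)
print(image_type.name, image_type.value)  # -> venv venv
```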