From 6227e1e3b9a1164000b18286791dccdf2a2933d9 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Sun, 23 Feb 2025 16:57:11 -0800 Subject: [PATCH] fix: update virtualenv building so llamastack- prefix is not added, make notebook experience easier (#1225) Make sure venv behaves like conda (no prefix is added to image_name) and `--image-type venv` inside a notebook "just works" without any fiddling --- .pre-commit-config.yaml | 1 + llama_stack/cli/stack/_build.py | 16 ++++++++++++++-- llama_stack/distribution/build_venv.sh | 13 +++++++++---- llama_stack/distribution/library_client.py | 14 +------------- llama_stack/distribution/start_venv.sh | 1 + llama_stack/distribution/utils/exec.py | 13 +++++++++++++ 6 files changed, 39 insertions(+), 19 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 85cb1b91a..70af72a62 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,6 +30,7 @@ repos: rev: v0.9.4 hooks: - id: ruff + args: [ --fix ] exclude: ^llama_stack/strong_typing/.*$ - id: ruff-format diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py index 76f03aa5c..666c2e6dd 100644 --- a/llama_stack/cli/stack/_build.py +++ b/llama_stack/cli/stack/_build.py @@ -37,6 +37,7 @@ from llama_stack.distribution.distribution import get_provider_registry from llama_stack.distribution.resolver import InvalidProviderError from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR from llama_stack.distribution.utils.dynamic import instantiate_class_type +from llama_stack.distribution.utils.exec import in_notebook from llama_stack.providers.datatypes import Api TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates" @@ -59,8 +60,16 @@ def run_stack_build_command(args: argparse.Namespace) -> None: if args.list_templates: return _run_template_list_cmd() - current_conda_env = os.environ.get("CONDA_DEFAULT_ENV") - image_name = args.image_name or current_conda_env + if args.image_type == 
"venv": + current_venv = os.environ.get("VIRTUAL_ENV") + image_name = args.image_name or current_venv + if not image_name and in_notebook(): + image_name = "__system__" + elif args.image_type == "conda": + current_conda_env = os.environ.get("CONDA_DEFAULT_ENV") + image_name = args.image_name or current_conda_env + else: + image_name = args.image_name if args.template: available_templates = available_templates_specs() @@ -256,6 +265,9 @@ def _run_stack_build_command_from_build_config( elif build_config.image_type == ImageType.conda.value: if not image_name: raise ValueError("Please specify an image name when building a conda image") + elif build_config.image_type == ImageType.venv.value: + if not image_name: + raise ValueError("Please specify an image name when building a venv image") if template_name: build_dir = DISTRIBS_BASE_DIR / template_name diff --git a/llama_stack/distribution/build_venv.sh b/llama_stack/distribution/build_venv.sh index b47cfcb83..f973fe955 100755 --- a/llama_stack/distribution/build_venv.sh +++ b/llama_stack/distribution/build_venv.sh @@ -16,6 +16,7 @@ TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-} # Reference: https://github.com/astral-sh/uv/pull/1694 UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT:-500} UV_SYSTEM_PYTHON=${UV_SYSTEM_PYTHON:-} +VIRTUAL_ENV=${VIRTUAL_ENV:-} if [ -n "$LLAMA_STACK_DIR" ]; then echo "Using llama-stack-dir=$LLAMA_STACK_DIR" @@ -25,7 +26,7 @@ if [ -n "$LLAMA_MODELS_DIR" ]; then fi if [ "$#" -lt 3 ]; then - echo "Usage: $0 <build_name> <pip_dependencies> [<special_pip_deps>]" >&2 + echo "Usage: $0 <env_name> <pip_dependencies> [<special_pip_deps>]" >&2 echo "Example: $0 mybuild ./my-stack-build.yaml 'numpy pandas scipy'" >&2 exit 1 fi @@ -34,8 +35,7 @@ special_pip_deps="$3" set -euo pipefail -build_name="$1" -env_name="llamastack-$build_name" +env_name="$1" pip_dependencies="$2" # Define color codes @@ -75,8 +75,12 @@ run() { local pip_dependencies="$2" local special_pip_deps="$3" - if [ -n "$UV_SYSTEM_PYTHON" ]; then + if [ -n "$UV_SYSTEM_PYTHON" ] || [ "$env_name" == "__system__" ]; then echo "Installing dependencies in system 
Python environment" + # if env == __system__, ensure we set UV_SYSTEM_PYTHON + export UV_SYSTEM_PYTHON=1 + elif [ "$VIRTUAL_ENV" == "$env_name" ]; then + echo "Virtual environment $env_name is already active" else echo "Using virtual environment $env_name" uv venv "$env_name" @@ -90,6 +94,7 @@ run() { # shellcheck disable=SC2086 # we are building a command line so word splitting is expected uv pip install --extra-index-url https://test.pypi.org/simple/ \ + --index-strategy unsafe-best-match \ llama-models=="$TEST_PYPI_VERSION" llama-stack=="$TEST_PYPI_VERSION" \ $pip_dependencies if [ -n "$special_pip_deps" ]; then diff --git a/llama_stack/distribution/library_client.py b/llama_stack/distribution/library_client.py index 5790c498b..59189f8bb 100644 --- a/llama_stack/distribution/library_client.py +++ b/llama_stack/distribution/library_client.py @@ -41,6 +41,7 @@ from llama_stack.distribution.stack import ( redact_sensitive_fields, replace_env_vars, ) +from llama_stack.distribution.utils.exec import in_notebook from llama_stack.providers.utils.telemetry.tracing import ( end_trace, setup_logger, @@ -52,19 +53,6 @@ logger = logging.getLogger(__name__) T = TypeVar("T") -def in_notebook(): - try: - from IPython import get_ipython - - if "IPKernelApp" not in get_ipython().config: # pragma: no cover - return False - except ImportError: - return False - except AttributeError: - return False - return True - - def convert_pydantic_to_json_value(value: Any) -> Any: if isinstance(value, Enum): return value.value diff --git a/llama_stack/distribution/start_venv.sh b/llama_stack/distribution/start_venv.sh index 1cfa7248f..195274129 100755 --- a/llama_stack/distribution/start_venv.sh +++ b/llama_stack/distribution/start_venv.sh @@ -55,6 +55,7 @@ while [[ $# -gt 0 ]]; do esac done +echo "Using virtual environment: $venv_path" # Activate virtual environment if [ ! 
-d "$venv_path" ]; then echo -e "${RED}Error: Virtual environment not found at $venv_path${NC}" >&2 diff --git a/llama_stack/distribution/utils/exec.py b/llama_stack/distribution/utils/exec.py index 4a3a95826..e13e59aad 100644 --- a/llama_stack/distribution/utils/exec.py +++ b/llama_stack/distribution/utils/exec.py @@ -22,6 +22,19 @@ def run_with_pty(command): return _run_with_pty_unix(command) +def in_notebook(): + try: + from IPython import get_ipython + + if "IPKernelApp" not in get_ipython().config: # pragma: no cover + return False + except ImportError: + return False + except AttributeError: + return False + return True + + # run a command in a pseudo-terminal, with interrupt handling, # useful when you want to run interactive things def _run_with_pty_unix(command):