refactor: remove Conda support from Llama Stack (#2969)

# What does this PR do?
This PR removes Conda support from Llama Stack.
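With Conda gone, only the `container` and `venv` image types remain. A minimal sketch of the venv workflow after this change (the `starter` template and port are assumed here for illustration):

```bash
# build a distribution into the active virtual environment (venv is now the default image type)
llama stack build --template starter --image-type venv

# run it; a run config (template name or run.yaml) is required for venv environments
llama stack run starter --image-type venv --port 8321
```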

Closes #2539

## Test Plan
Commit a749d5f4a4 (parent f2eee4e417), authored by IAN MILLER on 2025-08-02 23:52:59 +01:00 and committed by GitHub.
44 changed files with 159 additions and 311 deletions


@ -69,9 +69,6 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
if args.image_type == ImageType.VENV.value:
current_venv = os.environ.get("VIRTUAL_ENV")
image_name = args.image_name or current_venv
elif args.image_type == ImageType.CONDA.value:
current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
image_name = args.image_name or current_conda_env
else:
image_name = args.image_name
@ -132,7 +129,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
)
if not args.image_type:
cprint(
f"Please specify a image-type (container | conda | venv) for {args.template}",
f"Please specify a image-type (container | venv) for {args.template}",
color="red",
file=sys.stderr,
)
@ -158,22 +155,7 @@ def run_stack_build_command(args: argparse.Namespace) -> None:
),
)
if image_type == ImageType.CONDA.value:
if not image_name:
cprint(
f"No current conda environment detected or specified, will create a new conda environment with the name `llamastack-{name}`",
color="yellow",
file=sys.stderr,
)
image_name = f"llamastack-{name}"
else:
cprint(
f"Using conda environment {image_name}",
color="green",
file=sys.stderr,
)
else:
image_name = f"llamastack-{name}"
image_name = f"llamastack-{name}"
cprint(
textwrap.dedent(
@ -372,10 +354,7 @@ def _run_stack_build_command_from_build_config(
else:
if not image_name:
raise ValueError("Please specify an image name when building a container image without a template")
elif build_config.image_type == LlamaStackImageType.CONDA.value:
if not image_name:
raise ValueError("Please specify an image name when building a conda image")
elif build_config.image_type == LlamaStackImageType.VENV.value:
else:
if not image_name and os.environ.get("UV_SYSTEM_PYTHON"):
image_name = "__system__"
if not image_name:
@ -431,7 +410,6 @@ def _run_stack_build_command_from_build_config(
return_code = build_image(
build_config,
build_file_path,
image_name,
template_or_config=template_name or config_path or str(build_file_path),
run_config=run_config_file.as_posix() if run_config_file else None,


@ -56,7 +56,7 @@ class StackBuild(Subcommand):
"--image-name",
type=str,
help=textwrap.dedent(
f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the conda or virtual environment to use for
f"""[for image-type={"|".join(e.value for e in ImageType)}] Name of the virtual environment to use for
the build. If not specified, currently active environment will be used if found.
"""
),


@ -47,7 +47,8 @@ class StackRun(Subcommand):
self.parser.add_argument(
"--image-name",
type=str,
help="Name of the image to run.",
default=None,
help="Name of the image to run. Defaults to the current environment",
)
self.parser.add_argument(
"--env",
@ -58,7 +59,7 @@ class StackRun(Subcommand):
self.parser.add_argument(
"--image-type",
type=str,
help="Image Type used during the build. This can be either conda or container or venv.",
help="Image Type used during the build. This can be only venv.",
choices=[e.value for e in ImageType if e.value != ImageType.CONTAINER.value],
)
self.parser.add_argument(
@ -67,20 +68,38 @@ class StackRun(Subcommand):
help="Start the UI server",
)
# If neither image type nor image name is provided, but at the same time
# the current environment has conda breadcrumbs, then assume what the user
# wants to use conda mode and not the usual default mode (using
# pre-installed system packages).
#
# Note: yes, this is hacky. It's implemented this way to keep the existing
# conda users unaffected by the switch of the default behavior to using
# system packages.
def _get_image_type_and_name(self, args: argparse.Namespace) -> tuple[str, str]:
conda_env = os.environ.get("CONDA_DEFAULT_ENV")
if conda_env and args.image_name == conda_env:
logger.warning(f"Conda detected. Using conda environment {conda_env} for the run.")
return ImageType.CONDA.value, args.image_name
return args.image_type, args.image_name
def _resolve_config_and_template(self, args: argparse.Namespace) -> tuple[Path | None, str | None]:
"""Resolve config file path and template name from args.config"""
from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
if not args.config:
return None, None
config_file = Path(args.config)
has_yaml_suffix = args.config.endswith(".yaml")
template_name = None
if not config_file.exists() and not has_yaml_suffix:
# check if this is a template
config_file = Path(REPO_ROOT) / "llama_stack" / "templates" / args.config / "run.yaml"
if config_file.exists():
template_name = args.config
if not config_file.exists() and not has_yaml_suffix:
# check if it's a build config saved to ~/.llama dir
config_file = Path(DISTRIBS_BASE_DIR / f"llamastack-{args.config}" / f"{args.config}-run.yaml")
if not config_file.exists():
self.parser.error(
f"File {str(config_file)} does not exist.\n\nPlease run `llama stack build` to generate (and optionally edit) a run.yaml file"
)
if not config_file.is_file():
self.parser.error(
f"Config file must be a valid file path, '{config_file}' is not a file: type={type(config_file)}"
)
return config_file, template_name
def _run_stack_run_cmd(self, args: argparse.Namespace) -> None:
import yaml
@ -90,7 +109,7 @@ class StackRun(Subcommand):
if args.enable_ui:
self._start_ui_development_server(args.port)
image_type, image_name = self._get_image_type_and_name(args)
image_type, image_name = args.image_type, args.image_name
if args.config:
try:
@ -103,8 +122,8 @@ class StackRun(Subcommand):
config_file = None
# Check if config is required based on image type
if (image_type in [ImageType.CONDA.value, ImageType.VENV.value]) and not config_file:
self.parser.error("Config file is required for venv and conda environments")
if image_type == ImageType.VENV.value and not config_file:
self.parser.error("Config file is required for venv environment")
if config_file:
logger.info(f"Using run configuration: {config_file}")


@ -8,7 +8,6 @@ from enum import Enum
class ImageType(Enum):
CONDA = "conda"
CONTAINER = "container"
VENV = "venv"


@ -7,7 +7,6 @@
import importlib.resources
import logging
import sys
from pathlib import Path
from pydantic import BaseModel
from termcolor import cprint
@ -106,7 +105,6 @@ def print_pip_install_help(config: BuildConfig):
def build_image(
build_config: BuildConfig,
build_file_path: Path,
image_name: str,
template_or_config: str,
run_config: str | None = None,
@ -138,18 +136,7 @@ def build_image(
# build arguments
if run_config is not None:
args.extend(["--run-config", run_config])
elif build_config.image_type == LlamaStackImageType.CONDA.value:
script = str(importlib.resources.files("llama_stack") / "core/build_conda_env.sh")
args = [
script,
"--env-name",
str(image_name),
"--build-file-path",
str(build_file_path),
"--normal-deps",
" ".join(normal_deps),
]
elif build_config.image_type == LlamaStackImageType.VENV.value:
else:
script = str(importlib.resources.files("llama_stack") / "core/build_venv.sh")
args = [
script,


@ -6,9 +6,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# TODO: combine this with build_conda_env.sh since it is almost identical
# the only difference is that we don't do any conda-specific setup
LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR:-}
TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
@ -95,6 +92,8 @@ if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then
echo "Using llama-stack-client-dir=$LLAMA_STACK_CLIENT_DIR"
fi
ENVNAME=""
# pre-run checks to make sure we can proceed with the installation
pre_run_checks() {
local env_name="$1"


@ -7,12 +7,10 @@
# the root directory of this source tree.
cleanup() {
envname="$1"
set +x
echo "Cleaning up..."
conda deactivate
conda env remove --name "$envname" -y
# For venv environments, no special cleanup is needed
# This function exists to avoid "function not found" errors
local env_name="$1"
echo "Cleanup called for environment: $env_name"
}
handle_int() {
@ -31,19 +29,7 @@ handle_exit() {
fi
}
setup_cleanup_handlers() {
trap handle_int INT
trap handle_exit EXIT
if is_command_available conda; then
__conda_setup="$('conda' 'shell.bash' 'hook' 2>/dev/null)"
eval "$__conda_setup"
conda deactivate
else
echo "conda is not available"
exit 1
fi
}
# check if a command is present
is_command_available() {


@ -432,8 +432,8 @@ class BuildConfig(BaseModel):
distribution_spec: DistributionSpec = Field(description="The distribution spec to build including API providers. ")
image_type: str = Field(
default="conda",
description="Type of package to build (conda | container | venv)",
default="venv",
description="Type of package to build (container | venv)",
)
image_name: str | None = Field(
default=None,


@ -40,7 +40,6 @@ port="$1"
shift
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
source "$SCRIPT_DIR/common.sh"
# Initialize variables
yaml_config=""
@ -75,9 +74,9 @@ while [[ $# -gt 0 ]]; do
esac
done
# Check if yaml_config is required based on env_type
if [[ "$env_type" == "venv" || "$env_type" == "conda" ]] && [ -z "$yaml_config" ]; then
echo -e "${RED}Error: --config is required for venv and conda environments${NC}" >&2
# Check if yaml_config is required
if [[ "$env_type" == "venv" ]] && [ -z "$yaml_config" ]; then
echo -e "${RED}Error: --config is required for venv environment${NC}" >&2
exit 1
fi
@ -101,19 +100,14 @@ case "$env_type" in
source "$env_path_or_name/bin/activate"
fi
;;
"conda")
if ! is_command_available conda; then
echo -e "${RED}Error: conda not found" >&2
exit 1
fi
eval "$(conda shell.bash hook)"
conda deactivate && conda activate "$env_path_or_name"
PYTHON_BINARY="$CONDA_PREFIX/bin/python"
;;
*)
# Handle unsupported env_types here
echo -e "${RED}Error: Unsupported environment type '$env_type'. Only 'venv' is supported.${NC}" >&2
exit 1
;;
esac
if [[ "$env_type" == "venv" || "$env_type" == "conda" ]]; then
if [[ "$env_type" == "venv" ]]; then
set -x
if [ -n "$yaml_config" ]; then


@ -9,7 +9,7 @@
1. Start up Llama Stack API server. More details [here](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html).
```
llama stack build --template together --image-type conda
llama stack build --template together --image-type venv
llama stack run together
```


@ -15,69 +15,21 @@ from termcolor import cprint
log = logging.getLogger(__name__)
import importlib
import json
from pathlib import Path
from llama_stack.core.utils.image_types import LlamaStackImageType
def formulate_run_args(image_type: str, image_name: str) -> list[str]:
env_name = ""
def formulate_run_args(image_type: str, image_name: str) -> list:
# Only venv is supported now
current_venv = os.environ.get("VIRTUAL_ENV")
env_name = image_name or current_venv
if not env_name:
cprint(
"No current virtual environment detected, please specify a virtual environment name with --image-name",
color="red",
file=sys.stderr,
)
return []
if image_type == LlamaStackImageType.CONDA.value:
current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
env_name = image_name or current_conda_env
if not env_name:
cprint(
"No current conda environment detected, please specify a conda environment name with --image-name",
color="red",
file=sys.stderr,
)
return
def get_conda_prefix(env_name):
# Conda "base" environment does not end with "base" in the
# prefix, so should be handled separately.
if env_name == "base":
return os.environ.get("CONDA_PREFIX")
# Get conda environments info
conda_env_info = json.loads(subprocess.check_output(["conda", "info", "--envs", "--json"]).decode())
envs = conda_env_info["envs"]
for envpath in envs:
if os.path.basename(envpath) == env_name:
return envpath
return None
cprint(f"Using conda environment: {env_name}", color="green", file=sys.stderr)
conda_prefix = get_conda_prefix(env_name)
if not conda_prefix:
cprint(
f"Conda environment {env_name} does not exist.",
color="red",
file=sys.stderr,
)
return
build_file = Path(conda_prefix) / "llamastack-build.yaml"
if not build_file.exists():
cprint(
f"Build file {build_file} does not exist.\n\nPlease run `llama stack build` or specify the correct conda environment name with --image-name",
color="red",
file=sys.stderr,
)
return
else:
# else must be venv since that is the only valid option left.
current_venv = os.environ.get("VIRTUAL_ENV")
env_name = image_name or current_venv
if not env_name:
cprint(
"No current virtual environment detected, please specify a virtual environment name with --image-name",
color="red",
file=sys.stderr,
)
return
cprint(f"Using virtual environment: {env_name}", file=sys.stderr)
cprint(f"Using virtual environment: {env_name}", file=sys.stderr)
script = importlib.resources.files("llama_stack") / "core/start_stack.sh"
run_args = [
@ -93,7 +45,8 @@ def in_notebook():
try:
from IPython import get_ipython
if "IPKernelApp" not in get_ipython().config: # pragma: no cover
ipython = get_ipython()
if ipython is None or "IPKernelApp" not in ipython.config: # pragma: no cover
return False
except ImportError:
return False


@ -9,5 +9,4 @@ import enum
class LlamaStackImageType(enum.Enum):
CONTAINER = "container"
CONDA = "conda"
VENV = "venv"


@ -20,7 +20,7 @@ This provider enables dataset management using NVIDIA's NeMo Customizer service.
Build the NVIDIA environment:
```bash
llama stack build --template nvidia --image-type conda
llama stack build --template nvidia --image-type venv
```
### Basic Usage using the LlamaStack Python Client


@ -18,7 +18,7 @@ This provider enables running inference using NVIDIA NIM.
Build the NVIDIA environment:
```bash
llama stack build --template nvidia --image-type conda
llama stack build --template nvidia --image-type venv
```
### Basic Usage using the LlamaStack Python Client


@ -22,7 +22,7 @@ This provider enables fine-tuning of LLMs using NVIDIA's NeMo Customizer service
Build the NVIDIA environment:
```bash
llama stack build --template nvidia --image-type conda
llama stack build --template nvidia --image-type venv
```
### Basic Usage using the LlamaStack Python Client


@ -19,7 +19,7 @@ This provider enables safety checks and guardrails for LLM interactions using NV
Build the NVIDIA environment:
```bash
llama stack build --template nvidia --image-type conda
llama stack build --template nvidia --image-type venv
```
### Basic Usage using the LlamaStack Python Client


@ -47,8 +47,7 @@ distribution_spec:
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
- provider_type: remote::model-context-protocol
image_type: conda
image_name: ci-tests
image_type: venv
additional_pip_packages:
- aiosqlite
- asyncpg


@ -29,8 +29,7 @@ distribution_spec:
- provider_type: remote::brave-search
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
image_type: conda
image_name: dell
image_type: venv
additional_pip_packages:
- aiosqlite
- sqlalchemy[asyncio]


@ -28,8 +28,7 @@ distribution_spec:
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
- provider_type: remote::model-context-protocol
image_type: conda
image_name: meta-reference-gpu
image_type: venv
additional_pip_packages:
- aiosqlite
- sqlalchemy[asyncio]


@ -58,7 +58,7 @@ $ llama model list --downloaded
## Running the Distribution
You can do this via Conda (build code) or Docker which has a pre-built image.
You can do this via venv or Docker which has a pre-built image.
### Via Docker
@ -92,12 +92,12 @@ docker run \
--env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B
```
### Via Conda
### Via venv
Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.
```bash
llama stack build --template {{ name }} --image-type conda
llama stack build --template {{ name }} --image-type venv
llama stack run distributions/{{ name }}/run.yaml \
--port 8321 \
--env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct


@ -23,8 +23,7 @@ distribution_spec:
- provider_type: inline::basic
tool_runtime:
- provider_type: inline::rag-runtime
image_type: conda
image_name: nvidia
image_type: venv
additional_pip_packages:
- aiosqlite
- sqlalchemy[asyncio]


@ -105,7 +105,7 @@ curl -X DELETE "$NEMO_URL/v1/deployment/model-deployments/meta/llama-3.1-8b-inst
## Running Llama Stack with NVIDIA
You can do this via Conda or venv (build code), or Docker which has a pre-built image.
You can do this via venv (build code), or Docker which has a pre-built image.
### Via Docker
@ -124,17 +124,6 @@ docker run \
--env NVIDIA_API_KEY=$NVIDIA_API_KEY
```
### Via Conda
```bash
INFERENCE_MODEL=meta-llama/Llama-3.1-8b-Instruct
llama stack build --template nvidia --image-type conda
llama stack run ./run.yaml \
--port 8321 \
--env NVIDIA_API_KEY=$NVIDIA_API_KEY \
--env INFERENCE_MODEL=$INFERENCE_MODEL
```
### Via venv
If you've set up your local development environment, you can also build the image using your local virtual environment.
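The venv section that replaces the deleted Conda block follows the same pattern; a sketch, assuming the nvidia template and the same environment variables as the removed example:

```bash
INFERENCE_MODEL=meta-llama/Llama-3.1-8b-Instruct
llama stack build --template nvidia --image-type venv
llama stack run ./run.yaml \
  --port 8321 \
  --env NVIDIA_API_KEY=$NVIDIA_API_KEY \
  --env INFERENCE_MODEL=$INFERENCE_MODEL
```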


@ -32,8 +32,7 @@ distribution_spec:
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
- provider_type: remote::model-context-protocol
image_type: conda
image_name: open-benchmark
image_type: venv
additional_pip_packages:
- aiosqlite
- sqlalchemy[asyncio]


@ -18,8 +18,7 @@ distribution_spec:
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
- provider_type: remote::model-context-protocol
image_type: conda
image_name: postgres-demo
image_type: venv
additional_pip_packages:
- asyncpg
- psycopg2-binary


@ -47,8 +47,7 @@ distribution_spec:
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
- provider_type: remote::model-context-protocol
image_type: conda
image_name: starter
image_type: venv
additional_pip_packages:
- aiosqlite
- asyncpg


@ -29,6 +29,7 @@ from llama_stack.core.datatypes import (
)
from llama_stack.core.distribution import get_provider_registry
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
from llama_stack.providers.utils.kvstore.config import get_pip_packages as get_kv_pip_packages
@ -314,8 +315,7 @@ class DistributionTemplate(BaseModel):
container_image=self.container_image,
providers=build_providers,
),
image_type="conda",
image_name=self.name,
image_type=LlamaStackImageType.VENV.value, # default to venv
additional_pip_packages=sorted(set(additional_pip_packages)),
)


@ -35,16 +35,11 @@ distribution_spec:
- provider_id: braintrust
provider_type: inline::braintrust
tool_runtime:
- provider_id: brave-search
provider_type: remote::brave-search
- provider_id: tavily-search
provider_type: remote::tavily-search
- provider_id: rag-runtime
provider_type: inline::rag-runtime
- provider_id: model-context-protocol
provider_type: remote::model-context-protocol
image_type: conda
image_name: watsonx
- provider_type: remote::brave-search
- provider_type: remote::tavily-search
- provider_type: inline::rag-runtime
- provider_type: remote::model-context-protocol
image_type: venv
additional_pip_packages:
- sqlalchemy[asyncio]
- aiosqlite