From 780b4c6eead4df0faa5a73fdd5308e84f15e2f33 Mon Sep 17 00:00:00 2001
From: ehhuang
Date: Wed, 9 Jul 2025 20:33:45 -0700
Subject: [PATCH] fix: llama stack run starter in conda (#2679)

# What does this PR do?

`llama stack run starter` in a conda environment fails with
'--config is required for venv and conda environments' because the template
name is passed to start_stack.sh as `--template`, and start_stack.sh does not
process templates. This change resolves the template to its run.yaml up front
and always passes `--config` to the script.

## Test Plan

`llama stack run starter`

---
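Note for reviewers (not part of the commit message): the new
`_resolve_config_and_template` helper tries three sources in order: a literal
path, an in-repo template, then a `llama stack build` output under ~/.llama.
A minimal standalone sketch of that lookup order follows; `REPO_ROOT` and
`DISTRIBS_BASE_DIR` here are stand-ins for the llama_stack constants, and the
default locations are assumptions for illustration only.

```python
# Illustrative sketch only -- mirrors the lookup order in _resolve_config_and_template.
# REPO_ROOT and DISTRIBS_BASE_DIR are stand-ins for the llama_stack constants.
from pathlib import Path

REPO_ROOT = Path(".")                                          # assumed: repo checkout root
DISTRIBS_BASE_DIR = Path.home() / ".llama" / "distributions"   # assumed location

def resolve(config_arg: str) -> tuple[Path, str | None]:
    """Lookup order: literal path -> in-repo template -> `llama stack build` output."""
    config_file = Path(config_arg)  # 1) treat the argument as a literal path
    template_name = None
    if not config_file.exists() and not config_arg.endswith(".yaml"):
        # 2) in-repo template, e.g. llama_stack/templates/starter/run.yaml
        candidate = REPO_ROOT / "llama_stack" / "templates" / config_arg / "run.yaml"
        if candidate.exists():
            return candidate, config_arg
        # 3) build config saved under ~/.llama by `llama stack build`
        config_file = DISTRIBS_BASE_DIR / f"llamastack-{config_arg}" / f"{config_arg}-run.yaml"
    return config_file, template_name
```

Whatever this resolves to is now handed to start_stack.sh as `--config`, so
the script no longer needs to understand template names at all.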
f"{args.config}-run.yaml") - - if not config_file.exists(): - self.parser.error( - f"File {str(config_file)} does not exist.\n\nPlease run `llama stack build` to generate (and optionally edit) a run.yaml file" - ) - - if not config_file.is_file(): - self.parser.error( - f"Config file must be a valid file path, '{config_file}' is not a file: type={type(config_file)}" - ) - + if config_file: logger.info(f"Using run configuration: {config_file}") try: @@ -138,8 +149,6 @@ class StackRun(Subcommand): self.parser.error(f"failed to parse config file '{config_file}':\n {e}") else: config = None - config_file = None - template_name = None # If neither image type nor image name is provided, assume the server should be run directly # using the current environment packages. @@ -172,10 +181,7 @@ class StackRun(Subcommand): run_args.extend([str(args.port)]) if config_file: - if template_name: - run_args.extend(["--template", str(template_name)]) - else: - run_args.extend(["--config", str(config_file)]) + run_args.extend(["--config", str(config_file)]) if args.env: for env_var in args.env: diff --git a/tests/integration/fixtures/common.py b/tests/integration/fixtures/common.py index 6d37d5341..27b2d6dc3 100644 --- a/tests/integration/fixtures/common.py +++ b/tests/integration/fixtures/common.py @@ -197,7 +197,7 @@ def llama_stack_client(request, provider_data): server_process = start_llama_stack_server(config_name) # Wait for server to be ready - if not wait_for_server_ready(base_url, timeout=30, process=server_process): + if not wait_for_server_ready(base_url, timeout=120, process=server_process): print("Server failed to start within timeout") server_process.terminate() raise RuntimeError(