mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 10:54:19 +00:00
Extend shorthand support for the llama stack run
command (#465)
**Summary:** Extend the shorthand run command so it can run successfully when the config exists under `DISTRIBS_BASE_DIR` (i.e. `~/.llama/distributions`).

For example, imagine you created a new stack using the `llama stack build` command and named it "my-awesome-llama-stack":

```
$ llama stack build
> Enter a name for your Llama Stack (e.g. my-local-stack): my-awesome-llama-stack
```

Previously, to run the stack you created you had to use the long config path:

```
llama stack run ~/.llama/distributions/llamastack-my-awesome-llama-stack/my-awesome-llama-stack-run.yaml
```

With this change, you can start it using the stack name instead of the full path:

```
llama stack run my-awesome-llama-stack
```

**Test Plan:**

Verify the command fails when the stack doesn't exist:

```
python3 -m llama_stack.cli.llama stack run my-test-stack
```

Output [FAILURE]:

```
usage: llama stack run [-h] [--port PORT] [--disable-ipv6] config
llama stack run: error: File /Users/vladimirivic/.llama/distributions/llamastack-my-test-stack/my-test-stack-run.yaml does not exist. Please run `llama stack build` to generate (and optionally edit) a run.yaml file
```

Create a new stack using `llama stack build`, naming it `my-test-stack`, then verify the command runs successfully:

```
python3 -m llama_stack.cli.llama stack run my-test-stack
```

Output [SUCCESS]:

```
Listening on ['::', '0.0.0.0']:5000
INFO:     Started server process [80146]
INFO:     Waiting for application startup.
INFO:     Application startup complete.
INFO:     Uvicorn running on http://['::', '0.0.0.0']:5000 (Press CTRL+C to quit)
```
This commit is contained in:
parent
57bafd0f8c
commit
f1b9578f8d
1 changed file with 12 additions and 1 deletion
|
```diff
@@ -48,7 +48,10 @@ class StackRun(Subcommand):
         from llama_stack.distribution.build import ImageType
         from llama_stack.distribution.configure import parse_and_maybe_upgrade_config
-        from llama_stack.distribution.utils.config_dirs import BUILDS_BASE_DIR
+        from llama_stack.distribution.utils.config_dirs import (
+            BUILDS_BASE_DIR,
+            DISTRIBS_BASE_DIR,
+        )
         from llama_stack.distribution.utils.exec import run_with_pty

         if not args.config:
@@ -68,6 +71,14 @@ class StackRun(Subcommand):
                 BUILDS_BASE_DIR / ImageType.docker.value / f"{args.config}-run.yaml"
             )

+        if not config_file.exists() and not args.config.endswith(".yaml"):
+            # check if it's a build config saved to ~/.llama dir
+            config_file = Path(
+                DISTRIBS_BASE_DIR
+                / f"llamastack-{args.config}"
+                / f"{args.config}-run.yaml"
+            )
+
         if not config_file.exists():
             self.parser.error(
                 f"File {str(config_file)} does not exist. Please run `llama stack build` to generate (and optionally edit) a run.yaml file"
```
Loading…
Add table
Add a link
Reference in a new issue