diff --git a/docs/zero_to_hero_guide/README.md b/docs/zero_to_hero_guide/README.md
index 9f756de26..96f9768de 100644
--- a/docs/zero_to_hero_guide/README.md
+++ b/docs/zero_to_hero_guide/README.md
@@ -86,11 +86,11 @@ If you're looking for more specific topics, we have a [Zero to Hero Guide](#next
    llama stack build --template ollama --image-type conda
    ```
    **Expected Output:**
-   ```
+   ```bash
    ...
-   Build Successful! Next steps:
-   1. Set the environment variables: LLAMA_STACK_PORT, OLLAMA_URL, INFERENCE_MODEL, SAFETY_MODEL
-   2. `llama stack run /Users//.llama/distributions/llamastack-ollama/ollama-run.yaml
+   Build Successful!
+   You can find the newly-built template here: ~/.llama/distributions/ollama/ollama-run.yaml
+   You can run the new Llama Stack Distro via: llama stack run ~/.llama/distributions/ollama/ollama-run.yaml --image-type conda
    ```

 3. **Set the ENV variables by exporting them to the terminal**:
diff --git a/llama_stack/cli/stack/_build.py b/llama_stack/cli/stack/_build.py
index 2787a93d5..f3a29b947 100644
--- a/llama_stack/cli/stack/_build.py
+++ b/llama_stack/cli/stack/_build.py
@@ -19,7 +19,7 @@ import yaml
 from prompt_toolkit import prompt
 from prompt_toolkit.completion import WordCompleter
 from prompt_toolkit.validation import Validator
-from termcolor import cprint
+from termcolor import colored, cprint

 from llama_stack.cli.stack.utils import ImageType
 from llama_stack.cli.table import print_table
@@ -389,6 +389,11 @@ def _run_stack_build_command_from_build_config(
             shutil.copy(path, run_config_file)

         cprint("Build Successful!", color="green")
+        cprint("You can find the newly-built template here: " + colored(template_path, "light_blue"))
+        cprint(
+            "You can run the new Llama Stack distro via: "
+            + colored(f"llama stack run {template_path} --image-type {build_config.image_type}", "light_blue")
+        )
         return template_path
     else:
         return _generate_run_config(build_config, build_dir, image_name)
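
For reference, a minimal standalone sketch of the `termcolor` pattern the Python hunk relies on: `cprint` prints a colored line, while `colored` returns an ANSI-colored string that can be concatenated into a larger message. The `template_path` and `image_type` values below are placeholders for illustration only, and the `"light_blue"` color name assumes a recent termcolor release.

```python
# Sketch of the print pattern added by the patch (placeholder values, not the real ones).
from termcolor import colored, cprint

template_path = "~/.llama/distributions/ollama/ollama-run.yaml"  # hypothetical path
image_type = "conda"  # hypothetical image type

# Whole-line coloring via cprint.
cprint("Build Successful!", color="green")

# Partial coloring: only the path / command is highlighted, the rest stays default.
cprint("You can find the newly-built template here: " + colored(template_path, "light_blue"))
cprint(
    "You can run the new Llama Stack distro via: "
    + colored(f"llama stack run {template_path} --image-type {image_type}", "light_blue")
)
```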