From 4349d5b1c411e31002a19b9eb831d210aa9e865c Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Mon, 28 Oct 2024 22:24:19 -0700
Subject: [PATCH] Update broken references

---
 docs/building_distro.md            |  6 +++---
 docs/cli_reference.md              |  2 +-
 docs/getting_started.ipynb         |  1 -
 docs/getting_started.md            |  1 -
 docs/source/cli_reference.md       |  6 +++---
 docs/source/getting_started.md     |  5 ++---
 llama_stack/cli/stack/build.py     |  1 +
 llama_stack/cli/stack/configure.py | 19 +------------------
 8 files changed, 11 insertions(+), 30 deletions(-)

diff --git a/docs/building_distro.md b/docs/building_distro.md
index 483703064..234c553da 100644
--- a/docs/building_distro.md
+++ b/docs/building_distro.md
@@ -58,11 +58,11 @@ llama stack build --list-templates
 You may then pick a template to build your distribution with providers fitted to your liking.
 
 ```
-llama stack build --template tgi --name tgi
+llama stack build --template tgi
 ```
 
 ```
-$ llama stack build --template tgi --name tgi
+$ llama stack build --template tgi
 ...
 ...
 Build spec configuration saved at ~/.conda/envs/llamastack-tgi/tgi-build.yaml
@@ -77,7 +77,7 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 
 ```
 $ cat llama_stack/templates/ollama/build.yaml
-name: local-ollama
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
diff --git a/docs/cli_reference.md b/docs/cli_reference.md
index 5ec08282a..39ac99615 100644
--- a/docs/cli_reference.md
+++ b/docs/cli_reference.md
@@ -298,7 +298,7 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 
 ```
 $ cat build.yaml
-name: local-ollama
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
diff --git a/docs/getting_started.ipynb b/docs/getting_started.ipynb
index 00633b1d3..c8fc63e5d 100644
--- a/docs/getting_started.ipynb
+++ b/docs/getting_started.ipynb
@@ -68,7 +68,6 @@
     "```\n",
     "$ llama stack configure llamastack-local-gpu\n",
     "\n",
-    "Could not find llamastack-local-gpu. Trying conda build name instead...\n",
     "Could not find /home/hjshah/.conda/envs/llamastack-llamastack-local-gpu/llamastack-local-gpu-build.yaml. Trying docker image name instead...\n",
     "+ podman run --network host -it -v /home/hjshah/.llama/builds/docker:/app/builds llamastack-local-gpu llama stack configure ./llamastack-build.yaml --output-dir /app/builds\n",
     "\n",
diff --git a/docs/getting_started.md b/docs/getting_started.md
index bb4e76cf2..49c7cd5a0 100644
--- a/docs/getting_started.md
+++ b/docs/getting_started.md
@@ -97,7 +97,6 @@ docker run -it -p 5000:5000 -v ~/.llama:/root/.llama -v ./run.yaml:/root/my-run.
 ```
 $ llama stack configure my-local-stack
 
-Could not find my-local-stack. Trying conda build name instead...
 Configuring API `inference`...
 === Configuring provider `meta-reference` for API inference...
 Enter value for model (default: Llama3.1-8B-Instruct) (required):
diff --git a/docs/source/cli_reference.md b/docs/source/cli_reference.md
index a0cd3219e..81da1a773 100644
--- a/docs/source/cli_reference.md
+++ b/docs/source/cli_reference.md
@@ -296,9 +296,9 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 - The config file will be of contents like the ones in `llama_stack/distributions/templates/`.
 
 ```
-$ cat llama_stack/distribution/templates/local-ollama-build.yaml
+$ cat llama_stack/templates/ollama/build.yaml
 
-name: local-ollama
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
@@ -311,7 +311,7 @@ image_type: conda
 ```
 
 ```
-llama stack build --config llama_stack/distribution/templates/local-ollama-build.yaml
+llama stack build --config llama_stack/templates/ollama/build.yaml
 ```
 
 #### How to build distribution with Docker image
diff --git a/docs/source/getting_started.md b/docs/source/getting_started.md
index a8f1a5f01..b1450cd42 100644
--- a/docs/source/getting_started.md
+++ b/docs/source/getting_started.md
@@ -86,7 +86,6 @@ llama stack configure
 ```
 $ llama stack configure my-local-stack
 
-Could not find my-local-stack. Trying conda build name instead...
 Configuring API `inference`...
 === Configuring provider `meta-reference` for API inference...
 Enter value for model (default: Llama3.1-8B-Instruct) (required):
@@ -235,9 +234,9 @@ You may now run `llama stack configure tgi` or `llama stack configure ~/.conda/e
 - The config file will be of contents like the ones in `llama_stack/distributions/templates/`.
 
 ```
-$ cat llama_stack/distribution/templates/local-ollama-build.yaml
+$ cat llama_stack/templates/ollama/build.yaml
 
-name: local-ollama
+name: ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py
index 77486b022..0ba39265b 100644
--- a/llama_stack/cli/stack/build.py
+++ b/llama_stack/cli/stack/build.py
@@ -67,6 +67,7 @@ class StackBuild(Subcommand):
             type=str,
             help="Image Type to use for the build. This can be either conda or docker. If not specified, will use the image type from the template config.",
             choices=["conda", "docker"],
+            default="conda",
         )
 
     def _run_stack_build_command(self, args: argparse.Namespace) -> None:
diff --git a/llama_stack/cli/stack/configure.py b/llama_stack/cli/stack/configure.py
index 9ec3b4357..779bb90fc 100644
--- a/llama_stack/cli/stack/configure.py
+++ b/llama_stack/cli/stack/configure.py
@@ -55,19 +55,12 @@ class StackConfigure(Subcommand):
         docker_image = None
         build_config_file = Path(args.config)
-
         if build_config_file.exists():
             with open(build_config_file, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
 
             self._configure_llama_distribution(build_config, args.output_dir)
             return
 
-        # if we get here, we need to try to find the conda build config file
-        cprint(
-            f"Could not find {build_config_file}. Trying conda build name instead...",
-            color="green",
-        )
-
         conda_dir = (
             Path(os.path.expanduser("~/.conda/envs")) / f"llamastack-{args.config}"
         )
@@ -80,19 +73,14 @@ class StackConfigure(Subcommand):
                 break
 
         build_config_file = Path(conda_dir) / f"{args.config}-build.yaml"
-
         if build_config_file.exists():
             with open(build_config_file, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
 
+            cprint(f"Using {build_config_file}...", "green")
             self._configure_llama_distribution(build_config, args.output_dir)
             return
 
-        # if we get here, we need to try to find the docker image
-        cprint(
-            f"Could not find {build_config_file}. Trying docker image name instead...",
-            color="green",
-        )
         docker_image = args.config
         builds_dir = BUILDS_BASE_DIR / ImageType.docker.value
         if args.output_dir:
@@ -105,15 +93,10 @@ class StackConfigure(Subcommand):
 
         script_args = [script, docker_image, str(builds_dir)]
         return_code = run_with_pty(script_args)
-
-        # we have regenerated the build config file with script, now check if it exists
         if return_code != 0:
             self.parser.error(
                 f"Failed to configure container {docker_image} with return code {return_code}. Please run `llama stack build` first. "
             )
-            return
-
-        return
 
     def _configure_llama_distribution(
         self,
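
A minimal usage sketch of the workflow the updated docs describe, using the `tgi` template from the examples above; the template name and build paths are illustrative and will vary by setup:

# Build a distribution from a template; per this patch, the image type defaults to conda when not specified.
llama stack build --template tgi

# Configure it by build name, or point at the saved build spec directly.
llama stack configure tgi
llama stack configure ~/.conda/envs/llamastack-tgi/tgi-build.yaml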