From 13871d5a5b1d589cb1739a11f1078b446be4672a Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Mon, 16 Sep 2024 10:56:05 -0700
Subject: [PATCH] address nits, remove docker_image=null

---
 llama_toolchain/cli/stack/build.py                          | 6 ++----
 .../distributions/conda/local-conda-example-build.yaml      | 1 -
 .../conda/local-fireworks-conda-example-build.yaml          | 1 -
 .../conda/local-ollama-conda-example-build.yaml             | 1 -
 .../distributions/conda/local-tgi-conda-example-build.yaml  | 1 -
 .../conda/local-together-conda-example-build.yaml           | 1 -
 .../distributions/distribution_registry/local-ollama.yaml   | 1 -
 .../local-plus-fireworks-inference.yaml                     | 1 -
 .../distribution_registry/local-plus-tgi-inference.yaml     | 1 -
 .../local-plus-together-inference.yaml                      | 1 -
 .../configs/distributions/distribution_registry/local.yaml  | 1 -
 .../distributions/docker/local-docker-example-build.yaml    | 1 -
 llama_toolchain/core/configure_container.sh                 | 5 ++++-
 13 files changed, 6 insertions(+), 16 deletions(-)

diff --git a/llama_toolchain/cli/stack/build.py b/llama_toolchain/cli/stack/build.py
index e13cd10fb..36cd480fc 100644
--- a/llama_toolchain/cli/stack/build.py
+++ b/llama_toolchain/cli/stack/build.py
@@ -29,13 +29,13 @@ class StackBuild(Subcommand):
         self.parser.add_argument(
             "config",
             type=str,
-            help="Path to a config file to use for the build",
+            help="Path to a config file to use for the build. You may find example configs in llama_toolchain/configs/distributions",
         )

         self.parser.add_argument(
             "--name",
             type=str,
-            help="Override the name of the llama stack build",
+            help="Name of the llama stack build to override from template config",
         )

     def _run_stack_build_command_from_build_config(
@@ -92,5 +92,3 @@ class StackBuild(Subcommand):
         if args.name:
             build_config.name = args.name
         self._run_stack_build_command_from_build_config(build_config)
-
-        return
diff --git a/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml
index cdb5243ca..2a25cb9dd 100644
--- a/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml
@@ -1,7 +1,6 @@
 name: local-conda-example
 distribution_spec:
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
-  docker_image: null
   providers:
     inference: meta-reference
     memory: meta-reference-faiss
diff --git a/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml
index 4843c5c55..c3b38aebe 100644
--- a/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-fireworks-conda-example-build.yaml
@@ -1,7 +1,6 @@
 name: local-fireworks-conda-example
 distribution_spec:
   description: Use Fireworks.ai for running LLM inference
-  docker_image: null
   providers:
     inference: remote::fireworks
     memory: meta-reference-faiss
diff --git a/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml
index b464a7151..31bc9d0e9 100644
--- a/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-ollama-conda-example-build.yaml
@@ -1,7 +1,6 @@
 name: local-ollama-conda-example
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
-  docker_image: null
   providers:
     inference: remote::ollama
     memory: meta-reference-faiss
diff --git a/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml
index 0f5490192..1ac6f44ba 100644
--- a/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-tgi-conda-example-build.yaml
@@ -1,7 +1,6 @@
 name: local-tgi-conda-example
 distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
-  docker_image: null
   providers:
     inference: remote::tgi
     memory: meta-reference-faiss
diff --git a/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml b/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml
index 2cefb55dc..4aa13fed5 100644
--- a/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml
+++ b/llama_toolchain/configs/distributions/conda/local-together-conda-example-build.yaml
@@ -1,7 +1,6 @@
 name: local-tgi-conda-example
 distribution_spec:
   description: Use Together.ai for running LLM inference
-  docker_image: null
   providers:
     inference: remote::together
     memory: meta-reference-faiss
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml
index 134dc538c..4518a7eb7 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-ollama.yaml
@@ -1,5 +1,4 @@
 description: Like local, but use ollama for running LLM inference
-docker_image: null
 providers:
   inference: remote::ollama
   safety: meta-reference
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml
index 0dc8ae19b..520274e1c 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-plus-fireworks-inference.yaml
@@ -1,5 +1,4 @@
 description: Use Fireworks.ai for running LLM inference
-docker_image: null
 providers:
   inference: remote::fireworks
   safety: meta-reference
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml
index 37eff0cc9..a0b36b07e 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-plus-tgi-inference.yaml
@@ -1,5 +1,4 @@
 description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
-docker_image: null
 providers:
   inference: remote::tgi
   safety: meta-reference
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml b/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml
index 198d7e2fd..2681b619c 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local-plus-together-inference.yaml
@@ -1,5 +1,4 @@
 description: Use Together.ai for running LLM inference
-docker_image: null
 providers:
   inference: remote::together
   safety: meta-reference
diff --git a/llama_toolchain/configs/distributions/distribution_registry/local.yaml b/llama_toolchain/configs/distributions/distribution_registry/local.yaml
index 3c5f57d69..03dfb1dab 100644
--- a/llama_toolchain/configs/distributions/distribution_registry/local.yaml
+++ b/llama_toolchain/configs/distributions/distribution_registry/local.yaml
@@ -1,5 +1,4 @@
 description: Use code from `llama_toolchain` itself to serve all llama stack APIs
-docker_image: null
 providers:
   inference: meta-reference
   memory: meta-reference-faiss
diff --git a/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml b/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml
index 200fab607..0bdb18802 100644
--- a/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml
+++ b/llama_toolchain/configs/distributions/docker/local-docker-example-build.yaml
@@ -1,7 +1,6 @@
 name: local-docker-example
 distribution_spec:
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
-  docker_image: null
   providers:
     inference: meta-reference
     memory: meta-reference-faiss
diff --git a/llama_toolchain/core/configure_container.sh b/llama_toolchain/core/configure_container.sh
index 5a25a475f..68009f8b1 100755
--- a/llama_toolchain/core/configure_container.sh
+++ b/llama_toolchain/core/configure_container.sh
@@ -25,4 +25,7 @@ host_build_dir="$2"
 container_build_dir="/app/builds"

 set -x
-podman run -it -v $host_build_dir:$container_build_dir $docker_image llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
+podman run -it \
+  -v $host_build_dir:$container_build_dir \
+  $docker_image \
+  llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
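
For context, a minimal usage sketch of the command touched above, assuming the StackBuild subcommand is invoked as `llama stack build` (suggested by the `llama stack configure` call in configure_container.sh). The config path is one of the example configs referenced in the new help text; the --name value is a hypothetical override of the template's `name:` field:

    # hypothetical invocation; config path comes from the shipped examples,
    # --name overrides the `name:` field from the template config
    llama stack build llama_toolchain/configs/distributions/conda/local-conda-example-build.yaml \
      --name my-local-stack

With `docker_image: null` dropped from every example, the field is presumably optional and defaults to null when omitted, so the templates only need to spell out the description and providers they actually set.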