diff --git a/llama_stack/cli/stack/build.py b/llama_stack/cli/stack/build.py
index 5e32af494..d2c1bf324 100644
--- a/llama_stack/cli/stack/build.py
+++ b/llama_stack/cli/stack/build.py
@@ -71,13 +71,12 @@ class StackBuild(Subcommand):
             help="Name of the Llama Stack build to override from template config. This name will be used as paths to store configuration files, build conda environments/docker images. If not specified, will use the name from the template config. ",
         )

-        # 1. llama stack build
-
-        # 2. llama stack build --list-templates
-
-        # 2. llama stack build --template --name
-
-        # 3. llama stack build --config
+        self.parser.add_argument(
+            "--image-type",
+            type=str,
+            help="Image Type to use for the build. This can be either conda or docker. If not specified, will use conda by default",
+            default="conda",
+        )

     def _run_stack_build_command_from_build_config(
         self, build_config: BuildConfig
@@ -135,7 +134,6 @@ class StackBuild(Subcommand):
         headers = [
             "Template Name",
             "Providers",
-            "Image Type",
             "Description",
         ]

@@ -145,7 +143,6 @@ class StackBuild(Subcommand):
                 [
                     spec.name,
                     json.dumps(spec.distribution_spec.providers, indent=2),
-                    spec.image_type,
                     spec.distribution_spec.description,
                 ]
             )
@@ -182,6 +179,7 @@ class StackBuild(Subcommand):
             with open(build_path, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
                 build_config.name = args.name
+                build_config.image_type = args.image_type
                 self._run_stack_build_command_from_build_config(build_config)
             return

diff --git a/llama_stack/distribution/configure_container.sh b/llama_stack/distribution/configure_container.sh
index 68009f8b1..49ef439a9 100755
--- a/llama_stack/distribution/configure_container.sh
+++ b/llama_stack/distribution/configure_container.sh
@@ -28,4 +28,4 @@ set -x
 podman run -it \
   -v $host_build_dir:$container_build_dir \
   $docker_image \
-  llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
+  llama stack configure --config ./llamastack-build.yaml --output-dir $container_build_dir
diff --git a/llama_stack/distribution/templates/local-conda-build.yaml b/llama_stack/distribution/templates/local-build.yaml
similarity index 93%
rename from llama_stack/distribution/templates/local-conda-build.yaml
rename to llama_stack/distribution/templates/local-build.yaml
index ba541502e..f10461256 100644
--- a/llama_stack/distribution/templates/local-conda-build.yaml
+++ b/llama_stack/distribution/templates/local-build.yaml
@@ -1,4 +1,4 @@
-name: local-conda
+name: local
 distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:
diff --git a/llama_stack/distribution/templates/local-docker-build.yaml b/llama_stack/distribution/templates/local-docker-build.yaml
deleted file mode 100644
index ebccd2cf2..000000000
--- a/llama_stack/distribution/templates/local-docker-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-docker
-distribution_spec:
-  description: Use code from `llama_stack` itself to serve all llama stack APIs
-  providers:
-    inference: meta-reference
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: docker
diff --git a/llama_stack/distribution/templates/local-fireworks-conda-build.yaml b/llama_stack/distribution/templates/local-fireworks-build.yaml
similarity index 90%
rename from llama_stack/distribution/templates/local-fireworks-conda-build.yaml
rename to llama_stack/distribution/templates/local-fireworks-build.yaml
index 9c4ff0bb4..33bdee3b5 100644
--- a/llama_stack/distribution/templates/local-fireworks-conda-build.yaml
+++ b/llama_stack/distribution/templates/local-fireworks-build.yaml
@@ -1,4 +1,4 @@
-name: local-fireworks-conda
+name: local-fireworks
 distribution_spec:
   description: Use Fireworks.ai for running LLM inference
   providers:
diff --git a/llama_stack/distribution/templates/local-ollama-conda-build.yaml b/llama_stack/distribution/templates/local-ollama-build.yaml
similarity index 91%
rename from llama_stack/distribution/templates/local-ollama-conda-build.yaml
rename to llama_stack/distribution/templates/local-ollama-build.yaml
index 0d27e4d6b..d9116b4b1 100644
--- a/llama_stack/distribution/templates/local-ollama-conda-build.yaml
+++ b/llama_stack/distribution/templates/local-ollama-build.yaml
@@ -1,4 +1,4 @@
-name: local-ollama-conda
+name: local-ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
diff --git a/llama_stack/distribution/templates/local-tgi-conda-build.yaml b/llama_stack/distribution/templates/local-tgi-build.yaml
similarity index 94%
rename from llama_stack/distribution/templates/local-tgi-conda-build.yaml
rename to llama_stack/distribution/templates/local-tgi-build.yaml
index 9a6fce105..e764aef8c 100644
--- a/llama_stack/distribution/templates/local-tgi-conda-build.yaml
+++ b/llama_stack/distribution/templates/local-tgi-build.yaml
@@ -1,4 +1,4 @@
-name: local-tgi-conda
+name: local-tgi
 distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
   providers:
diff --git a/llama_stack/distribution/templates/local-together-conda-build.yaml b/llama_stack/distribution/templates/local-together-build.yaml
similarity index 91%
rename from llama_stack/distribution/templates/local-together-conda-build.yaml
rename to llama_stack/distribution/templates/local-together-build.yaml
index 8cf867971..1ab891518 100644
--- a/llama_stack/distribution/templates/local-together-conda-build.yaml
+++ b/llama_stack/distribution/templates/local-together-build.yaml
@@ -1,4 +1,4 @@
-name: local-tgi-conda
+name: local-together
 distribution_spec:
   description: Use Together.ai for running LLM inference
   providers:
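Usage note (not part of the patch): with this change the image type is no longer baked into each template (`local-docker-build.yaml` is deleted and the `-conda` suffixes are dropped), and `build_config.image_type` is instead set from the new `--image-type` flag, which defaults to conda. A sketch of the resulting invocation, assuming one of the renamed templates above; the build name is illustrative:

    # Build the TGI template as a docker image instead of the default conda env
    llama stack build --config llama_stack/distribution/templates/local-tgi-build.yaml --name my-tgi-stack --image-type docker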