fix docker

Xi Yan 2024-09-18 14:13:57 -07:00
parent d3d66ba40b
commit 2ba8047013
8 changed files with 13 additions and 25 deletions

@@ -71,13 +71,12 @@ class StackBuild(Subcommand):
             help="Name of the Llama Stack build to override from template config. This name will be used as paths to store configuration files, build conda environments/docker images. If not specified, will use the name from the template config. ",
         )
-        # 1. llama stack build
-        # 2. llama stack build --list-templates
-        # 2. llama stack build --template <template_name> --name <name>
-        # 3. llama stack build --config <config_path>
+        self.parser.add_argument(
+            "--image-type",
+            type=str,
+            help="Image Type to use for the build. This can be either conda or docker. If not specified, will use conda by default",
+            default="conda",
+        )

     def _run_stack_build_command_from_build_config(
         self, build_config: BuildConfig
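
With the image type promoted to a CLI flag, one template can produce either a conda environment or a docker image. A rough sketch of the resulting invocations, assuming the --template and --name flags defined earlier in this file (the build name my-stack is illustrative, not from this commit):

$ llama stack build --list-templates
$ llama stack build --template local --name my-stack --image-type conda
$ llama stack build --template local --name my-stack --image-type docker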
@@ -135,7 +134,6 @@ class StackBuild(Subcommand):
         headers = [
             "Template Name",
             "Providers",
-            "Image Type",
             "Description",
         ]
@@ -145,7 +143,6 @@ class StackBuild(Subcommand):
                 [
                     spec.name,
                     json.dumps(spec.distribution_spec.providers, indent=2),
-                    spec.image_type,
                     spec.distribution_spec.description,
                 ]
             )
@@ -182,6 +179,7 @@ class StackBuild(Subcommand):
             with open(build_path, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
                 build_config.name = args.name
+                build_config.image_type = args.image_type
                 self._run_stack_build_command_from_build_config(build_config)
             return
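
Because the override is applied after the YAML is parsed, an existing build config no longer pins the image type; the flag decides at build time. A hypothetical config-driven run (my-build.yaml is a placeholder path):

$ llama stack build --config ./my-build.yaml --image-type docker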

@@ -28,4 +28,4 @@ set -x
 podman run -it \
   -v $host_build_dir:$container_build_dir \
   $docker_image \
-  llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
+  llama stack configure --config ./llamastack-build.yaml --output-dir $container_build_dir
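
The configure step now receives the build file through an explicit --config flag instead of a positional argument. Run outside the container, the equivalent call would look roughly like this (the output directory is a placeholder):

$ llama stack configure --config ./llamastack-build.yaml --output-dir ./stack-output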

@@ -1,4 +1,4 @@
-name: local-conda
+name: local
 distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:

@@ -1,10 +0,0 @@
-name: local-docker
-distribution_spec:
-  description: Use code from `llama_stack` itself to serve all llama stack APIs
-  providers:
-    inference: meta-reference
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: docker
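
This template is now redundant: its provider list matched the local template exactly, and the image type it hard-coded is covered by the new flag. Something like the following sketch should reproduce the old behavior (the build name local-docker here is only illustrative):

$ llama stack build --template local --name local-docker --image-type docker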

@@ -1,4 +1,4 @@
-name: local-fireworks-conda
+name: local-fireworks
 distribution_spec:
   description: Use Fireworks.ai for running LLM inference
   providers:

@@ -1,4 +1,4 @@
-name: local-ollama-conda
+name: local-ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:

@@ -1,4 +1,4 @@
-name: local-tgi-conda
+name: local-tgi
 distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints) for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint.
   providers:

@@ -1,4 +1,4 @@
-name: local-tgi-conda
+name: local-together
 distribution_spec:
   description: Use Together.ai for running LLM inference
   providers:
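
This last rename also fixes a copy-paste collision: the file described Together.ai but reused the local-tgi-conda name. With the -conda suffixes dropped and docker handled by the flag, the template list should now read local, local-fireworks, local-ollama, local-tgi, local-together, which can be checked with:

$ llama stack build --list-templates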