Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-30 07:39:38 +00:00)
fix docker
commit 2ba8047013
parent d3d66ba40b
8 changed files with 13 additions and 25 deletions
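Taken together, the change moves the conda-versus-docker choice out of the individual template configs and into the CLI: llama stack build gains an --image-type flag (defaulting to conda), the bundled templates drop their -conda suffix, the dedicated local-docker template is deleted, and the in-container configure step now passes the build file via --config.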
@@ -71,13 +71,12 @@ class StackBuild(Subcommand):
             help="Name of the Llama Stack build to override from template config. This name will be used as paths to store configuration files, build conda environments/docker images. If not specified, will use the name from the template config. ",
         )
 
-        # 1. llama stack build
-        # 2. llama stack build --list-templates
-        # 2. llama stack build --template <template_name> --name <name>
-        # 3. llama stack build --config <config_path>
+        self.parser.add_argument(
+            "--image-type",
+            type=str,
+            help="Image Type to use for the build. This can be either conda or docker. If not specified, will use conda by default",
+            default="conda",
+        )
 
     def _run_stack_build_command_from_build_config(
         self, build_config: BuildConfig
@@ -135,7 +134,6 @@ class StackBuild(Subcommand):
         headers = [
             "Template Name",
             "Providers",
-            "Image Type",
             "Description",
         ]
 
@@ -145,7 +143,6 @@ class StackBuild(Subcommand):
                 [
                     spec.name,
                     json.dumps(spec.distribution_spec.providers, indent=2),
-                    spec.image_type,
                     spec.distribution_spec.description,
                 ]
             )
@@ -182,6 +179,7 @@ class StackBuild(Subcommand):
             with open(build_path, "r") as f:
                 build_config = BuildConfig(**yaml.safe_load(f))
                 build_config.name = args.name
+                build_config.image_type = args.image_type
                 self._run_stack_build_command_from_build_config(build_config)
 
             return
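The build.py hunks above wire the new flag through to BuildConfig. The snippet below is a minimal, self-contained sketch of that flow, not the actual llama_stack code: DistributionSpec and BuildConfig are simplified stand-ins for the real models, and load_template_build is a hypothetical helper used only to show how the CLI values override the template.

# Simplified stand-ins for llama_stack's models, for illustration only.
from dataclasses import dataclass, field
from typing import Optional

import yaml


@dataclass
class DistributionSpec:
    description: str = ""
    providers: dict = field(default_factory=dict)


@dataclass
class BuildConfig:
    name: str
    distribution_spec: DistributionSpec
    image_type: str = "conda"  # "conda" or "docker"


def load_template_build(build_path: str, name: Optional[str], image_type: str) -> BuildConfig:
    # Parse the template YAML (name, distribution_spec, ...) into a config object.
    with open(build_path, "r") as f:
        raw = yaml.safe_load(f)
    config = BuildConfig(
        name=raw["name"],
        distribution_spec=DistributionSpec(**raw["distribution_spec"]),
    )
    # As in the diff: CLI values override the template, and image_type falls
    # back to the argparse default of "conda" when the flag is not given.
    if name:
        config.name = name
    config.image_type = image_type
    return config

With this flow, a single template such as name: local can produce either a conda environment or a docker image depending on the --image-type value.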
@@ -28,4 +28,4 @@ set -x
 podman run -it \
   -v $host_build_dir:$container_build_dir \
   $docker_image \
-  llama stack configure ./llamastack-build.yaml --output-dir $container_build_dir
+  llama stack configure --config ./llamastack-build.yaml --output-dir $container_build_dir
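The only change in the container script is the added --config flag on the configure call, presumably to match a llama stack configure interface that takes the build file as a flag rather than a positional argument.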
@@ -1,4 +1,4 @@
-name: local-conda
+name: local
 distribution_spec:
   description: Use code from `llama_stack` itself to serve all llama stack APIs
   providers:
@@ -1,10 +0,0 @@
-name: local-docker
-distribution_spec:
-  description: Use code from `llama_stack` itself to serve all llama stack APIs
-  providers:
-    inference: meta-reference
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: docker
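With the image type now chosen at build time (for instance, something like llama stack build --template local --name my-local --image-type docker, shown here only to illustrate the intent), a dedicated local-docker template is redundant, which is presumably why this file is deleted rather than renamed.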
@@ -1,4 +1,4 @@
-name: local-fireworks-conda
+name: local-fireworks
 distribution_spec:
   description: Use Fireworks.ai for running LLM inference
   providers:
@@ -1,4 +1,4 @@
-name: local-ollama-conda
+name: local-ollama
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
   providers:
@@ -1,4 +1,4 @@
-name: local-tgi-conda
+name: local-tgi
 distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
   providers:
@@ -1,4 +1,4 @@
-name: local-tgi-conda
+name: local-together
 distribution_spec:
   description: Use Together.ai for running LLM inference
   providers:
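The remaining template changes follow one pattern: the -conda suffix is dropped now that the image type is no longer encoded in the template name (local-fireworks-conda becomes local-fireworks, local-ollama-conda becomes local-ollama, local-tgi-conda becomes local-tgi). The final hunk also corrects the Together template, whose name field previously read local-tgi-conda, apparently a copy-paste leftover, and now reads local-together.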