As part of the build process, we now include the generated run.yaml
(based on the provided build configuration file) in the container, and we
updated the entrypoint to use this run configuration as well.
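Conceptually, the container no longer needs a run configuration mounted at
start time; the entrypoint points at the config baked into the image. A
minimal sketch of the equivalent invocation, assuming the generated file is
placed at /app/run.yaml (that path is an assumption, not necessarily where
the build actually puts it):
```
# Hypothetical: start the stack from the config baked into the image.
# /app/run.yaml is an assumed location inside the container.
llama stack run /app/run.yaml
```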
Given this simple distribution configuration:
```
# build.yaml
version: '2'
distribution_spec:
  description: Use (an external) Ollama server for running LLM inference
  providers:
    inference:
    - remote::ollama
    vector_io:
    - inline::faiss
    safety:
    - inline::llama-guard
    agents:
    - inline::meta-reference
    telemetry:
    - inline::meta-reference
    eval:
    - inline::meta-reference
    datasetio:
    - remote::huggingface
    - inline::localfs
    scoring:
    - inline::basic
    - inline::llm-as-judge
    - inline::braintrust
    tool_runtime:
    - remote::brave-search
    - remote::tavily-search
    - inline::code-interpreter
    - inline::rag-runtime
    - remote::model-context-protocol
    - remote::wolfram-alpha
  container_image: "registry.access.redhat.com/ubi9"
image_type: container
image_name: test
```
Build it:
```
llama stack build --config build.yaml
```
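If the build succeeds, the resulting image appears in the local registry; the
name and tag below are taken from the run command that follows (the `leseb-`
prefix comes from the builder's environment, not from build.yaml):
```
# List the freshly built image (name taken from the run example below).
podman images localhost/leseb-test
```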
Run it:
```
podman run --rm \
-p 8321:8321 \
-e OLLAMA_URL=http://host.containers.internal:11434 \
--name llama-stack-server \
localhost/leseb-test:0.2.2
```
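Since the run configuration is baked in, no volume mount for a run.yaml is
needed. Two hedged sanity checks, assuming the embedded config lives at
/app/run.yaml (an assumption) and that the server exposes a health route at
/v1/health:
```
# Hypothetical: print the baked-in run config (path is assumed).
podman run --rm --entrypoint cat localhost/leseb-test:0.2.2 /app/run.yaml

# Probe the running server (health route assumed to be /v1/health).
curl http://localhost:8321/v1/health
```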
Signed-off-by: Sébastien Han <seb@redhat.com>
The change is covered by this unit test:
```
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.cli.stack._build import (
    _run_stack_build_command_from_build_config,
)
from llama_stack.distribution.datatypes import BuildConfig, DistributionSpec
from llama_stack.distribution.utils.image_types import LlamaStackImageType


def test_container_build_passes_path(monkeypatch, tmp_path):
    called_with = {}

    # Spy that stands in for build_image and records the arguments it receives.
    def spy_build_image(cfg, build_file_path, image_name, template_or_config, run_config=None):
        called_with["path"] = template_or_config
        called_with["run_config"] = run_config
        return 0

    monkeypatch.setattr(
        "llama_stack.cli.stack._build.build_image",
        spy_build_image,
        raising=True,
    )

    # Minimal container build config with an empty provider registry.
    cfg = BuildConfig(
        image_type=LlamaStackImageType.CONTAINER.value,
        distribution_spec=DistributionSpec(providers={}, description=""),
    )

    _run_stack_build_command_from_build_config(cfg, image_name="dummy")

    # The build command must hand build_image a path to an existing build file
    # and, for this minimal config, no separate run config.
    assert "path" in called_with
    assert isinstance(called_with["path"], str)
    assert Path(called_with["path"]).exists()
    assert called_with["run_config"] is None
```
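To run just this test locally, pytest's keyword filter works (the test file's
path in the repo isn't shown here, so the command selects the test by name
instead):
```
# Run only the spy-based build test by name.
pytest -k test_container_build_passes_path
```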