mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-12-05 18:27:22 +00:00

dockerfile

# What does this PR do?

## Test Plan

This commit is contained in:
parent f675fdda0f
commit a9ff8c5c9f

9 changed files with 288 additions and 392 deletions
.dockerignore (new file, 19 lines)

@@ -0,0 +1,19 @@
+.venv
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+*.so
+.git
+.gitignore
+htmlcov*
+.coverage
+coverage*
+.cache
+.mypy_cache
+.pytest_cache
+.ruff_cache
+uv.lock
+node_modules
+build
+/tmp
.github/workflows/install-script-ci.yml (7 changes)

@@ -30,8 +30,11 @@ jobs:
      - name: Build a single provider
        run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync \
-            llama stack build --template starter --image-type container --image-name test
+          docker build . \
+            -f containers/Containerfile \
+            --build-arg INSTALL_MODE=editable \
+            --build-arg DISTRO_NAME=starter \
+            --tag llama-stack:starter-ci

      - name: Run installer end-to-end
        run: |
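For a quick local sanity check, the same starter image the workflow builds can be produced and verified from the repository root; a minimal sketch, with the tag simply mirroring the one used in the step above:

```bash
# Build the starter image with the new Containerfile (editable install of the checkout)
docker build . \
  -f containers/Containerfile \
  --build-arg INSTALL_MODE=editable \
  --build-arg DISTRO_NAME=starter \
  --tag llama-stack:starter-ci

# Confirm the image is present before the installer step consumes it
docker images llama-stack:starter-ci
```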
.github/workflows/providers-build.yml (63 changes)

@@ -14,6 +14,8 @@ on:
      - '.github/workflows/providers-build.yml'
      - 'llama_stack/distributions/**'
      - 'pyproject.toml'
+      - 'containers/Containerfile'
+      - '.dockerignore'

  pull_request:
    paths:
@@ -24,6 +26,8 @@ on:
      - '.github/workflows/providers-build.yml'
      - 'llama_stack/distributions/**'
      - 'pyproject.toml'
+      - 'containers/Containerfile'
+      - '.dockerignore'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
@@ -60,15 +64,25 @@ jobs:
      - name: Install dependencies
        uses: ./.github/actions/setup-runner

-      - name: Print build dependencies
+      - name: List build dependencies
        run: |
-          uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only
+          uv run llama stack list-deps ${{ matrix.distro }} | tee deps.txt

-      - name: Run Llama Stack Build
+      - name: Install distribution into venv
+        if: matrix.image-type == 'venv'
        run: |
-          # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
-          # LLAMA_STACK_DIR is set to the current directory so we are building from the source
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test
+          if [ -s deps.txt ]; then
+            xargs -L1 uv pip install < deps.txt
+          fi

+      - name: Build container image
+        if: matrix.image-type == 'container'
+        run: |
+          docker build . \
+            -f containers/Containerfile \
+            --build-arg INSTALL_MODE=editable \
+            --build-arg DISTRO_NAME=${{ matrix.distro }} \
+            --tag llama-stack:${{ matrix.distro }}-ci

      - name: Print dependencies in the image
        if: matrix.image-type == 'venv'
@@ -86,8 +100,8 @@ jobs:
      - name: Build a single provider
        run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama
+          uv pip install -e .
+          uv run --no-sync llama stack list-deps --providers inference=remote::ollama | xargs -L1 uv pip install

  build-custom-container-distribution:
    runs-on: ubuntu-latest
    steps:
@@ -97,11 +111,16 @@ jobs:
      - name: Install dependencies
        uses: ./.github/actions/setup-runner

-      - name: Build a single provider
+      - name: Build container image
        run: |
-          yq -i '.image_type = "container"' llama_stack/distributions/ci-tests/build.yaml
-          yq -i '.image_name = "test"' llama_stack/distributions/ci-tests/build.yaml
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
+          BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' llama_stack/distributions/ci-tests/build.yaml)
+          docker build . \
+            -f containers/Containerfile \
+            --build-arg INSTALL_MODE=editable \
+            --build-arg DISTRO_NAME=ci-tests \
+            --build-arg BASE_IMAGE="$BASE_IMAGE" \
+            --build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
+            -t llama-stack:ci-tests

      - name: Inspect the container image entrypoint
        run: |
@@ -112,7 +131,7 @@ jobs:
          fi
          entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
          echo "Entrypoint: $entrypoint"
-          if [ "$entrypoint" != "[llama stack run /app/run.yaml]" ]; then
+          if [ "$entrypoint" != "[/usr/local/bin/llama-stack-entrypoint.sh]" ]; then
            echo "Entrypoint is not correct"
            exit 1
          fi
@@ -129,17 +148,19 @@ jobs:
      - name: Pin distribution to UBI9 base
        run: |
          yq -i '
-            .image_type = "container" |
-            .image_name = "ubi9-test" |
            .distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest"
          ' llama_stack/distributions/ci-tests/build.yaml

-      - name: Build dev container (UBI9)
-        env:
-          USE_COPY_NOT_MOUNT: "true"
-          LLAMA_STACK_DIR: "."
+      - name: Build UBI9 container image
        run: |
-          uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
+          BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' llama_stack/distributions/ci-tests/build.yaml)
+          docker build . \
+            -f containers/Containerfile \
+            --build-arg INSTALL_MODE=editable \
+            --build-arg DISTRO_NAME=ci-tests \
+            --build-arg BASE_IMAGE="$BASE_IMAGE" \
+            --build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
+            -t llama-stack:ci-tests-ubi9

      - name: Inspect UBI9 image
        run: |
@@ -150,7 +171,7 @@ jobs:
          fi
          entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
          echo "Entrypoint: $entrypoint"
-          if [ "$entrypoint" != "[llama stack run /app/run.yaml]" ]; then
+          if [ "$entrypoint" != "[/usr/local/bin/llama-stack-entrypoint.sh]" ]; then
            echo "Entrypoint is not correct"
            exit 1
          fi
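Outside CI, the venv flavour of this job reduces to the same two commands; a minimal local sketch, assuming the `starter` distribution:

```bash
# Resolve the distribution's dependency list and keep a copy for inspection
uv run llama stack list-deps starter | tee deps.txt

# Install each resolved dependency into the active environment
if [ -s deps.txt ]; then
  xargs -L1 uv pip install < deps.txt
fi
```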
@@ -46,9 +46,9 @@ jobs:
          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
          cat tests/external/ramalama-stack/run.yaml

-      - name: Build distro from config file
+      - name: Install distribution dependencies
        run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/ramalama-stack/build.yaml
+          uv run llama stack list-deps tests/external/ramalama-stack/build.yaml | xargs -L1 uv pip install

      - name: Start Llama Stack server in background
        if: ${{ matrix.image-type }} == 'venv'
.github/workflows/test-external.yml (2 changes)

@@ -44,7 +44,7 @@ jobs:

      - name: Print distro dependencies
        run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync llama stack list-deps tests/external/build.yaml
+          uv run --no-sync llama stack list-deps tests/external/build.yaml

      - name: Build distro from config file
        run: |
containers/Containerfile (new file, 137 lines)

@@ -0,0 +1,137 @@
+# syntax=docker/dockerfile:1.6
+#
+# This Dockerfile is used to build the Llama Stack container image.
+# Example:
+#   docker build \
+#     -f containers/Containerfile \
+#     --build-arg DISTRO_NAME=starter \
+#     --tag llama-stack:starter .
+
+ARG BASE_IMAGE=python:3.12-slim
+FROM ${BASE_IMAGE}
+
+ARG UV_HTTP_TIMEOUT=500
+ENV UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT}
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PIP_DISABLE_PIP_VERSION_CHECK=1
+WORKDIR /app
+
+RUN set -eux; \
+    if command -v dnf >/dev/null 2>&1; then \
+        dnf -y update && \
+        dnf install -y iputils git net-tools wget \
+            vim-minimal python3.12 python3.12-pip python3.12-wheel \
+            python3.12-setuptools python3.12-devel gcc gcc-c++ make && \
+        ln -sf /usr/bin/pip3.12 /usr/local/bin/pip && \
+        ln -sf /usr/bin/python3.12 /usr/local/bin/python && \
+        dnf clean all; \
+    elif command -v apt-get >/dev/null 2>&1; then \
+        apt-get update && \
+        apt-get install -y --no-install-recommends \
+            iputils-ping net-tools iproute2 dnsutils telnet \
+            curl wget git procps psmisc lsof traceroute bubblewrap \
+            gcc g++ && \
+        rm -rf /var/lib/apt/lists/*; \
+    else \
+        echo "Unsupported base image: expected dnf or apt-get" >&2; \
+        exit 1; \
+    fi
+
+RUN pip install --no-cache-dir uv
+ENV UV_SYSTEM_PYTHON=1
+
+ARG INSTALL_MODE="pypi"
+ARG LLAMA_STACK_DIR="/workspace"
+ARG LLAMA_STACK_CLIENT_DIR=""
+ARG PYPI_VERSION=""
+ARG TEST_PYPI_VERSION=""
+ARG KEEP_WORKSPACE=""
+ARG DISTRO_NAME="starter"
+ARG RUN_CONFIG_PATH=""
+
+ENV INSTALL_MODE=${INSTALL_MODE}
+ENV LLAMA_STACK_DIR=${LLAMA_STACK_DIR}
+ENV LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR}
+ENV PYPI_VERSION=${PYPI_VERSION}
+ENV TEST_PYPI_VERSION=${TEST_PYPI_VERSION}
+ENV KEEP_WORKSPACE=${KEEP_WORKSPACE}
+ENV DISTRO_NAME=${DISTRO_NAME}
+ENV RUN_CONFIG_PATH=${RUN_CONFIG_PATH}
+
+# Copy the repository so editable installs and run configurations are available.
+COPY . /workspace
+
+# Install llama-stack
+RUN set -eux; \
+    if [ "$INSTALL_MODE" = "editable" ]; then \
+        if [ ! -d "$LLAMA_STACK_DIR" ]; then \
+            echo "INSTALL_MODE=editable requires LLAMA_STACK_DIR to point to a directory inside the build context" >&2; \
+            exit 1; \
+        fi; \
+        uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"; \
+    elif [ "$INSTALL_MODE" = "test-pypi" ]; then \
+        uv pip install --no-cache-dir fastapi libcst; \
+        if [ -n "$TEST_PYPI_VERSION" ]; then \
+            uv pip install --no-cache-dir --extra-index-url https://test.pypi.org/simple/ --index-strategy unsafe-best-match "llama-stack==$TEST_PYPI_VERSION"; \
+        else \
+            uv pip install --no-cache-dir --extra-index-url https://test.pypi.org/simple/ --index-strategy unsafe-best-match llama-stack; \
+        fi; \
+    else \
+        if [ -n "$PYPI_VERSION" ]; then \
+            uv pip install --no-cache-dir "llama-stack==$PYPI_VERSION"; \
+        else \
+            uv pip install --no-cache-dir llama-stack; \
+        fi; \
+    fi;
+
+# Install the client package if it is provided
+RUN set -eux; \
+    if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then \
+        if [ ! -d "$LLAMA_STACK_CLIENT_DIR" ]; then \
+            echo "LLAMA_STACK_CLIENT_DIR is set but $LLAMA_STACK_CLIENT_DIR does not exist" >&2; \
+            exit 1; \
+        fi; \
+        uv pip install --no-cache-dir -e "$LLAMA_STACK_CLIENT_DIR"; \
+    fi;
+
+# Install the dependencies for the distribution
+RUN set -eux; \
+    if [ -z "$DISTRO_NAME" ]; then \
+        echo "DISTRO_NAME must be provided" >&2; \
+        exit 1; \
+    fi; \
+    deps="$(llama stack list-deps "$DISTRO_NAME")"; \
+    if [ -n "$deps" ]; then \
+        printf '%s\n' "$deps" | xargs -L1 uv pip install --no-cache-dir; \
+    fi
+
+# Cleanup
+RUN set -eux; \
+    pip uninstall -y uv; \
+    should_remove=1; \
+    if [ -n "$KEEP_WORKSPACE" ]; then should_remove=0; fi; \
+    if [ "$INSTALL_MODE" = "editable" ]; then should_remove=0; fi; \
+    case "$RUN_CONFIG_PATH" in \
+        /workspace*) should_remove=0 ;; \
+    esac; \
+    if [ "$should_remove" -eq 1 ] && [ -d /workspace ]; then rm -rf /workspace; fi
+
+RUN cat <<'EOF' >/usr/local/bin/llama-stack-entrypoint.sh
+#!/bin/sh
+set -e
+
+if [ -n "$RUN_CONFIG_PATH" ] && [ -f "$RUN_CONFIG_PATH" ]; then
+  exec llama stack run "$RUN_CONFIG_PATH" "$@"
+fi
+
+if [ -n "$DISTRO_NAME" ]; then
+  exec llama stack run "$DISTRO_NAME" "$@"
+fi
+
+exec llama stack run "$@"
+EOF
+RUN chmod +x /usr/local/bin/llama-stack-entrypoint.sh
+
+RUN mkdir -p /.llama /.cache && chmod -R g+rw /app /.llama /.cache
+
+ENTRYPOINT ["/usr/local/bin/llama-stack-entrypoint.sh"]
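To see how the build arguments interact, here is a rough sketch that bakes a run config into the image and then starts it; the config path and image tag are assumptions for illustration, not values defined by this commit:

```bash
# Bake the starter run config into the image (path assumed to exist in the build context)
docker build . \
  -f containers/Containerfile \
  --build-arg INSTALL_MODE=editable \
  --build-arg DISTRO_NAME=starter \
  --build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/starter/run.yaml \
  --tag llama-stack:starter-baked

# The entrypoint script prefers RUN_CONFIG_PATH and falls back to DISTRO_NAME,
# so no config argument is needed at run time; extra args are passed to `llama stack run`.
docker run -p 8321:8321 llama-stack:starter-baked --port 8321
```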
@@ -158,17 +158,16 @@ under the LICENSE file in the root directory of this source tree.

 Some tips about common tasks you work on while contributing to Llama Stack:

-### Installing dependencies of distributions
+### Setup for development

-Building a stack image will use the production version of the `llama-stack` and `llama-stack-client` packages. If you are developing with a llama-stack repository checked out and need your code to be reflected in the stack image, set `LLAMA_STACK_DIR` and `LLAMA_STACK_CLIENT_DIR` to the appropriate checked out directories when running any of the `llama` CLI commands.
-
-Example:
 ```bash
-cd work/
 git clone https://github.com/meta-llama/llama-stack.git
-git clone https://github.com/meta-llama/llama-stack-client-python.git
 cd llama-stack
-llama stack build --distro <...>
+uv run llama stack list-deps <distro-name> | xargs -L1 uv pip install
+
+# (Optional) If you are developing the llama-stack-client-python package, you can add it as an editable package.
+git clone https://github.com/meta-llama/llama-stack-client-python.git
+uv add --editable ../llama-stack-client-python
 ```

 ### Updating distribution configurations
@@ -5,225 +5,79 @@ sidebar_label: Build your own Distribution
 sidebar_position: 3
 ---

-This guide will walk you through the steps to get started with building a Llama Stack distribution from scratch with your choice of API providers.
+This guide walks you through inspecting existing distributions, customising their configuration, and building runnable artefacts for your own deployment.

-### Setting your log level
+### Explore existing distributions

-In order to specify the proper logging level users can apply the following environment variable `LLAMA_STACK_LOGGING` with the following format:
+All first-party distributions live under `llama_stack/distributions/`. Each directory contains:
+
+- `build.yaml` – the distribution specification (providers, additional dependencies, optional external provider directories).
+- `run.yaml` – sample run configuration (when provided).
+- Documentation fragments that power this site.

-`LLAMA_STACK_LOGGING=server=debug;core=info`
+Browse that folder to understand available providers and copy a distribution to use as a starting point. When creating a new stack, duplicate an existing directory, rename it, and adjust the `build.yaml` file to match your requirements.

-Where each category in the following list:
-
-- all
-- core
-- server
-- router
-- inference
-- agents
-- safety
-- eval
-- tools
-- client
-
-Can be set to any of the following log levels:
-
-- debug
-- info
-- warning
-- error
-- critical
-
-The default global log level is `info`. `all` sets the log level for all components.
-
-A user can also set `LLAMA_STACK_LOG_FILE` which will pipe the logs to the specified path as well as to the terminal. An example would be: `export LLAMA_STACK_LOG_FILE=server.log`
-
-### Llama Stack Build
-
-In order to build your own distribution, we recommend you clone the `llama-stack` repository.
-
-```
-git clone git@github.com:meta-llama/llama-stack.git
-cd llama-stack
-pip install -e .
-```
-Use the CLI to build your distribution.
-The main points to consider are:
-1. **Image Type** - Do you want a venv environment or a Container (eg. Docker)
-2. **Template** - Do you want to use a template to build your distribution? or start from scratch ?
-3. **Config** - Do you want to use a pre-existing config file to build your distribution?
-
-```
-llama stack build -h
-usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--distro DISTRIBUTION] [--list-distros] [--image-type {container,venv}] [--image-name IMAGE_NAME] [--print-deps-only]
-                         [--run] [--providers PROVIDERS]
-
-Build a Llama stack container
-
-options:
-  -h, --help            show this help message and exit
-  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to
-                        enter information interactively (default: None)
-  --template TEMPLATE   (deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default:
-                        None)
-  --distro DISTRIBUTION, --distribution DISTRIBUTION
-                        Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default: None)
-  --list-distros, --list-distributions
-                        Show the available distributions for building a Llama Stack distribution (default: False)
-  --image-type {container,venv}
-                        Image Type to use for the build. If not specified, will use the image type from the template config. (default: None)
-  --image-name IMAGE_NAME
-                        [for image-type=container|venv] Name of the virtual environment to use for the build. If not specified, currently active environment will be used if found. (default:
-                        None)
-  --print-deps-only     Print the dependencies for the stack only, without building the stack (default: False)
-  --run                 Run the stack after building using the same image type, name, and other applicable arguments (default: False)
-  --providers PROVIDERS
-                        Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per
-                        API. (default: None)
-```
-
-After this step is complete, a file named `<name>-build.yaml` and template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.

 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';

 <Tabs>
-<TabItem value="template" label="Building from a template">
-To build from alternative API providers, we provide distribution templates for users to get started building a distribution backed by different providers.
-
-The following command will allow you to see the available templates and their corresponding providers.
-```
-llama stack build --list-templates
-```
-
-```
-| Template Name                | Description                                                                    |
-+------------------------------+--------------------------------------------------------------------------------+
-| watsonx                      | Use watsonx for running LLM inference                                          |
-| vllm-gpu                     | Use a built-in vLLM engine for running LLM inference                           |
-| together                     | Use Together.AI for running LLM inference                                      |
-| tgi                          | Use (an external) TGI server for running LLM inference                         |
-| starter                      | Quick start template for running Llama Stack with several popular providers   |
-| sambanova                    | Use SambaNova for running LLM inference and safety                             |
-| remote-vllm                  | Use (an external) vLLM server for running LLM inference                        |
-| postgres-demo                | Quick start template for running Llama Stack with several popular providers   |
-| passthrough                  | Use Passthrough hosted llama-stack endpoint for LLM inference                  |
-| open-benchmark               | Distribution for running open benchmarks                                       |
-| ollama                       | Use (an external) Ollama server for running LLM inference                      |
-| nvidia                       | Use NVIDIA NIM for running LLM inference, evaluation and safety                |
-| meta-reference-gpu           | Use Meta Reference for running LLM inference                                   |
-| llama_api                    | Distribution for running e2e tests in CI                                       |
-| hf-serverless                | Use (an external) Hugging Face Inference Endpoint for running LLM inference    |
-| hf-endpoint                  | Use (an external) Hugging Face Inference Endpoint for running LLM inference    |
-| groq                         | Use Groq for running LLM inference                                             |
-| fireworks                    | Use Fireworks.AI for running LLM inference                                     |
-| experimental-post-training   | Experimental template for post training                                        |
-| dell                         | Dell's distribution of Llama Stack. TGI inference via Dell's custom container  |
-| ci-tests                     | Distribution for running e2e tests in CI                                       |
-| cerebras                     | Use Cerebras for running LLM inference                                         |
-| bedrock                      | Use AWS Bedrock for running LLM inference and safety                           |
-+------------------------------+--------------------------------------------------------------------------------+
-```
-
-You may then pick a template to build your distribution with providers fitted to your liking.
-
-For example, to build a distribution with TGI as the inference provider, you can run:
-```
-$ llama stack build --distro starter
-...
-You can now edit ~/.llama/distributions/llamastack-starter/starter-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-starter/starter-run.yaml`
-```
-
-```{tip}
-The generated `run.yaml` file is a starting point for your configuration. For comprehensive guidance on customizing it for your specific needs, infrastructure, and deployment scenarios, see [Customizing Your run.yaml Configuration](customizing_run_yaml.md).
-```
+<TabItem value="container" label="Building a container">
+
+Use the Containerfile at `containers/Containerfile`, which installs `llama-stack`, resolves distribution dependencies via `llama stack list-deps`, and sets the entrypoint to `llama stack run`.
+
+```bash
+docker build . \
+  -f containers/Containerfile \
+  --build-arg DISTRO_NAME=starter \
+  --tag llama-stack:starter
+```
+
+Handy build arguments:
+
+- `DISTRO_NAME` – distribution directory name (defaults to `starter`).
+- `RUN_CONFIG_PATH` – absolute path inside the build context for a run config that should be baked into the image (e.g. `/workspace/run.yaml`).
+- `INSTALL_MODE=editable` – install the repository copied into `/workspace` with `uv pip install -e`. Pair it with `--build-arg LLAMA_STACK_DIR=/workspace`.
+- `LLAMA_STACK_CLIENT_DIR` – optional editable install of the Python client.
+- `PYPI_VERSION` / `TEST_PYPI_VERSION` – pin specific releases when not using editable installs.
+- `KEEP_WORKSPACE=1` – retain `/workspace` in the final image if you need to access additional files (such as sample configs or provider bundles).
+
+Make sure any custom `build.yaml`, run configs, or provider directories you reference are included in the Docker build context so the Containerfile can read them.

 </TabItem>
-<TabItem value="scratch" label="Building from Scratch">
-
-If the provided templates do not fit your use case, you could start off with running `llama stack build` which will allow you to a interactively enter wizard where you will be prompted to enter build configurations.
-
-It would be best to start with a template and understand the structure of the config file and the various concepts ( APIS, providers, resources, etc.) before starting from scratch.
-```
-llama stack build
-
-> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
-> Enter the image type you want your Llama Stack to be built as (container or venv): venv
-
-Llama Stack is composed of several APIs working together. Let's select
-the provider types (implementations) you want to use for these APIs.
-
-Tip: use <TAB> to see options for the providers.
-
-> Enter provider for API inference: inline::meta-reference
-> Enter provider for API safety: inline::llama-guard
-> Enter provider for API agents: inline::meta-reference
-> Enter provider for API memory: inline::faiss
-> Enter provider for API datasetio: inline::meta-reference
-> Enter provider for API scoring: inline::meta-reference
-> Enter provider for API eval: inline::meta-reference
-> Enter provider for API telemetry: inline::meta-reference
-
-> (Optional) Enter a short description for your Llama Stack:
-
-You can now edit ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml`
-```
-</TabItem>
-<TabItem value="config" label="Building from a pre-existing build config file">
-- In addition to templates, you may customize the build to your liking through editing config files and build from config files with the following command.
-
-- The config file will be of contents like the ones in `llama_stack/distributions/*build.yaml`.
-
-```
-llama stack build --config llama_stack/distributions/starter/build.yaml
-```
-</TabItem>
-<TabItem value="external" label="Building with External Providers">
-
-Llama Stack supports external providers that live outside of the main codebase. This allows you to create and maintain your own providers independently or use community-provided providers.
-
-To build a distribution with external providers, you need to:
-
-1. Configure the `external_providers_dir` in your build configuration file:
+<TabItem value="external" label="Building with external providers">
+
+External providers live outside the main repository but can be bundled by pointing `external_providers_dir` to a directory that contains your provider packages.
+
+1. Copy providers into the build context, for example `cp -R path/to/providers providers.d`.
+2. Update `build.yaml` with the directory and provider entries.
+3. Adjust run configs to use the in-container path (usually `/.llama/providers.d`). Pass `--build-arg RUN_CONFIG_PATH=/workspace/run.yaml` if you want to bake the config.
+
+Example `build.yaml` excerpt for a custom Ollama provider:

 ```yaml
-# Example my-external-stack.yaml with external providers
-version: '2'
 distribution_spec:
-  description: Custom distro for CI tests
   providers:
     inference:
       - remote::custom_ollama
-  # Add more providers as needed
-image_type: container
-image_name: ci-test
-# Path to external provider implementations
-external_providers_dir: ~/.llama/providers.d
+external_providers_dir: /workspace/providers.d
+```
+
+Inside `providers.d/custom_ollama/provider.py`, define `get_provider_spec()` so the CLI can discover dependencies:
+
+```python
+from llama_stack.providers.datatypes import ProviderSpec
+
+
+def get_provider_spec() -> ProviderSpec:
+    return ProviderSpec(
+        provider_type="remote::custom_ollama",
+        module="llama_stack_ollama_provider",
+        config_class="llama_stack_ollama_provider.config.OllamaImplConfig",
+        pip_packages=[
+            "ollama",
+            "aiohttp",
+            "llama-stack-provider-ollama",
+        ],
+    )
 ```

 Here's an example for a custom Ollama provider:
@@ -232,9 +86,9 @@ Here's an example for a custom Ollama provider:
 adapter:
   adapter_type: custom_ollama
   pip_packages:
     - ollama
     - aiohttp
     - llama-stack-provider-ollama # This is the provider package
   config_class: llama_stack_ollama_provider.config.OllamaImplConfig
   module: llama_stack_ollama_provider
 api_dependencies: []
@@ -245,53 +99,22 @@ The `pip_packages` section lists the Python packages required by the provider, a
 provider package itself. The package must be available on PyPI or can be provided from a local
 directory or a git repository (git must be installed on the build environment).

-2. Build your distribution using the config file:
-
-```
-llama stack build --config my-external-stack.yaml
-```
-
-For more information on external providers, including directory structure, provider types, and implementation requirements, see the [External Providers documentation](../providers/external/).
+For deeper guidance, see the [External Providers documentation](../providers/external/).

 </TabItem>
-<TabItem value="container" label="Building Container">
+</Tabs>

-:::tip Podman Alternative
-Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
-:::
+### Run your stack server

-To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.
+After building the image, launch it directly with Docker or Podman—the entrypoint calls `llama stack run` using the baked distribution or the bundled run config:

-```
-llama stack build --distro starter --image-type container
-```
-
-```
-$ llama stack build --distro starter --image-type container
-...
-Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/ContainerfileFROM python:3.10-slim
-...
-```
-
-You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml`
-```
-
-Now set some environment variables for the inference model ID and Llama Stack Port and create a local directory to mount into the container's file system.
-
 ```bash
-export INFERENCE_MODEL="llama3.2:3b"
-export LLAMA_STACK_PORT=8321
-mkdir -p ~/.llama
-```
-
-After this step is successful, you should be able to find the built container image and test it with the below Docker command:
-
-```
 docker run -d \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ~/.llama:/root/.llama \
   -e INFERENCE_MODEL=$INFERENCE_MODEL \
   -e OLLAMA_URL=http://host.docker.internal:11434 \
-  localhost/distribution-ollama:dev \
+  llama-stack:starter \
   --port $LLAMA_STACK_PORT
 ```
@@ -311,131 +134,14 @@ Here are the docker flags and their uses:

 * `--port $LLAMA_STACK_PORT`: Port number for the server to listen on

-</TabItem>
-</Tabs>
-
-### Running your Stack server
-Now, let's start the Llama Stack Distribution Server. You will need the YAML configuration file which was written out at the end by the `llama stack build` step.
+If you prepared a custom run config, mount it into the container and reference it explicitly:

-```
-llama stack run -h
-usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME]
-                       [--image-type {venv}] [--enable-ui]
-                       [config | distro]
-
-Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.
-
-positional arguments:
-  config | distro       Path to config file to use for the run or name of known distro (`llama stack list` for a list). (default: None)
-
-options:
-  -h, --help            show this help message and exit
-  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321)
-  --image-name IMAGE_NAME
-                        [DEPRECATED] This flag is no longer supported. Please activate your virtual environment before running. (default: None)
-  --image-type {venv}
-                        [DEPRECATED] This flag is no longer supported. Please activate your virtual environment before running. (default: None)
-  --enable-ui           Start the UI server (default: False)
-```
-
-**Note:** Container images built with `llama stack build --image-type container` cannot be run using `llama stack run`. Instead, they must be run directly using Docker or Podman commands as shown in the container building section above.
-
-```
-# Start using template name
-llama stack run tgi
-
-# Start using config file
-llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml
-```
-
-```
-$ llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml
-
-Serving API inspect
- GET /health
- GET /providers/list
- GET /routes/list
-Serving API inference
- POST /inference/chat_completion
- POST /inference/completion
- POST /inference/embeddings
-...
-Serving API agents
- POST /agents/create
- POST /agents/session/create
- POST /agents/turn/create
- POST /agents/delete
- POST /agents/session/delete
- POST /agents/session/get
- POST /agents/step/get
- POST /agents/turn/get
-
-Listening on ['::', '0.0.0.0']:8321
-INFO:     Started server process [2935911]
-INFO:     Waiting for application startup.
-INFO:     Application startup complete.
-INFO:     Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit)
-INFO:     2401:db00:35c:2d2b:face:0:c9:0:54678 - "GET /models/list HTTP/1.1" 200 OK
-```
-
-### Listing Distributions
-Using the list command, you can view all existing Llama Stack distributions, including stacks built from templates, from scratch, or using custom configuration files.
-
-```
-llama stack list -h
-usage: llama stack list [-h]
-
-list the build stacks
-
-options:
-  -h, --help  show this help message and exit
-```
-
-Example Usage
-
-```
-llama stack list
-```
-
-```
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| Stack Name                   | Path                                                            | Build Config | Run Config |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| together                     | ~/.llama/distributions/together                                 | Yes          | No         |
-| bedrock                      | ~/.llama/distributions/bedrock                                  | Yes          | No         |
-| starter                      | ~/.llama/distributions/starter                                  | Yes          | Yes        |
-| remote-vllm                  | ~/.llama/distributions/remote-vllm                              | Yes          | Yes        |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-```
-
-### Removing a Distribution
-Use the remove command to delete a distribution you've previously built.
-
-```
-llama stack rm -h
-usage: llama stack rm [-h] [--all] [name]
-
-Remove the build stack
-
-positional arguments:
-  name        Name of the stack to delete (default: None)
-
-options:
-  -h, --help  show this help message and exit
-  --all, -a   Delete all stacks (use with caution) (default: False)
-```
-
-Example
-```
-llama stack rm llamastack-test
-```
-
-To keep your environment organized and avoid clutter, consider using `llama stack list` to review old or unused distributions and `llama stack rm <name>` to delete them when they're no longer needed.
-
-### Troubleshooting
-
-If you encounter any issues, ask questions in our discord or search through our [GitHub Issues](https://github.com/meta-llama/llama-stack/issues), or file an new issue.
+```bash
+docker run \
+  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
+  -v $(pwd)/run.yaml:/app/run.yaml \
+  llama-stack:starter \
+  /app/run.yaml
+```
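Tying the documentation changes together, starting a custom distribution from an existing one might look like the following sketch; the directory name `my-distro` is hypothetical:

```bash
# Duplicate an existing distribution as a starting point (hypothetical name)
cp -R llama_stack/distributions/starter llama_stack/distributions/my-distro

# Adjust providers and extra dependencies in the copied build.yaml, then
# install its dependencies into the current environment as the docs now recommend
uv run llama stack list-deps llama_stack/distributions/my-distro/build.yaml | xargs -L1 uv pip install
```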
@@ -23,6 +23,17 @@ Another simple way to start interacting with Llama Stack is to just spin up a co
 If you have built a container image and want to deploy it in a Kubernetes cluster instead of starting the Llama Stack server locally. See [Kubernetes Deployment Guide](../deploying/kubernetes_deployment) for more details.

+## Configure logging
+
+Control log output via environment variables before starting the server.
+
+- `LLAMA_STACK_LOGGING` sets per-component levels, e.g. `LLAMA_STACK_LOGGING=server=debug;core=info`.
+- Supported categories: `all`, `core`, `server`, `router`, `inference`, `agents`, `safety`, `eval`, `tools`, `client`.
+- Levels: `debug`, `info`, `warning`, `error`, `critical` (default is `info`). Use `all=<level>` to apply globally.
+- `LLAMA_STACK_LOG_FILE=/path/to/log` mirrors logs to a file while still printing to stdout.
+
+Export these variables prior to running `llama stack run`, launching a container, or starting the server through any other pathway.
+
 ```{toctree}
 :maxdepth: 1
 :hidden:
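A minimal shell setup matching the logging options described in the new section above (the log file name is only an example):

```bash
# Per-component log levels; `all=<level>` would apply one level globally
export LLAMA_STACK_LOGGING="server=debug;core=info"

# Mirror logs to a file in addition to the terminal
export LLAMA_STACK_LOG_FILE=server.log

# Then start the server as usual
llama stack run starter --port 8321
```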