From 34f0ad0d256a0087203a0fa013629d24fc987ee7 Mon Sep 17 00:00:00 2001
From: Eric Huang
Date: Fri, 17 Oct 2025 22:27:51 -0700
Subject: [PATCH] Add containers/Containerfile and rework distribution build docs

# What does this PR do?
Adds `containers/Containerfile`, which installs `llama-stack`, resolves distribution dependencies via `llama stack list-deps`, and sets the entrypoint to `llama stack run`. Updates the contributing and distribution docs to describe this flow.

## Test Plan

---
 containers/Containerfile                    | 137 ++++
 docs/docs/contributing/index.mdx            |  13 +-
 docs/docs/distributions/building_distro.mdx | 457 +++---
 .../starting_llama_stack_server.mdx         |  11 +
 4 files changed, 229 insertions(+), 389 deletions(-)
 create mode 100644 containers/Containerfile

diff --git a/containers/Containerfile b/containers/Containerfile
new file mode 100644
index 000000000..06b87ba4c
--- /dev/null
+++ b/containers/Containerfile
@@ -0,0 +1,137 @@
+# syntax=docker/dockerfile:1.6
+#
+# This Containerfile builds the Llama Stack container image.
+# Example:
+#   docker build \
+#     -f containers/Containerfile \
+#     --build-arg DISTRO_NAME=starter \
+#     --tag llama-stack:starter .
+
+ARG BASE_IMAGE=python:3.12-slim
+FROM ${BASE_IMAGE}
+
+ARG UV_HTTP_TIMEOUT=500
+ENV UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT}
+ENV PYTHONDONTWRITEBYTECODE=1
+ENV PIP_DISABLE_PIP_VERSION_CHECK=1
+WORKDIR /app
+
+RUN set -eux; \
+    if command -v dnf >/dev/null 2>&1; then \
+        dnf -y update && \
+        dnf install -y iputils git net-tools wget \
+            vim-minimal python3.12 python3.12-pip python3.12-wheel \
+            python3.12-setuptools python3.12-devel gcc gcc-c++ make && \
+        ln -sf /usr/bin/pip3.12 /usr/local/bin/pip && \
+        ln -sf /usr/bin/python3.12 /usr/local/bin/python && \
+        dnf clean all; \
+    elif command -v apt-get >/dev/null 2>&1; then \
+        apt-get update && \
+        apt-get install -y --no-install-recommends \
+            iputils-ping net-tools iproute2 dnsutils telnet \
+            curl wget git procps psmisc lsof traceroute bubblewrap \
+            gcc g++ && \
+        rm -rf /var/lib/apt/lists/*; \
+    else \
+        echo "Unsupported base image: expected dnf or apt-get" >&2; \
+        exit 1; \
+    fi
+
+RUN pip install --no-cache-dir uv
+ENV UV_SYSTEM_PYTHON=1
+
+ARG INSTALL_MODE="pypi"
+ARG LLAMA_STACK_DIR="/workspace"
+ARG LLAMA_STACK_CLIENT_DIR=""
+ARG PYPI_VERSION=""
+ARG TEST_PYPI_VERSION=""
+ARG KEEP_WORKSPACE=""
+ARG DISTRO_NAME="starter"
+ARG RUN_CONFIG_PATH=""
+
+ENV INSTALL_MODE=${INSTALL_MODE}
+ENV LLAMA_STACK_DIR=${LLAMA_STACK_DIR}
+ENV LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR}
+ENV PYPI_VERSION=${PYPI_VERSION}
+ENV TEST_PYPI_VERSION=${TEST_PYPI_VERSION}
+ENV KEEP_WORKSPACE=${KEEP_WORKSPACE}
+ENV DISTRO_NAME=${DISTRO_NAME}
+ENV RUN_CONFIG_PATH=${RUN_CONFIG_PATH}
+
+# Copy the repository so editable installs and run configurations are available.
+COPY . /workspace
+
+# Install llama-stack
+RUN set -eux; \
+    if [ "$INSTALL_MODE" = "editable" ]; then \
-d "$LLAMA_STACK_DIR" ]; then \ + echo "INSTALL_MODE=editable requires LLAMA_STACK_DIR to point to a directory inside the build context" >&2; \ + exit 1; \ + fi; \ + uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"; \ + elif [ "$INSTALL_MODE" = "test-pypi" ]; then \ + uv pip install --no-cache-dir fastapi libcst; \ + if [ -n "$TEST_PYPI_VERSION" ]; then \ + uv pip install --no-cache-dir --extra-index-url https://test.pypi.org/simple/ --index-strategy unsafe-best-match "llama-stack==$TEST_PYPI_VERSION"; \ + else \ + uv pip install --no-cache-dir --extra-index-url https://test.pypi.org/simple/ --index-strategy unsafe-best-match llama-stack; \ + fi; \ + else \ + if [ -n "$PYPI_VERSION" ]; then \ + uv pip install --no-cache-dir "llama-stack==$PYPI_VERSION"; \ + else \ + uv pip install --no-cache-dir llama-stack; \ + fi; \ + fi; + +# Install the client package if it is provided +RUN set -eux; \ + if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then \ + if [ ! -d "$LLAMA_STACK_CLIENT_DIR" ]; then \ + echo "LLAMA_STACK_CLIENT_DIR is set but $LLAMA_STACK_CLIENT_DIR does not exist" >&2; \ + exit 1; \ + fi; \ + uv pip install --no-cache-dir -e "$LLAMA_STACK_CLIENT_DIR"; \ + fi; + +# Install the dependencies for the distribution +RUN set -eux; \ + if [ -z "$DISTRO_NAME" ]; then \ + echo "DISTRO_NAME must be provided" >&2; \ + exit 1; \ + fi; \ + deps="$(llama stack list-deps "$DISTRO_NAME")"; \ + if [ -n "$deps" ]; then \ + printf '%s\n' "$deps" | xargs -L1 uv pip install --no-cache-dir; \ + fi + +# Cleanup +RUN set -eux; \ + pip uninstall -y uv; \ + should_remove=1; \ + if [ -n "$KEEP_WORKSPACE" ]; then should_remove=0; fi; \ + if [ "$INSTALL_MODE" = "editable" ]; then should_remove=0; fi; \ + case "$RUN_CONFIG_PATH" in \ + /workspace*) should_remove=0 ;; \ + esac; \ + if [ "$should_remove" -eq 1 ] && [ -d /workspace ]; then rm -rf /workspace; fi + +RUN cat <<'EOF' >/usr/local/bin/llama-stack-entrypoint.sh +#!/bin/sh +set -e + +if [ -n "$RUN_CONFIG_PATH" ] && [ -f "$RUN_CONFIG_PATH" ]; then + exec llama stack run "$RUN_CONFIG_PATH" "$@" +fi + +if [ -n "$DISTRO_NAME" ]; then + exec llama stack run "$DISTRO_NAME" "$@" +fi + +exec llama stack run "$@" +EOF +RUN chmod +x /usr/local/bin/llama-stack-entrypoint.sh + +RUN mkdir -p /.llama /.cache && chmod -R g+rw /app /.llama /.cache + +ENTRYPOINT ["/usr/local/bin/llama-stack-entrypoint.sh"] diff --git a/docs/docs/contributing/index.mdx b/docs/docs/contributing/index.mdx index 2051f6040..373f817f3 100644 --- a/docs/docs/contributing/index.mdx +++ b/docs/docs/contributing/index.mdx @@ -158,17 +158,16 @@ under the LICENSE file in the root directory of this source tree. Some tips about common tasks you work on while contributing to Llama Stack: -### Installing dependencies of distributions +### Setup for development -Building a stack image will use the production version of the `llama-stack` and `llama-stack-client` packages. If you are developing with a llama-stack repository checked out and need your code to be reflected in the stack image, set `LLAMA_STACK_DIR` and `LLAMA_STACK_CLIENT_DIR` to the appropriate checked out directories when running any of the `llama` CLI commands. - -Example: ```bash -cd work/ git clone https://github.com/meta-llama/llama-stack.git -git clone https://github.com/meta-llama/llama-stack-client-python.git cd llama-stack -llama stack build --distro <...> +uv run llama stack list-deps | xargs -L1 uv pip install + +# (Optional) If you are developing the llama-stack-client-python package, you can add it as an editable package. 
 
 ### Updating distribution configurations
 
diff --git a/docs/docs/distributions/building_distro.mdx b/docs/docs/distributions/building_distro.mdx
index a4f7e1f60..c1e4fffce 100644
--- a/docs/docs/distributions/building_distro.mdx
+++ b/docs/docs/distributions/building_distro.mdx
@@ -5,437 +5,130 @@ sidebar_label: Build your own Distribution
 sidebar_position: 3
 ---
 
-This guide will walk you through the steps to get started with building a Llama Stack distribution from scratch with your choice of API providers.
+This guide walks you through inspecting existing distributions, customizing their configuration, and building runnable artifacts for your own deployment.
 
+### Explore existing distributions
 
-### Setting your log level
+All first-party distributions live under `llama_stack/distributions/`. Each directory contains:
 
-In order to specify the proper logging level users can apply the following environment variable `LLAMA_STACK_LOGGING` with the following format:
+- `build.yaml` – the distribution specification (providers, additional dependencies, optional external provider directories).
+- `run.yaml` – a sample run configuration (when provided).
+- Documentation fragments that power this site.
 
-`LLAMA_STACK_LOGGING=server=debug;core=info`
-
-Where each category in the following list:
-
-- all
-- core
-- server
-- router
-- inference
-- agents
-- safety
-- eval
-- tools
-- client
-
-Can be set to any of the following log levels:
-
-- debug
-- info
-- warning
-- error
-- critical
-
-The default global log level is `info`. `all` sets the log level for all components.
-
-A user can also set `LLAMA_STACK_LOG_FILE` which will pipe the logs to the specified path as well as to the terminal. An example would be: `export LLAMA_STACK_LOG_FILE=server.log`
-
-### Llama Stack Build
-
-In order to build your own distribution, we recommend you clone the `llama-stack` repository.
-
-
-```
-git clone git@github.com:meta-llama/llama-stack.git
-cd llama-stack
-pip install -e .
-```
-Use the CLI to build your distribution.
-The main points to consider are:
-1. **Image Type** - Do you want a venv environment or a Container (eg. Docker)
-2. **Template** - Do you want to use a template to build your distribution? or start from scratch ?
-3. **Config** - Do you want to use a pre-existing config file to build your distribution?
-
-```
-llama stack build -h
-usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--distro DISTRIBUTION] [--list-distros] [--image-type {container,venv}] [--image-name IMAGE_NAME] [--print-deps-only]
-                         [--run] [--providers PROVIDERS]
-
-Build a Llama stack container
-
-options:
-  -h, --help            show this help message and exit
-  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to
-                        enter information interactively (default: None)
-  --template TEMPLATE   (deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default:
-                        None)
-  --distro DISTRIBUTION, --distribution DISTRIBUTION
-                        Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default: None)
-  --list-distros, --list-distributions
-                        Show the available distributions for building a Llama Stack distribution (default: False)
-  --image-type {container,venv}
-                        Image Type to use for the build. If not specified, will use the image type from the template config. (default: None)
-  --image-name IMAGE_NAME
-                        [for image-type=container|venv] Name of the virtual environment to use for the build. If not specified, currently active environment will be used if found. (default:
-                        None)
-  --print-deps-only     Print the dependencies for the stack only, without building the stack (default: False)
-  --run                 Run the stack after building using the same image type, name, and other applicable arguments (default: False)
-  --providers PROVIDERS
-                        Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per
-                        API. (default: None)
-```
-
-After this step is complete, a file named `<name>-build.yaml` and template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.
+Browse that folder to understand the available providers and copy a distribution to use as a starting point. When creating a new stack, duplicate an existing directory, rename it, and adjust the `build.yaml` file to match your requirements.
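+
+As a reference, a trimmed `build.yaml` skeleton might look like this (a sketch adapted from the bundled distributions; the description, provider list, and image name are placeholders):
+
+```yaml
+version: '2'
+distribution_spec:
+  description: My custom distribution
+  providers:
+    inference:
+      - remote::ollama
+image_type: container
+image_name: my-stack
+```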
 
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
+<Tabs>
+<TabItem value="container" label="Container image">
 
-To build from alternative API providers, we provide distribution templates for users to get started building a distribution backed by different providers.
+Use the Containerfile at `containers/Containerfile`, which installs `llama-stack`, resolves distribution dependencies via `llama stack list-deps`, and sets the entrypoint to `llama stack run`.
 
-The following command will allow you to see the available templates and their corresponding providers.
-```
-llama stack build --list-templates
-```
+```bash
+docker build . \
+  -f containers/Containerfile \
+  --build-arg DISTRO_NAME=starter \
+  --tag llama-stack:starter
+```
 
-```
-+------------------------------+-----------------------------------------------------------------------------+
-| Template Name                | Description                                                                 |
-+------------------------------+-----------------------------------------------------------------------------+
-| watsonx                      | Use watsonx for running LLM inference                                       |
-+------------------------------+-----------------------------------------------------------------------------+
-| vllm-gpu                     | Use a built-in vLLM engine for running LLM inference                        |
-+------------------------------+-----------------------------------------------------------------------------+
-| together                     | Use Together.AI for running LLM inference                                   |
-+------------------------------+-----------------------------------------------------------------------------+
-| tgi                          | Use (an external) TGI server for running LLM inference                      |
-+------------------------------+-----------------------------------------------------------------------------+
-| starter                      | Quick start template for running Llama Stack with several popular providers |
-+------------------------------+-----------------------------------------------------------------------------+
-| sambanova                    | Use SambaNova for running LLM inference and safety                          |
-+------------------------------+-----------------------------------------------------------------------------+
-| remote-vllm                  | Use (an external) vLLM server for running LLM inference                     |
-+------------------------------+-----------------------------------------------------------------------------+
-| postgres-demo                | Quick start template for running Llama Stack with several popular providers |
-+------------------------------+-----------------------------------------------------------------------------+
-| passthrough                  | Use Passthrough hosted llama-stack endpoint for LLM inference               |
-+------------------------------+-----------------------------------------------------------------------------+
-| open-benchmark               | Distribution for running open benchmarks                                    |
-+------------------------------+-----------------------------------------------------------------------------+
-| ollama                       | Use (an external) Ollama server for running LLM inference                   |
-+------------------------------+-----------------------------------------------------------------------------+
-| nvidia                       | Use NVIDIA NIM for running LLM inference, evaluation and safety             |
-+------------------------------+-----------------------------------------------------------------------------+
-| meta-reference-gpu           | Use Meta Reference for running LLM inference                                |
-+------------------------------+-----------------------------------------------------------------------------+
-| llama_api                    | Distribution for running e2e tests in CI                                    |
-+------------------------------+-----------------------------------------------------------------------------+
-| hf-serverless                | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
-+------------------------------+-----------------------------------------------------------------------------+
-| hf-endpoint                  | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
-+------------------------------+-----------------------------------------------------------------------------+
-| groq                         | Use Groq for running LLM inference                                          |
-+------------------------------+-----------------------------------------------------------------------------+
-| fireworks                    | Use Fireworks.AI for running LLM inference                                  |
-+------------------------------+-----------------------------------------------------------------------------+
-| experimental-post-training   | Experimental template for post training                                     |
-+------------------------------+-----------------------------------------------------------------------------+
-| dell                         | Dell's distribution of Llama Stack. TGI inference via Dell's custom         |
-|                              | container                                                                   |
-+------------------------------+-----------------------------------------------------------------------------+
-| ci-tests                     | Distribution for running e2e tests in CI                                    |
-+------------------------------+-----------------------------------------------------------------------------+
-| cerebras                     | Use Cerebras for running LLM inference                                      |
-+------------------------------+-----------------------------------------------------------------------------+
-| bedrock                      | Use AWS Bedrock for running LLM inference and safety                        |
-+------------------------------+-----------------------------------------------------------------------------+
-```
 
-You may then pick a template to build your distribution with providers fitted to your liking.
+Handy build arguments:
 
+- `DISTRO_NAME` – distribution directory name (defaults to `starter`).
+- `RUN_CONFIG_PATH` – absolute path inside the build context for a run config that should be baked into the image (e.g. `/workspace/run.yaml`).
+- `INSTALL_MODE=editable` – install the repository copied into `/workspace` with `uv pip install -e`. Pair it with `--build-arg LLAMA_STACK_DIR=/workspace`.
+- `LLAMA_STACK_CLIENT_DIR` – optional editable install of the Python client.
+- `PYPI_VERSION` / `TEST_PYPI_VERSION` – pin specific releases when not using editable installs.
+- `KEEP_WORKSPACE=1` – retain `/workspace` in the final image if you need to access additional files (such as sample configs or provider bundles).
 
-For example, to build a distribution with TGI as the inference provider, you can run:
-```
-$ llama stack build --distro starter
-...
-You can now edit ~/.llama/distributions/llamastack-starter/starter-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-starter/starter-run.yaml`
-```
 
+Make sure any custom `build.yaml`, run configs, or provider directories you reference are included in the Docker build context so the Containerfile can read them.
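+
+As a sketch, a development build that installs your local checkout instead of the PyPI release could combine the arguments above (run from the repository root so `/workspace` receives your sources; the tag is arbitrary):
+
+```bash
+docker build . \
+  -f containers/Containerfile \
+  --build-arg INSTALL_MODE=editable \
+  --build-arg LLAMA_STACK_DIR=/workspace \
+  --build-arg DISTRO_NAME=starter \
+  --tag llama-stack:starter-dev
+```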
 
-```{tip}
-The generated `run.yaml` file is a starting point for your configuration. For comprehensive guidance on customizing it for your specific needs, infrastructure, and deployment scenarios, see [Customizing Your run.yaml Configuration](customizing_run_yaml.md).
-```
 
+</TabItem>
+<TabItem value="external" label="External providers">
 
-If the provided templates do not fit your use case, you could start off with running `llama stack build` which will allow you to a interactively enter wizard where you will be prompted to enter build configurations.
+External providers live outside the main repository but can be bundled by pointing `external_providers_dir` to a directory that contains your provider packages.
 
-It would be best to start with a template and understand the structure of the config file and the various concepts ( APIS, providers, resources, etc.) before starting from scratch.
-```
-llama stack build
+1. Copy providers into the build context, for example `cp -R path/to/providers providers.d`.
+2. Update `build.yaml` with the directory and provider entries.
+3. Adjust run configs to use the in-container path (usually `/.llama/providers.d`), as in the run config fragment shown later in this section. Pass `--build-arg RUN_CONFIG_PATH=/workspace/run.yaml` if you want to bake the config.
 
-> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
-> Enter the image type you want your Llama Stack to be built as (container or venv): venv
-
-Llama Stack is composed of several APIs working together. Let's select
-the provider types (implementations) you want to use for these APIs.
-
-Tip: use <TAB> to see options for the providers.
-
-> Enter provider for API inference: inline::meta-reference
-> Enter provider for API safety: inline::llama-guard
-> Enter provider for API agents: inline::meta-reference
-> Enter provider for API memory: inline::faiss
-> Enter provider for API datasetio: inline::meta-reference
-> Enter provider for API scoring: inline::meta-reference
-> Enter provider for API eval: inline::meta-reference
-> Enter provider for API telemetry: inline::meta-reference
-
- > (Optional) Enter a short description for your Llama Stack:
-
-You can now edit ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml`
-```
 
-- In addition to templates, you may customize the build to your liking through editing config files and build from config files with the following command.
-
-- The config file will be of contents like the ones in `llama_stack/distributions/*build.yaml`.
-
-```
-llama stack build --config llama_stack/distributions/starter/build.yaml
-```
 
-Llama Stack supports external providers that live outside of the main codebase. This allows you to create and maintain your own providers independently or use community-provided providers.
-
-To build a distribution with external providers, you need to:
-
-1. Configure the `external_providers_dir` in your build configuration file:
+Example `build.yaml` excerpt for a custom Ollama provider:
 
 ```yaml
-# Example my-external-stack.yaml with external providers
-version: '2'
 distribution_spec:
-  description: Custom distro for CI tests
   providers:
     inference:
       - remote::custom_ollama
-# Add more providers as needed
-image_type: container
-image_name: ci-test
-# Path to external provider implementations
-external_providers_dir: ~/.llama/providers.d
+external_providers_dir: /workspace/providers.d
 ```
 
-Here's an example for a custom Ollama provider:
+Inside `providers.d/custom_ollama/provider.py`, define `get_provider_spec()` so the CLI can discover dependencies:
+
+```python
+from llama_stack.providers.datatypes import ProviderSpec
+
+
+def get_provider_spec() -> ProviderSpec:
+    return ProviderSpec(
+        provider_type="remote::custom_ollama",
+        module="llama_stack_ollama_provider",
+        config_class="llama_stack_ollama_provider.config.OllamaImplConfig",
+        pip_packages=[
+            "ollama",
+            "aiohttp",
+            "llama-stack-provider-ollama",
+        ],
+    )
+```
+
+Equivalent YAML snippet:
 
 ```yaml
 adapter:
   adapter_type: custom_ollama
   pip_packages:
-    - ollama
-    - aiohttp
-    - llama-stack-provider-ollama # This is the provider package
+    - ollama
+    - aiohttp
+    - llama-stack-provider-ollama
   config_class: llama_stack_ollama_provider.config.OllamaImplConfig
   module: llama_stack_ollama_provider
 api_dependencies: []
 optional_api_dependencies: []
 ```
 
-The `pip_packages` section lists the Python packages required by the provider, as well as the
-provider package itself. The package must be available on PyPI or can be provided from a local
-directory or a git repository (git must be installed on the build environment).
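+
+For step 3 above, an illustrative run config fragment pointing the server at the bundled providers might look like this (the provider id and empty config are placeholders for your provider's actual settings):
+
+```yaml
+external_providers_dir: /.llama/providers.d
+providers:
+  inference:
+    - provider_id: custom_ollama
+      provider_type: remote::custom_ollama
+      config: {}
+```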
 
+`llama stack list-deps` installs the declared packages automatically during the build. To keep the providers accessible at runtime, ensure they are copied into `/workspace/providers.d` (or the path you chose) before the Containerfile runs.
 
-2. Build your distribution using the config file:
-
-```
-llama stack build --config my-external-stack.yaml
-```
-
-For more information on external providers, including directory structure, provider types, and implementation requirements, see the [External Providers documentation](../providers/external/).
 
-:::tip Podman Alternative
-Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
-:::
-
-To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.
-
-```
-llama stack build --distro starter --image-type container
-```
-
-```
-$ llama stack build --distro starter --image-type container
-...
-Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/Containerfile
-
-FROM python:3.10-slim
-...
-```
-
-You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml`
-
-Now set some environment variables for the inference model ID and Llama Stack Port and create a local directory to mount into the container's file system.
-
-```bash
-export INFERENCE_MODEL="llama3.2:3b"
-export LLAMA_STACK_PORT=8321
-mkdir -p ~/.llama
-```
-
-After this step is successful, you should be able to find the built container image and test it with the below Docker command:
-
-```
-docker run -d \
-  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-  -v ~/.llama:/root/.llama \
-  -e INFERENCE_MODEL=$INFERENCE_MODEL \
-  -e OLLAMA_URL=http://host.docker.internal:11434 \
-  localhost/distribution-ollama:dev \
-  --port $LLAMA_STACK_PORT
-```
-
-Here are the docker flags and their uses:
-
-* `-d`: Runs the container in the detached mode as a background process
-
-* `-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT`: Maps the container port to the host port for accessing the server
-
-* `-v ~/.llama:/root/.llama`: Mounts the local .llama directory to persist configurations and data
-
-* `localhost/distribution-ollama:dev`: The name and tag of the container image to run
-
-* `-e INFERENCE_MODEL=$INFERENCE_MODEL`: Sets the INFERENCE_MODEL environment variable in the container
-
-* `-e OLLAMA_URL=http://host.docker.internal:11434`: Sets the OLLAMA_URL environment variable in the container
-
-* `--port $LLAMA_STACK_PORT`: Port number for the server to listen on
 
+For deeper guidance, see the [External Providers documentation](../providers/external/).
+
+</TabItem>
+</Tabs>
+
+### Run your stack server
 
-### Running your Stack server
-Now, let's start the Llama Stack Distribution Server. You will need the YAML configuration file which was written out at the end by the `llama stack build` step.
+After building the image, launch it directly with Docker or Podman; the entrypoint calls `llama stack run` with the baked-in distribution or, if one was provided at build time, the bundled run config:
 
-```
-llama stack run -h
-usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME]
-                       [--image-type {venv}] [--enable-ui]
-                       [config | distro]
-
-Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.
-
-positional arguments:
-  config | distro       Path to config file to use for the run or name of known distro (`llama stack list` for a list). (default: None)
-
-options:
-  -h, --help            show this help message and exit
-  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321)
-  --image-name IMAGE_NAME
-                        [DEPRECATED] This flag is no longer supported. Please activate your virtual environment before running. (default: None)
-  --image-type {venv}
-                        [DEPRECATED] This flag is no longer supported. Please activate your virtual environment before running. (default: None)
-  --enable-ui           Start the UI server (default: False)
-```
+```bash
+docker run -d \
+  -p 8321:8321 \
+  -e OPENAI_API_KEY=... \
+  llama-stack:starter
+```
 
-**Note:** Container images built with `llama stack build --image-type container` cannot be run using `llama stack run`. Instead, they must be run directly using Docker or Podman commands as shown in the container building section above.
+The entrypoint always prepends the baked-in `DISTRO_NAME` (or `RUN_CONFIG_PATH`) to `llama stack run`, and any extra container arguments are appended after it, which makes them suitable for flags such as `--port`. To run a different distribution, override the environment variable instead of passing the name as an argument (the image must already contain that distribution's dependencies):
 
-```
-# Start using template name
-llama stack run tgi
-
-# Start using config file
-llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml
-```
+```bash
+docker run -e DISTRO_NAME=meta-reference-gpu llama-stack:starter
+```
 
-```
-$ llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml
-
-Serving API inspect
- GET /health
- GET /providers/list
- GET /routes/list
-Serving API inference
- POST /inference/chat_completion
- POST /inference/completion
- POST /inference/embeddings
-...
-Serving API agents
- POST /agents/create
- POST /agents/session/create
- POST /agents/turn/create
- POST /agents/delete
- POST /agents/session/delete
- POST /agents/session/get
- POST /agents/step/get
- POST /agents/turn/get
-
-Listening on ['::', '0.0.0.0']:8321
-INFO:     Started server process [2935911]
-INFO:     Waiting for application startup.
-INFO:     Application startup complete.
-INFO:     Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit)
-INFO:     2401:db00:35c:2d2b:face:0:c9:0:54678 - "GET /models/list HTTP/1.1" 200 OK
-```
+If you prepared a custom run config, mount it into the container and point `RUN_CONFIG_PATH` at it:
 
+```bash
+docker run \
+  -p 8321:8321 \
+  -v $(pwd)/run.yaml:/app/run.yaml \
+  -e RUN_CONFIG_PATH=/app/run.yaml \
+  llama-stack:starter
+```
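+
+Once the container is up, a quick smoke test from the host confirms the API is reachable. A minimal sketch using the Python client (assumes `llama-stack-client` is installed locally and the server listens on port 8321):
+
+```python
+from llama_stack_client import LlamaStackClient
+
+# Point the client at the containerized server.
+client = LlamaStackClient(base_url="http://localhost:8321")
+
+# List the models the distribution exposes.
+print([m.identifier for m in client.models.list()])
+```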
 
-### Listing Distributions
-Using the list command, you can view all existing Llama Stack distributions, including stacks built from templates, from scratch, or using custom configuration files.
-
-```
-llama stack list -h
-usage: llama stack list [-h]
-
-list the build stacks
-
-options:
-  -h, --help  show this help message and exit
-```
-
-Example Usage
-
-```
-llama stack list
-```
-
-```
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| Stack Name                   | Path                                                            | Build Config | Run Config |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| together                     | ~/.llama/distributions/together                                 | Yes          | No         |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| bedrock                      | ~/.llama/distributions/bedrock                                  | Yes          | No         |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| starter                      | ~/.llama/distributions/starter                                  | Yes          | Yes        |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-| remote-vllm                  | ~/.llama/distributions/remote-vllm                              | Yes          | Yes        |
-+------------------------------+-----------------------------------------------------------------+--------------+------------+
-```
-
-### Removing a Distribution
-Use the remove command to delete a distribution you've previously built.
-
-```
-llama stack rm -h
-usage: llama stack rm [-h] [--all] [name]
-
-Remove the build stack
-
-positional arguments:
-  name        Name of the stack to delete (default: None)
-
-options:
-  -h, --help  show this help message and exit
-  --all, -a   Delete all stacks (use with caution) (default: False)
-```
-
-Example
-```
-llama stack rm llamastack-test
-```
-
-To keep your environment organized and avoid clutter, consider using `llama stack list` to review old or unused distributions and `llama stack rm <name>` to delete them when they're no longer needed.
-
-### Troubleshooting
-
-If you encounter any issues, ask questions in our discord or search through our [GitHub Issues](https://github.com/meta-llama/llama-stack/issues), or file an new issue.
+### Listing distributions
+
+The repository is the source of truth for available stacks. Use your editor or `ls llama_stack/distributions` to inspect what ships with the project. The CLI command `llama stack list` still enumerates installed stacks on your machine, but when authoring new ones you should work directly with the files in `llama_stack/distributions`.
diff --git a/docs/docs/distributions/starting_llama_stack_server.mdx b/docs/docs/distributions/starting_llama_stack_server.mdx
index 0260692b3..20bcfa1e4 100644
--- a/docs/docs/distributions/starting_llama_stack_server.mdx
+++ b/docs/docs/distributions/starting_llama_stack_server.mdx
@@ -23,6 +23,17 @@ Another simple way to start interacting with Llama Stack is to just spin up a co
 
 If you have built a container image and want to deploy it in a Kubernetes cluster instead of starting the Llama Stack server locally. See [Kubernetes Deployment Guide](../deploying/kubernetes_deployment) for more details.
 
+## Configure logging
+
+Control log output via environment variables before starting the server.
+
+- `LLAMA_STACK_LOGGING` sets per-component levels, e.g. `LLAMA_STACK_LOGGING=server=debug;core=info`.
+- Supported categories: `all`, `core`, `server`, `router`, `inference`, `agents`, `safety`, `eval`, `tools`, `client`.
+- Levels: `debug`, `info`, `warning`, `error`, `critical` (default is `info`). Use `all=<level>` to apply globally.
+- `LLAMA_STACK_LOG_FILE=/path/to/log` mirrors logs to a file while still printing to stdout.
+
+Export these variables prior to running `llama stack run`, launching a container, or starting the server through any other pathway.
 
 ```{toctree}
 :maxdepth: 1
 :hidden: