Merge branch 'main' into sambanova-inferene

Ashwin Bharambe 2025-01-23 12:17:19 -08:00 committed by GitHub
commit b442951252
313 changed files with 19402 additions and 20445 deletions

.github/workflows/publish-to-docker.yml
View file

@ -0,0 +1,138 @@
name: Docker Build and Publish
on:
workflow_dispatch:
inputs:
version:
description: 'TestPyPI or PyPI version to build (e.g., 0.0.63.dev20250114)'
required: true
type: string
jobs:
build-and-push:
runs-on: ubuntu-latest
env:
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
TAVILY_SEARCH_API_KEY: ${{ secrets.TAVILY_SEARCH_API_KEY }}
permissions:
contents: read
packages: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set version
id: version
run: |
if [ "${{ github.event_name }}" = "push" ]; then
echo "VERSION=0.0.63.dev51206766" >> $GITHUB_OUTPUT
else
echo "VERSION=${{ inputs.version }}" >> $GITHUB_OUTPUT
fi
- name: Check package version availability
run: |
# Function to check if version exists in a repository
check_version() {
local repo=$1
local status_code=$(curl -s -o /dev/null -w "%{http_code}" "https://$repo.org/project/llama-stack/${{ steps.version.outputs.version }}")
[ "$status_code" -eq 200 ]
}
# Check TestPyPI first, then PyPI
if check_version "test.pypi"; then
echo "Version ${{ steps.version.outputs.version }} found in TestPyPI"
echo "PYPI_SOURCE=testpypi" >> $GITHUB_ENV
elif check_version "pypi"; then
echo "Version ${{ steps.version.outputs.version }} found in PyPI"
echo "PYPI_SOURCE=pypi" >> $GITHUB_ENV
else
echo "Error: Version ${{ steps.version.outputs.version }} not found in either TestPyPI or PyPI"
exit 1
fi
- name: Install llama-stack
run: |
if [ "${{ github.event_name }}" = "push" ]; then
pip install -e .
else
if [ "$PYPI_SOURCE" = "testpypi" ]; then
pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple llama-stack==${{ steps.version.outputs.version }}
else
pip install llama-stack==${{ steps.version.outputs.version }}
fi
fi
- name: Build docker image
run: |
TEMPLATES=("ollama" "bedrock" "remote-vllm" "fireworks" "together" "tgi" "meta-reference-gpu")
for template in "${TEMPLATES[@]}"; do
if [ "$PYPI_SOURCE" = "testpypi" ]; then
TEST_PYPI_VERSION=${{ steps.version.outputs.version }} llama stack build --template $template --image-type container
else
PYPI_VERSION=${{ steps.version.outputs.version }} llama stack build --template $template --image-type container
fi
done
- name: List docker images
run: |
docker images
# TODO (xiyan): make the following 2 steps into a matrix and test all templates other than fireworks
- name: Start up built docker image
run: |
cd distributions/fireworks
if [ "$PYPI_SOURCE" = "testpypi" ]; then
sed -i 's|image: llamastack/distribution-fireworks|image: distribution-fireworks:test-${{ steps.version.outputs.version }}|' ./compose.yaml
else
sed -i 's|image: llamastack/distribution-fireworks|image: distribution-fireworks:${{ steps.version.outputs.version }}|' ./compose.yaml
fi
docker compose up -d
cd ..
# Wait for the container to start
timeout=300
while ! curl -s -f http://localhost:8321/v1/version > /dev/null && [ $timeout -gt 0 ]; do
echo "Waiting for endpoint to be available..."
sleep 5
timeout=$((timeout - 5))
done
if [ $timeout -le 0 ]; then
echo "Timeout waiting for endpoint to become available"
exit 1
fi
- name: Run simple models list test on docker server
run: |
curl http://localhost:8321/v1/models
# TODO (xiyan): figure out why client cannot find server but curl works
# - name: Run pytest on docker server
# run: |
# pip install pytest pytest-md-report
# export LLAMA_STACK_BASE_URL="http://localhost:8321"
# LLAMA_STACK_BASE_URL="http://localhost:8321" pytest -v tests/client-sdk/inference/test_inference.py --md-report --md-report-verbose=1
- name: Push to dockerhub
run: |
TEMPLATES=("ollama" "bedrock" "remote-vllm" "fireworks" "together" "tgi" "meta-reference-gpu")
for template in "${TEMPLATES[@]}"; do
if [ "$PYPI_SOURCE" = "testpypi" ]; then
docker tag distribution-$template:test-${{ steps.version.outputs.version }} llamastack/distribution-$template:test-${{ steps.version.outputs.version }}
docker push llamastack/distribution-$template:test-${{ steps.version.outputs.version }}
else
docker tag distribution-$template:${{ steps.version.outputs.version }} llamastack/distribution-$template:${{ steps.version.outputs.version }}
docker push llamastack/distribution-$template:${{ steps.version.outputs.version }}
fi
done

View file

@ -199,7 +199,13 @@ jobs:
- publish-to-testpypi
- trigger-client-and-models-build
runs-on: ubuntu-latest
env:
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
TAVILY_SEARCH_API_KEY: ${{ secrets.TAVILY_SEARCH_API_KEY }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- name: Install the package
run: |
max_attempts=6
@ -228,5 +234,11 @@ jobs:
llama stack list-apis
llama stack list-providers inference
llama stack list-providers telemetry
- name: Test Notebook
run: |
pip install pytest nbval
llama stack build --template together --image-type venv
pytest -v -s --nbval-lax ./docs/getting_started.ipynb
pytest -v -s --nbval-lax ./docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
# TODO: add trigger for integration test workflow & docker builds

View file

@ -4,9 +4,15 @@
[![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-stack)](https://pypi.org/project/llama-stack/)
[![Discord](https://img.shields.io/discord/1257833999603335178)](https://discord.gg/llama-stack)
[**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Zero-to-Hero Guide**](https://github.com/meta-llama/llama-stack/tree/main/docs/zero_to_hero_guide)
[**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Colab Notebook**](./docs/getting_started.ipynb)
Llama Stack defines and standardizes the set of core building blocks needed to bring generative AI applications to market. These building blocks are presented in the form of interoperable APIs with a broad set of Service Providers providing their implementations.
Llama Stack defines and standardizes the core building blocks that simplify AI application development. It codifies best practices across the Llama ecosystem. More specifically, it provides:
- **Unified API layer** for Inference, RAG, Agents, Tools, Safety, Evals, and Telemetry.
- **Plugin architecture** to support the rich ecosystem of implementations of the different APIs in different environments like local development, on-premises, cloud, and mobile.
- **Prepackaged verified distributions** which offer a one-stop solution for developers to get started quickly and reliably in any environment
- **Multiple developer interfaces** like CLI and SDKs for Python, Node, iOS, and Android
- **Standalone applications** as examples for how to build production-grade AI applications with Llama Stack
<div style="text-align: center;">
<img
@ -17,66 +23,16 @@ Llama Stack defines and standardizes the set of core building blocks needed to b
/>
</div>
Our goal is to provide pre-packaged implementations which can be operated in a variety of deployment environments: developers start iterating with desktops or their mobile devices and can seamlessly transition to on-prem or public cloud deployments. At every point in this transition, the same set of APIs and the same developer experience are available.
### Llama Stack Benefits
- **Flexible Options**: Developers can choose their preferred infrastructure without changing APIs and enjoy flexible deployment choices.
- **Consistent Experience**: With its unified APIs, Llama Stack makes it easier to build, test, and deploy AI applications with consistent application behavior.
- **Robust Ecosystem**: Llama Stack is already integrated with distribution partners (cloud providers, hardware vendors, and AI-focused companies) that offer tailored infrastructure, software, and services for deploying Llama models.
> ⚠️ **Note**
> The Stack APIs are rapidly improving, but still very much a work in progress, and we invite feedback as well as direct contributions.
By reducing friction and complexity, Llama Stack empowers developers to focus on what they do best: building transformative generative AI applications.
## APIs
We have working implementations of the following APIs today:
- Inference
- Safety
- Memory
- Agents
- Eval
- Telemetry
Alongside these APIs, we also have related APIs for operating on associated resources (see [Concepts](https://llama-stack.readthedocs.io/en/latest/concepts/index.html#resources)):
- Models
- Shields
- Memory Banks
- Eval Tasks
- Datasets
- Scoring Functions
We are also working on the following APIs which will be released soon:
- Post Training
- Synthetic Data Generation
- Reward Scoring
Each of these APIs is itself a collection of REST endpoints.
## Philosophy
### Service-oriented design
Unlike other frameworks, Llama Stack is built with a service-oriented, REST API-first approach. Such a design not only allows for seamless transitions from local to remote deployments, but also forces the design to be more declarative. We believe this restriction can result in a much simpler, more robust developer experience. It will necessarily trade off against expressivity; however, if we get the APIs right, it can lead to a very powerful platform.
### Composability
We expect the set of APIs we design to be composable. An Agent abstractly depends on { Inference, Memory, Safety } APIs but does not care about the actual implementation details. Safety itself may require model inference and hence can depend on the Inference API.
### Turnkey one-stop solutions
We expect to provide turnkey solutions for popular deployment scenarios. It should be easy to deploy a Llama Stack server on AWS or on a private data center. Either of these should allow a developer to get started with powerful agentic apps, model evaluations or fine-tuning services in a matter of minutes. They should all result in the same uniform observability and developer experience.
### Focus on Llama models
As a Meta initiated project, we have started by explicitly focusing on Meta's Llama series of models. Supporting the broad set of open models is no easy task and we want to start with models we understand best.
### Supporting the Ecosystem
There is a vibrant ecosystem of Providers offering efficient inference, scalable vector stores, and powerful observability solutions. We want to make sure it is easy for developers to pick and choose the best implementations for their use cases. We also want to make sure it is easy for new Providers to onboard and participate in the ecosystem.
Additionally, we have designed every element of the Stack such that APIs as well as Resources (like Models) can be federated.
## Supported Llama Stack Implementations
### API Providers
Here is a list of the various API providers and available distributions that help developers get started easily:
| **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** |
|:------------------------------------------------------------------------------------------:|:----------------------:|:------------------:|:------------------:|:------------------:|:------------------:|:------------------:|
| Meta Reference | Single Node | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
@ -87,14 +43,16 @@ Additionally, we have designed every element of the Stack such that APIs as well
| Groq | Hosted | | :heavy_check_mark: | | | |
| Ollama | Single Node | | :heavy_check_mark: | | | |
| TGI | Hosted and Single Node | | :heavy_check_mark: | | | |
| [NVIDIA NIM](https://build.nvidia.com/nim?filters=nimType%3Anim_type_run_anywhere&q=llama) | Hosted and Single Node | | :heavy_check_mark: | | | |
| NVIDIA NIM | Hosted and Single Node | | :heavy_check_mark: | | | |
| Chroma | Single Node | | | :heavy_check_mark: | | |
| PG Vector | Single Node | | | :heavy_check_mark: | | |
| PyTorch ExecuTorch | On-device iOS | :heavy_check_mark: | :heavy_check_mark: | | | |
| [vLLM](https://github.com/vllm-project/vllm) | Hosted and Single Node | | :heavy_check_mark: | | | |
| vLLM | Hosted and Single Node | | :heavy_check_mark: | | | |
### Distributions
A Llama Stack Distribution (or "distro") is a pre-configured bundle of provider implementations for each API component. Distributions make it easy to get started with a specific deployment scenario - you can begin with a local development setup (e.g., Ollama) and seamlessly transition to production (e.g., Fireworks) without changing your application code (see the sketch after the table below). Here are some of the distributions we support:
| **Distribution** | **Llama Stack Docker** | Start This Distribution |
|:---------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------:|
| Meta Reference | [llamastack/distribution-meta-reference-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/meta-reference-gpu.html) |
@ -104,9 +62,9 @@ Additionally, we have designed every element of the Stack such that APIs as well
| TGI | [llamastack/distribution-tgi](https://hub.docker.com/repository/docker/llamastack/distribution-tgi/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/tgi.html) |
| Together | [llamastack/distribution-together](https://hub.docker.com/repository/docker/llamastack/distribution-together/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/together.html) |
| Fireworks | [llamastack/distribution-fireworks](https://hub.docker.com/repository/docker/llamastack/distribution-fireworks/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/fireworks.html) |
| [vLLM](https://github.com/vllm-project/vllm) | [llamastack/distribution-remote-vllm](https://hub.docker.com/repository/docker/llamastack/distribution-remote-vllm/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/remote-vllm.html) |
| vLLM | [llamastack/distribution-remote-vllm](https://hub.docker.com/repository/docker/llamastack/distribution-remote-vllm/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/remote-vllm.html) |
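As referenced above, switching distributions does not require application changes. A minimal sketch (the URLs and model name are placeholders, and a Llama Stack server is assumed to be running for the chosen distro); only the `base_url` differs between a local and a hosted setup:
```python
import os

from llama_stack_client import LlamaStackClient

# Point at whichever distribution is running; the application code is identical.
# LLAMA_STACK_BASE_URL might be http://localhost:8321 for a local Ollama distro
# or the URL of a hosted Fireworks/Together deployment (placeholder values).
client = LlamaStackClient(
    base_url=os.getenv("LLAMA_STACK_BASE_URL", "http://localhost:8321")
)

response = client.inference.chat_completion(
    model_id="Llama3.2-3B-Instruct",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.completion_message.content)
```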
## Installation
### Installation
You have two ways to install this repository:
@ -131,7 +89,7 @@ You have two ways to install this repository:
pip install -e .
```
## Documentation
### Documentation
Please check out our [Documentation](https://llama-stack.readthedocs.io/en/latest/index.html) page for more details.
@ -139,13 +97,13 @@ Please checkout our [Documentation](https://llama-stack.readthedocs.io/en/latest
* A guide to using the `llama` CLI to work with Llama models (download, study prompts) and to building/starting a Llama Stack distribution.
* [Getting Started](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html)
* Quick guide to start a Llama Stack server.
* [Jupyter notebook](./docs/notebooks/Llama_Stack_Building_AI_Applications.ipynb) to walk through how to use simple text and vision inference with the llama_stack_client APIs
* [Jupyter notebook](./docs/getting_started.ipynb) to walk through how to use simple text and vision inference with the llama_stack_client APIs
* The complete Llama Stack lesson [Colab notebook](https://colab.research.google.com/drive/1dtVmxotBsI4cGZQNsJRYPrLiDeT0Wnwt) from the new [Llama 3.2 course on Deeplearning.ai](https://learn.deeplearning.ai/courses/introducing-multimodal-llama-3-2/lesson/8/llama-stack).
* A [Zero-to-Hero Guide](https://github.com/meta-llama/llama-stack/tree/main/docs/zero_to_hero_guide) that guides you through all the key components of Llama Stack with code samples.
* [Contributing](CONTRIBUTING.md)
* [Adding a new API Provider](https://llama-stack.readthedocs.io/en/latest/contributing/new_api_provider.html) to walk through how to add a new API provider.
## Llama Stack Client SDKs
### Llama Stack Client SDKs
| **Language** | **Client SDK** | **Package** |
| :----: | :----: | :----: |

View file

@ -5,7 +5,7 @@ services:
- ~/.llama:/root/.llama
- ./run.yaml:/root/llamastack-run-bedrock.yaml
ports:
- "5000:5000"
- "8321:8321"
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-bedrock.yaml"
deploy:
restart_policy:

View file

@ -6,7 +6,7 @@ services:
- ~/.llama:/root/.llama
- ./run.yaml:/root/llamastack-run-cerebras.yaml
ports:
- "5000:5000"
- "8321:8321"
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-cerebras.yaml"
deploy:
restart_policy:

View file

@ -40,7 +40,7 @@ services:
# Link to TGI run.yaml file
- ./run.yaml:/root/my-run.yaml
ports:
- "5000:5000"
- "8321:8321"
# Hack: wait for TGI server to start before starting docker
entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
restart_policy:

View file

@ -1,6 +1,6 @@
version: '2'
image_name: local
docker_image: null
container_image: null
conda_env: local
apis:
- shields

View file

@ -13,6 +13,7 @@
"httpx",
"huggingface_hub",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -45,6 +46,7 @@
"fire",
"httpx",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -78,6 +80,7 @@
"fire",
"httpx",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -101,14 +104,17 @@
],
"remote-vllm": [
"aiosqlite",
"autoevals",
"blobfile",
"chardet",
"chromadb-client",
"datasets",
"faiss-cpu",
"fastapi",
"fire",
"httpx",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -142,6 +148,7 @@
"fireworks-ai",
"httpx",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -176,6 +183,7 @@
"httpx",
"huggingface_hub",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -209,6 +217,7 @@
"fire",
"httpx",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -244,6 +253,7 @@
"httpx",
"lm-format-enforcer",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -268,6 +278,38 @@
"sentence-transformers --no-deps",
"torch --index-url https://download.pytorch.org/whl/cpu"
],
"nvidia": [
"aiosqlite",
"autoevals",
"blobfile",
"chardet",
"datasets",
"faiss-cpu",
"fastapi",
"fire",
"httpx",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
"opentelemetry-exporter-otlp-proto-http",
"opentelemetry-sdk",
"pandas",
"pillow",
"psycopg2-binary",
"pypdf",
"redis",
"requests",
"scikit-learn",
"scipy",
"sentencepiece",
"tqdm",
"transformers",
"uvicorn",
"sentence-transformers --no-deps",
"torch --index-url https://download.pytorch.org/whl/cpu"
],
"meta-reference-quantized-gpu": [
"accelerate",
"aiosqlite",
@ -284,6 +326,7 @@
"httpx",
"lm-format-enforcer",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",
@ -311,9 +354,12 @@
],
"cerebras": [
"aiosqlite",
"autoevals",
"blobfile",
"cerebras_cloud_sdk",
"chardet",
"chromadb-client",
"datasets",
"faiss-cpu",
"fastapi",
"fire",
@ -321,6 +367,7 @@
"matplotlib",
"nltk",
"numpy",
"openai",
"opentelemetry-exporter-otlp-proto-http",
"opentelemetry-sdk",
"pandas",
@ -386,6 +433,7 @@
"httpx",
"huggingface_hub",
"matplotlib",
"mcp",
"nltk",
"numpy",
"openai",

View file

@ -1,13 +1,11 @@
services:
llamastack:
image: llamastack/distribution-fireworks
network_mode: "host"
volumes:
- ~/.llama:/root/.llama
- ./run.yaml:/root/llamastack-run-fireworks.yaml
ports:
- "5000:5000"
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-fireworks.yaml"
- "8321:8321"
environment:
- FIREWORKS_API_KEY=${FIREWORKS_API_KEY}
entrypoint: bash -c "python -m llama_stack.distribution.server.server --template fireworks"
deploy:
restart_policy:
condition: on-failure

View file

@ -6,7 +6,7 @@ services:
- ~/.llama:/root/.llama
- ./run.yaml:/root/my-run.yaml
ports:
- "5000:5000"
- "8321:8321"
devices:
- nvidia.com/gpu=all
environment:

View file

@ -6,7 +6,7 @@ services:
- ~/.llama:/root/.llama
- ./run.yaml:/root/my-run.yaml
ports:
- "5000:5000"
- "8321:8321"
devices:
- nvidia.com/gpu=all
environment:

View file

@ -1,6 +1,6 @@
version: '2'
image_name: local
docker_image: null
container_image: null
conda_env: local
apis:
- shields

View file

@ -0,0 +1 @@
../../llama_stack/templates/nvidia/build.yaml

View file

@ -0,0 +1,19 @@
services:
llamastack:
image: distribution-nvidia:dev
network_mode: "host"
volumes:
- ~/.llama:/root/.llama
- ./run.yaml:/root/llamastack-run-nvidia.yaml
ports:
- "8321:8321"
environment:
- INFERENCE_MODEL=${INFERENCE_MODEL:-Llama3.1-8B-Instruct}
- NVIDIA_API_KEY=${NVIDIA_API_KEY:-}
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml-config /root/llamastack-run-nvidia.yaml"
deploy:
restart_policy:
condition: on-failure
delay: 3s
max_attempts: 5
window: 60s

View file

@ -0,0 +1 @@
../../llama_stack/templates/nvidia/run.yaml

View file

@ -1,13 +1,11 @@
services:
llamastack:
image: llamastack/distribution-together
network_mode: "host"
volumes:
- ~/.llama:/root/.llama
- ./run.yaml:/root/llamastack-run-together.yaml
ports:
- "5000:5000"
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-together.yaml"
- "8321:8321"
environment:
- TOGETHER_API_KEY=${TOGETHER_API_KEY}
entrypoint: bash -c "python -m llama_stack.distribution.server.server --template together"
deploy:
restart_policy:
condition: on-failure

View file

@ -6,7 +6,7 @@ services:
- ~/.llama:/root/.llama
- ./run.yaml:/root/my-run.yaml
ports:
- "5000:5000"
- "8321:8321"
devices:
- nvidia.com/gpu=all
environment:

View file

@ -1,6 +1,6 @@
version: '2'
image_name: local
docker_image: null
container_image: null
conda_env: local
apis:
- shields

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View file

@ -537,7 +537,6 @@ class Generator:
success_type_descriptions = {
item: doc_string.short_description
for item, doc_string in success_type_docstring.items()
if doc_string.short_description
}
else:
# use return type as a single response type
@ -596,6 +595,7 @@ class Generator:
)
responses.update(response_builder.build_response(response_options))
assert len(responses.keys()) > 0, f"No responses found for {op.name}"
if op.event_type is not None:
builder = ContentBuilder(self.schema_builder)
callbacks = {

View file

@ -8,7 +8,6 @@ import collections.abc
import enum
import inspect
import typing
import uuid
from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
@ -16,12 +15,7 @@ from llama_stack.apis.version import LLAMA_STACK_API_VERSION
from termcolor import colored
from ..strong_typing.inspection import (
get_signature,
is_type_enum,
is_type_optional,
unwrap_optional_type,
)
from ..strong_typing.inspection import get_signature
def split_prefix(
@ -113,9 +107,6 @@ class EndpointOperation:
def get_route(self) -> str:
if self.route is not None:
assert (
"_" not in self.route
), f"route should not contain underscores: {self.route}"
return "/".join(["", LLAMA_STACK_API_VERSION, self.route.lstrip("/")])
route_parts = ["", LLAMA_STACK_API_VERSION, self.name]
@ -181,10 +172,16 @@ def _get_endpoint_functions(
def _get_defining_class(member_fn: str, derived_cls: type) -> type:
"Find the class in which a member function is first defined in a class inheritance hierarchy."
# This import must be dynamic here
from llama_stack.apis.tools import RAGToolRuntime, ToolRuntime
# iterate in reverse member resolution order to find most specific class first
for cls in reversed(inspect.getmro(derived_cls)):
for name, _ in inspect.getmembers(cls, inspect.isfunction):
if name == member_fn:
# HACK ALERT
if cls == RAGToolRuntime:
return ToolRuntime
return cls
raise ValidationError(
@ -265,42 +262,16 @@ def get_endpoint_operations(
f"parameter '{param_name}' in function '{func_name}' has no type annotation"
)
if is_type_optional(param_type):
inner_type: type = unwrap_optional_type(param_type)
else:
inner_type = param_type
if prefix == "get" and (
inner_type is bool
or inner_type is int
or inner_type is float
or inner_type is str
or inner_type is uuid.UUID
or is_type_enum(inner_type)
):
if parameter.kind == inspect.Parameter.POSITIONAL_ONLY:
if route_params is not None and param_name not in route_params:
raise ValidationError(
f"positional parameter '{param_name}' absent from user-defined route '{route}' for function '{func_name}'"
)
# simple type maps to route path element, e.g. /study/{uuid}/{version}
if prefix in ["get", "delete"]:
if route_params is not None and param_name in route_params:
path_params.append((param_name, param_type))
else:
if route_params is not None and param_name in route_params:
raise ValidationError(
f"query parameter '{param_name}' found in user-defined route '{route}' for function '{func_name}'"
)
# simple type maps to key=value pair in query string
query_params.append((param_name, param_type))
else:
if route_params is not None and param_name in route_params:
raise ValidationError(
f"user-defined route '{route}' for function '{func_name}' has parameter '{param_name}' of composite type: {param_type}"
)
request_params.append((param_name, param_type))
path_params.append((param_name, param_type))
else:
request_params.append((param_name, param_type))
# check if function has explicit return type
if signature.return_annotation is inspect.Signature.empty:
@ -335,19 +306,18 @@ def get_endpoint_operations(
response_type = process_type(return_type)
# set HTTP request method based on type of request and presence of payload
if not request_params:
if prefix in ["delete", "remove"]:
http_method = HTTPMethod.DELETE
else:
elif prefix == "post":
http_method = HTTPMethod.POST
elif prefix == "get":
http_method = HTTPMethod.GET
else:
if prefix == "set":
elif prefix == "set":
http_method = HTTPMethod.PUT
elif prefix == "update":
http_method = HTTPMethod.PATCH
else:
http_method = HTTPMethod.POST
raise ValidationError(f"unknown prefix {prefix}")
result.append(
EndpointOperation(

View file

@ -125,6 +125,7 @@ class JsonSchemaAnyOf(JsonSchemaNode):
@dataclass
class JsonSchemaOneOf(JsonSchemaNode):
oneOf: List["JsonSchemaAny"]
discriminator: Optional[str]
JsonSchemaAny = Union[

View file

@ -342,7 +342,6 @@ def is_type_union(typ: object) -> bool:
"True if the type annotation corresponds to a union type (e.g. `Union[T1,T2,T3]`)."
typ = unwrap_annotated_type(typ)
if _is_union_like(typ):
args = typing.get_args(typ)
return len(args) > 2 or type(None) not in args

View file

@ -36,6 +36,7 @@ from typing import (
)
import jsonschema
from typing_extensions import Annotated
from . import docstring
from .auxiliary import (
@ -329,7 +330,6 @@ class JsonSchemaGenerator:
if metadata is not None:
# type is Annotated[T, ...]
typ = typing.get_args(data_type)[0]
schema = self._simple_type_to_schema(typ)
if schema is not None:
# recognize well-known auxiliary types
@ -446,12 +446,20 @@ class JsonSchemaGenerator:
],
}
elif origin_type is Union:
return {
discriminator = None
if typing.get_origin(data_type) is Annotated:
discriminator = typing.get_args(data_type)[1].discriminator
ret = {
"oneOf": [
self.type_to_schema(union_type)
for union_type in typing.get_args(typ)
]
}
if discriminator:
ret["discriminator"] = {
"propertyName": discriminator,
}
return ret
elif origin_type is Literal:
(literal_value,) = typing.get_args(typ) # unpack value of literal type
schema = self.type_to_schema(type(literal_value))

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -56,9 +56,10 @@ response = client.eval.evaluate_rows(
"type": "model",
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
"sampling_params": {
"temperature": 0.0,
"strategy": {
"type": "greedy",
},
"max_tokens": 4096,
"top_p": 0.9,
"repeat_penalty": 1.0,
},
"system_message": system_message
@ -113,9 +114,10 @@ response = client.eval.evaluate_rows(
"type": "model",
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
"sampling_params": {
"temperature": 0.0,
"strategy": {
"type": "greedy",
},
"max_tokens": 4096,
"top_p": 0.9,
"repeat_penalty": 1.0,
},
}
@ -134,9 +136,9 @@ agent_config = {
"model": "meta-llama/Llama-3.1-405B-Instruct",
"instructions": "You are a helpful assistant",
"sampling_params": {
"strategy": "greedy",
"temperature": 0.0,
"top_p": 0.95,
"strategy": {
"type": "greedy",
},
},
"tools": [
{

View file

@ -0,0 +1,133 @@
# Agent Execution Loop
Agents are the heart of complex AI applications. They combine inference, memory, safety, and tool usage into coherent workflows. At its core, an agent follows a sophisticated execution loop that enables multi-step reasoning, tool usage, and safety checks.
Each agent turn follows these key steps:
1. **Initial Safety Check**: The user's input is first screened through configured safety shields
2. **Context Retrieval**:
- If RAG is enabled, the agent queries relevant documents from memory banks
- For new documents, they are first inserted into the memory bank
- The user's prompt is augmented with the retrieved context
3. **Inference Loop**: The agent enters its main execution loop:
- The LLM receives the augmented prompt (with context and/or previous tool outputs)
- The LLM generates a response, potentially with tool calls
- If tool calls are present:
- Tool inputs are safety-checked
- Tools are executed (e.g., web search, code execution)
- Tool responses are fed back to the LLM for synthesis
- The loop continues until:
- The LLM provides a final response without tool calls
- Maximum iterations are reached
- Token limit is exceeded
4. **Final Safety Check**: The agent's final response is screened through safety shields
```{mermaid}
sequenceDiagram
participant U as User
participant E as Executor
participant M as Memory Bank
participant L as LLM
participant T as Tools
participant S as Safety Shield
Note over U,S: Agent Turn Start
U->>S: 1. Submit Prompt
activate S
S->>E: Input Safety Check
deactivate S
E->>M: 2.1 Query Context
M-->>E: 2.2 Retrieved Documents
loop Inference Loop
E->>L: 3.1 Augment with Context
L-->>E: 3.2 Response (with/without tool calls)
alt Has Tool Calls
E->>S: Check Tool Input
S->>T: 4.1 Execute Tool
T-->>E: 4.2 Tool Response
E->>L: 5.1 Tool Response
L-->>E: 5.2 Synthesized Response
end
opt Stop Conditions
Note over E: Break if:
Note over E: - No tool calls
Note over E: - Max iterations reached
Note over E: - Token limit exceeded
end
end
E->>S: Output Safety Check
S->>U: 6. Final Response
```
Each step in this process can be monitored and controlled through configurations. Here's an example that demonstrates monitoring the agent's execution:
```python
from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger
from llama_stack_client.types.agent_create_params import AgentConfig

# Assumes a Llama Stack server is already running locally (port is an example)
client = LlamaStackClient(base_url="http://localhost:8321")
agent_config = AgentConfig(
model="Llama3.2-3B-Instruct",
instructions="You are a helpful assistant",
# Enable both RAG and tool usage
tools=[
{
"type": "memory",
"memory_bank_configs": [{
"type": "vector",
"bank_id": "my_docs"
}],
"max_tokens_in_context": 4096
},
{
"type": "code_interpreter",
"enable_inline_code_execution": True
}
],
# Configure safety
input_shields=["content_safety"],
output_shields=["content_safety"],
# Control the inference loop
max_infer_iters=5,
sampling_params={
"strategy": {
"type": "top_p",
"temperature": 0.7,
"top_p": 0.95
},
"max_tokens": 2048
}
)
agent = Agent(client, agent_config)
session_id = agent.create_session("monitored_session")
# Stream the agent's execution steps
response = agent.create_turn(
messages=[{"role": "user", "content": "Analyze this code and run it"}],
attachments=[{
"content": "https://raw.githubusercontent.com/example/code.py",
"mime_type": "text/plain"
}],
session_id=session_id
)
# Monitor each step of execution
for log in EventLogger().log(response):
if log.event.step_type == "memory_retrieval":
print("Retrieved context:", log.event.retrieved_context)
elif log.event.step_type == "inference":
print("LLM output:", log.event.model_response)
elif log.event.step_type == "tool_execution":
print("Tool call:", log.event.tool_call)
print("Tool response:", log.event.tool_response)
elif log.event.step_type == "shield_call":
if log.event.violation:
print("Safety violation:", log.event.violation)
```

View file

@ -0,0 +1,36 @@
## Testing & Evaluation
Llama Stack provides built-in tools for evaluating your applications:
1. **Benchmarking**: Test against standard datasets
2. **Application Evaluation**: Score your application's outputs
3. **Custom Metrics**: Define your own evaluation criteria
Here's how to set up basic evaluation:
```python
# Create an evaluation task
response = client.eval_tasks.register(
eval_task_id="my_eval",
dataset_id="my_dataset",
scoring_functions=["accuracy", "relevance"]
)
# Run evaluation
job = client.eval.run_eval(
task_id="my_eval",
task_config={
"type": "app",
"eval_candidate": {
"type": "agent",
"config": agent_config
}
}
)
# Get results
result = client.eval.job_result(
task_id="my_eval",
job_id=job.job_id
)
```

View file

@ -1,421 +1,26 @@
# Building AI Applications
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1F2ksmkoGQPa4pzRjMOE6BXWeOxWFIW6n?usp=sharing)
Llama Stack provides all the building blocks needed to create sophisticated AI applications.
Llama Stack provides all the building blocks needed to create sophisticated AI applications. This guide will walk you through how to use these components effectively. Check out our Colab notebook to follow along with working examples of how you can build LLM-powered agentic applications using Llama Stack.
The best way to get started is to look at this notebook, which walks through the various APIs (from basic inference to RAG agents) and how to use them.
## Basic Inference
**Notebook**: [Building AI Applications](docs/notebooks/Llama_Stack_Building_AI_Applications.ipynb)
The foundation of any AI application is the ability to interact with LLM models. Llama Stack provides a simple interface for both completion and chat-based inference:
## Agentic Concepts
- **[Agent Execution Loop](agent_execution_loop)**
- **[RAG](rag)**
- **[Safety](safety)**
- **[Tools](tools)**
- **[Telemetry](telemetry)**
```python
from llama_stack_client import LlamaStackClient
client = LlamaStackClient(base_url="http://localhost:5001")
# List available models
models = client.models.list()
# Simple chat completion
response = client.inference.chat_completion(
model_id="Llama3.2-3B-Instruct",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Write a haiku about coding"}
]
)
print(response.completion_message.content)
```
## Adding Memory & RAG
Memory enables your applications to reference and recall information from previous interactions or external documents. Llama Stack's memory system is built around the concept of Memory Banks:
1. **Vector Memory Banks**: For semantic search and retrieval
2. **Key-Value Memory Banks**: For structured data storage
3. **Keyword Memory Banks**: For basic text search
4. **Graph Memory Banks**: For relationship-based retrieval
Here's how to set up a vector memory bank for RAG:
```python
# Register a memory bank
bank_id = "my_documents"
response = client.memory_banks.register(
memory_bank_id=bank_id,
params={
"memory_bank_type": "vector",
"embedding_model": "all-MiniLM-L6-v2",
"chunk_size_in_tokens": 512
}
)
# Insert documents
documents = [
{
"document_id": "doc1",
"content": "Your document text here",
"mime_type": "text/plain"
}
]
client.memory.insert(bank_id, documents)
# Query documents
results = client.memory.query(
bank_id=bank_id,
query="What do you know about...",
)
```
## Implementing Safety Guardrails
Safety is a critical component of any AI application. Llama Stack provides a Shield system that can be applied at multiple touchpoints:
```python
# Register a safety shield
shield_id = "content_safety"
client.shields.register(
shield_id=shield_id,
provider_shield_id="llama-guard-basic"
)
# Run content through shield
response = client.safety.run_shield(
shield_id=shield_id,
messages=[{"role": "user", "content": "User message here"}]
)
if response.violation:
print(f"Safety violation detected: {response.violation.user_message}")
```
## Building Agents
Agents are the heart of complex AI applications. They combine inference, memory, safety, and tool usage into coherent workflows. At its core, an agent follows a sophisticated execution loop that enables multi-step reasoning, tool usage, and safety checks.
### The Agent Execution Loop
Each agent turn follows these key steps:
1. **Initial Safety Check**: The user's input is first screened through configured safety shields
2. **Context Retrieval**:
- If RAG is enabled, the agent queries relevant documents from memory banks
- For new documents, they are first inserted into the memory bank
- Retrieved context is augmented to the user's prompt
3. **Inference Loop**: The agent enters its main execution loop:
- The LLM receives the augmented prompt (with context and/or previous tool outputs)
- The LLM generates a response, potentially with tool calls
- If tool calls are present:
- Tool inputs are safety-checked
- Tools are executed (e.g., web search, code execution)
- Tool responses are fed back to the LLM for synthesis
- The loop continues until:
- The LLM provides a final response without tool calls
- Maximum iterations are reached
- Token limit is exceeded
4. **Final Safety Check**: The agent's final response is screened through safety shields
```{mermaid}
sequenceDiagram
participant U as User
participant E as Executor
participant M as Memory Bank
participant L as LLM
participant T as Tools
participant S as Safety Shield
Note over U,S: Agent Turn Start
U->>S: 1. Submit Prompt
activate S
S->>E: Input Safety Check
deactivate S
E->>M: 2.1 Query Context
M-->>E: 2.2 Retrieved Documents
loop Inference Loop
E->>L: 3.1 Augment with Context
L-->>E: 3.2 Response (with/without tool calls)
alt Has Tool Calls
E->>S: Check Tool Input
S->>T: 4.1 Execute Tool
T-->>E: 4.2 Tool Response
E->>L: 5.1 Tool Response
L-->>E: 5.2 Synthesized Response
end
opt Stop Conditions
Note over E: Break if:
Note over E: - No tool calls
Note over E: - Max iterations reached
Note over E: - Token limit exceeded
end
end
E->>S: Output Safety Check
S->>U: 6. Final Response
```
Each step in this process can be monitored and controlled through configurations. Here's an example that demonstrates monitoring the agent's execution:
```python
from llama_stack_client.lib.agents.event_logger import EventLogger
agent_config = AgentConfig(
model="Llama3.2-3B-Instruct",
instructions="You are a helpful assistant",
# Enable both RAG and tool usage
tools=[
{
"type": "memory",
"memory_bank_configs": [{
"type": "vector",
"bank_id": "my_docs"
}],
"max_tokens_in_context": 4096
},
{
"type": "code_interpreter",
"enable_inline_code_execution": True
}
],
# Configure safety
input_shields=["content_safety"],
output_shields=["content_safety"],
# Control the inference loop
max_infer_iters=5,
sampling_params={
"temperature": 0.7,
"max_tokens": 2048
}
)
agent = Agent(client, agent_config)
session_id = agent.create_session("monitored_session")
# Stream the agent's execution steps
response = agent.create_turn(
messages=[{"role": "user", "content": "Analyze this code and run it"}],
attachments=[{
"content": "https://raw.githubusercontent.com/example/code.py",
"mime_type": "text/plain"
}],
session_id=session_id
)
# Monitor each step of execution
for log in EventLogger().log(response):
if log.event.step_type == "memory_retrieval":
print("Retrieved context:", log.event.retrieved_context)
elif log.event.step_type == "inference":
print("LLM output:", log.event.model_response)
elif log.event.step_type == "tool_execution":
print("Tool call:", log.event.tool_call)
print("Tool response:", log.event.tool_response)
elif log.event.step_type == "shield_call":
if log.event.violation:
print("Safety violation:", log.event.violation)
```
This example shows how an agent can:
Llama Stack provides a high-level agent framework:
```python
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.types.agent_create_params import AgentConfig
# Configure an agent
agent_config = AgentConfig(
model="Llama3.2-3B-Instruct",
instructions="You are a helpful assistant",
tools=[
{
"type": "memory",
"memory_bank_configs": [],
"query_generator_config": {
"type": "default",
"sep": " "
}
}
],
input_shields=["content_safety"],
output_shields=["content_safety"],
enable_session_persistence=True
)
# Create an agent
agent = Agent(client, agent_config)
session_id = agent.create_session("my_session")
# Run agent turns
response = agent.create_turn(
messages=[{"role": "user", "content": "Your question here"}],
session_id=session_id
)
```
### Adding Tools to Agents
Agents can be enhanced with various tools:
1. **Search**: Web search capabilities through providers like Brave
2. **Code Interpreter**: Execute code snippets
3. **RAG**: Memory and document retrieval
4. **Function Calling**: Custom function execution
5. **WolframAlpha**: Mathematical computations
6. **Photogen**: Image generation
Example of configuring an agent with tools:
```python
agent_config = AgentConfig(
model="Llama3.2-3B-Instruct",
tools=[
{
"type": "brave_search",
"api_key": "YOUR_API_KEY",
"engine": "brave"
},
{
"type": "code_interpreter",
"enable_inline_code_execution": True
}
],
tool_choice="auto",
tool_prompt_format="json"
)
```
## Building RAG-Enhanced Agents
One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example:
```python
from llama_stack_client.types import Attachment
# Create attachments from documents
attachments = [
Attachment(
content="https://raw.githubusercontent.com/example/doc.rst",
mime_type="text/plain"
)
]
# Configure agent with memory
agent_config = AgentConfig(
model="Llama3.2-3B-Instruct",
instructions="You are a helpful assistant",
tools=[{
"type": "memory",
"memory_bank_configs": [],
"query_generator_config": {"type": "default", "sep": " "},
"max_tokens_in_context": 4096,
"max_chunks": 10
}],
enable_session_persistence=True
)
agent = Agent(client, agent_config)
session_id = agent.create_session("rag_session")
# Initial document ingestion
response = agent.create_turn(
messages=[{
"role": "user",
"content": "I am providing some documents for reference."
}],
attachments=attachments,
session_id=session_id
)
# Query with RAG
response = agent.create_turn(
messages=[{
"role": "user",
"content": "What are the key topics in the documents?"
}],
session_id=session_id
)
```
## Testing & Evaluation
Llama Stack provides built-in tools for evaluating your applications:
1. **Benchmarking**: Test against standard datasets
2. **Application Evaluation**: Score your application's outputs
3. **Custom Metrics**: Define your own evaluation criteria
Here's how to set up basic evaluation:
```python
# Create an evaluation task
response = client.eval_tasks.register(
eval_task_id="my_eval",
dataset_id="my_dataset",
scoring_functions=["accuracy", "relevance"]
)
# Run evaluation
job = client.eval.run_eval(
task_id="my_eval",
task_config={
"type": "app",
"eval_candidate": {
"type": "agent",
"config": agent_config
}
}
)
# Get results
result = client.eval.job_result(
task_id="my_eval",
job_id=job.job_id
)
```
## Debugging & Monitoring
Llama Stack includes comprehensive telemetry for debugging and monitoring your applications:
1. **Tracing**: Track request flows across components
2. **Metrics**: Measure performance and usage
3. **Logging**: Debug issues and track behavior
The telemetry system supports multiple output formats:
- OpenTelemetry for visualization in tools like Jaeger
- SQLite for local storage and querying
- Console output for development
Example of querying traces:
```python
# Query traces for a session
traces = client.telemetry.query_traces(
attribute_filters=[{
"key": "session_id",
"op": "eq",
"value": session_id
}]
)
# Get spans within the root span; indexed by ID
# Use parent_span_id to build a tree out of it
spans_by_id = client.telemetry.get_span_tree(
span_id=traces[0].root_span_id
)
```
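The comment above notes that `parent_span_id` can be used to build a tree. A minimal sketch, continuing from the snippet above and assuming each returned span exposes `span_id`, `parent_span_id`, and `name` attributes (as in the span JSON shown in the Telemetry docs):
```python
from collections import defaultdict

# Group spans by their parent so the trace can be printed as an indented tree.
children = defaultdict(list)
for span in spans_by_id.values():
    children[span.parent_span_id].append(span)

def print_tree(span_id, depth=0):
    # Recursively print each span's name, indented by its depth in the trace.
    for child in children.get(span_id, []):
        print("  " * depth + child.name)
        print_tree(child.span_id, depth + 1)

print_tree(traces[0].root_span_id)
```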
For details on how to use the telemetry system to debug your applications, export traces to a dataset, and run evaluations, see the [Telemetry](telemetry) section.
```{toctree}
:hidden:
:maxdepth: 3
:maxdepth: 1
agent_execution_loop
rag
safety
tools
telemetry
```

View file

@ -0,0 +1,92 @@
## Memory & RAG
Memory enables your applications to reference and recall information from previous interactions or external documents. Llama Stack's memory system is built around the concept of Memory Banks:
1. **Vector Memory Banks**: For semantic search and retrieval
2. **Key-Value Memory Banks**: For structured data storage
3. **Keyword Memory Banks**: For basic text search
4. **Graph Memory Banks**: For relationship-based retrieval
Here's how to set up a vector memory bank for RAG:
```python
# Register a memory bank
bank_id = "my_documents"
response = client.memory_banks.register(
memory_bank_id=bank_id,
params={
"memory_bank_type": "vector",
"embedding_model": "all-MiniLM-L6-v2",
"chunk_size_in_tokens": 512
}
)
# Insert documents
documents = [
{
"document_id": "doc1",
"content": "Your document text here",
"mime_type": "text/plain"
}
]
client.memory.insert(bank_id, documents)
# Query documents
results = client.memory.query(
bank_id=bank_id,
query="What do you know about...",
)
```
### Building RAG-Enhanced Agents
One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example:
```python
from llama_stack_client.types import Attachment
# Create attachments from documents
attachments = [
Attachment(
content="https://raw.githubusercontent.com/example/doc.rst",
mime_type="text/plain"
)
]
# Configure agent with memory
agent_config = AgentConfig(
model="Llama3.2-3B-Instruct",
instructions="You are a helpful assistant",
tools=[{
"type": "memory",
"memory_bank_configs": [],
"query_generator_config": {"type": "default", "sep": " "},
"max_tokens_in_context": 4096,
"max_chunks": 10
}],
enable_session_persistence=True
)
agent = Agent(client, agent_config)
session_id = agent.create_session("rag_session")
# Initial document ingestion
response = agent.create_turn(
messages=[{
"role": "user",
"content": "I am providing some documents for reference."
}],
attachments=attachments,
session_id=session_id
)
# Query with RAG
response = agent.create_turn(
messages=[{
"role": "user",
"content": "What are the key topics in the documents?"
}],
session_id=session_id
)
```

View file

@ -0,0 +1,21 @@
## Safety Guardrails
Safety is a critical component of any AI application. Llama Stack provides a Shield system that can be applied at multiple touchpoints:
```python
# Register a safety shield
shield_id = "content_safety"
client.shields.register(
shield_id=shield_id,
provider_shield_id="llama-guard-basic"
)
# Run content through shield
response = client.safety.run_shield(
shield_id=shield_id,
messages=[{"role": "user", "content": "User message here"}]
)
if response.violation:
print(f"Safety violation detected: {response.violation.user_message}")
```

View file

@ -1,8 +1,4 @@
# Telemetry
```{note}
The telemetry system is currently experimental and subject to change. We welcome feedback and contributions to help improve it.
```
The Llama Stack telemetry system provides comprehensive tracing, metrics, and logging capabilities. It supports multiple sink types including OpenTelemetry, SQLite, and Console output.
@ -44,58 +40,6 @@ structured_log_event = SpanStartPayload(
- **SQLite**: Store events in a local SQLite database. This is needed if you want to query the events later through the Llama Stack API.
- **Console**: Print events to the console.
## APIs
The telemetry API is designed to be flexible for different user flows like debugging/visualization in UI, monitoring, and saving traces to datasets.
The telemetry system exposes the following HTTP endpoints:
### Log Event
```http
POST /telemetry/log-event
```
Logs a telemetry event (unstructured log, metric, or structured log) with optional TTL.
### Query Traces
```http
POST /telemetry/query-traces
```
Retrieves traces based on filters with pagination support. Parameters:
- `attribute_filters`: List of conditions to filter traces
- `limit`: Maximum number of traces to return (default: 100)
- `offset`: Number of traces to skip (default: 0)
- `order_by`: List of fields to sort by
### Get Span Tree
```http
POST /telemetry/get-span-tree
```
Retrieves a hierarchical view of spans starting from a specific span. Parameters:
- `span_id`: ID of the root span to retrieve
- `attributes_to_return`: Optional list of specific attributes to include
- `max_depth`: Optional maximum depth of the span tree to return
### Query Spans
```http
POST /telemetry/query-spans
```
Retrieves spans matching specified filters and returns selected attributes. Parameters:
- `attribute_filters`: List of conditions to filter traces
- `attributes_to_return`: List of specific attributes to include in results
- `max_depth`: Optional maximum depth of spans to traverse (default: no limit)
Returns a flattened list of spans with requested attributes.
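As an illustrative sketch (not from the official docs), a call to this endpoint could look like the following, using `httpx` and assuming a local server at `http://localhost:5000` with the `alpha` prefix used by the curl examples further down; the filter value reuses the session id from those examples:
```python
import httpx

# Fetch the "input" and "output" attributes of spans for one agent session
# (request shape follows the parameter list above).
response = httpx.post(
    "http://localhost:5000/alpha/telemetry/query-spans",
    json={
        "attribute_filters": [
            {
                "key": "session_id",
                "op": "eq",
                "value": "dd667b87-ca4b-4d30-9265-5a0de318fc65",
            }
        ],
        "attributes_to_return": ["input", "output"],
        "max_depth": 10,
    },
)
response.raise_for_status()
print(response.json())
```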
### Save Spans to Dataset
This is useful for saving traces to a dataset for running evaluations. For example, you can save the input/output of each span that is part of an agent session/turn to a dataset and then run an eval task on it. See example in [Example: Save Spans to Dataset](#example-save-spans-to-dataset).
```http
POST /telemetry/save-spans-to-dataset
```
Queries spans and saves their attributes to a dataset. Parameters:
- `attribute_filters`: List of conditions to filter traces
- `attributes_to_save`: List of span attributes to save to the dataset
- `dataset_id`: ID of the dataset to save to
- `max_depth`: Optional maximum depth of spans to traverse (default: no limit)
## Providers
### Meta-Reference Provider
@ -133,110 +77,4 @@ Once the Jaeger instance is running, you can visualize traces by navigating to h
## Querying Traces Stored in SQLite
The `sqlite` sink allows you to query traces without an external system. Here are some example queries:
Querying traces for an agent session
The client SDK has not yet been updated to support the new telemetry API; it will be updated soon. In the meantime, you can manually query traces using the following curl command:
``` bash
curl -X POST 'http://localhost:5000/alpha/telemetry/query-traces' \
-H 'Content-Type: application/json' \
-d '{
"attribute_filters": [
{
"key": "session_id",
"op": "eq",
"value": "dd667b87-ca4b-4d30-9265-5a0de318fc65" }],
"limit": 100,
"offset": 0,
"order_by": ["start_time"]
[
{
"trace_id": "6902f54b83b4b48be18a6f422b13e16f",
"root_span_id": "5f37b85543afc15a",
"start_time": "2024-12-04T08:08:30.501587",
"end_time": "2024-12-04T08:08:36.026463"
},
........
]
}'
```
Querying spans for a specific root span id
``` bash
curl -X POST 'http://localhost:5000/alpha/telemetry/get-span-tree' \
-H 'Content-Type: application/json' \
-d '{ "span_id" : "6cceb4b48a156913", "max_depth": 2 }'
{
"span_id": "6cceb4b48a156913",
"trace_id": "dafa796f6aaf925f511c04cd7c67fdda",
"parent_span_id": "892a66d726c7f990",
"name": "retrieve_rag_context",
"start_time": "2024-12-04T09:28:21.781995",
"end_time": "2024-12-04T09:28:21.913352",
"attributes": {
"input": [
"{\"role\":\"system\",\"content\":\"You are a helpful assistant\"}",
"{\"role\":\"user\",\"content\":\"What are the top 5 topics that were explained in the documentation? Only list succinct bullet points.\",\"context\":null}"
]
},
"children": [
{
"span_id": "1a2df181854064a8",
"trace_id": "dafa796f6aaf925f511c04cd7c67fdda",
"parent_span_id": "6cceb4b48a156913",
"name": "MemoryRouter.query_documents",
"start_time": "2024-12-04T09:28:21.787620",
"end_time": "2024-12-04T09:28:21.906512",
"attributes": {
"input": null
},
"children": [],
"status": "ok"
}
],
"status": "ok"
}
```
## Example: Save Spans to Dataset
Save all spans for a specific agent session to a dataset.
``` bash
curl -X POST 'http://localhost:5000/alpha/telemetry/save-spans-to-dataset' \
-H 'Content-Type: application/json' \
-d '{
"attribute_filters": [
{
"key": "session_id",
"op": "eq",
"value": "dd667b87-ca4b-4d30-9265-5a0de318fc65"
}
],
"attributes_to_save": ["input", "output"],
"dataset_id": "my_dataset",
"max_depth": 10
}'
```
Save all spans for a specific agent turn to a dataset.
```bash
curl -X POST 'http://localhost:5000/alpha/telemetry/save-spans-to-dataset' \
-H 'Content-Type: application/json' \
-d '{
"attribute_filters": [
{
"key": "turn_id",
"op": "eq",
"value": "123e4567-e89b-12d3-a456-426614174000"
}
],
"attributes_to_save": ["input", "output"],
"dataset_id": "my_dataset",
"max_depth": 10
}'
```
The `sqlite` sink allows you to query traces without an external system. Here are some example queries. Refer to the notebook at [Llama Stack Building AI Applications](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) for more examples on how to query traces and spans.

View file

@ -0,0 +1,202 @@
# Tools
Tools are functions that can be invoked by an agent to perform tasks. They are organized into tool groups and registered with specific providers. Each tool group represents a collection of related tools from a single provider. Tools are organized into groups so that state can be externalized: the tools in a group typically operate on the same state.
An example of this would be a "db_access" tool group that contains tools for interacting with a database. "list_tables", "query_table", "insert_row" could be examples of tools in this group.
Tools are treated like any other resource in Llama Stack, such as models. You can register them, have providers for them, etc.
When instantiating an agent, you can provide it with a list of tool groups that it has access to. The agent gets the corresponding tool definitions for the specified tool groups and passes them along to the model.
Refer to the [Building AI Applications](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) notebook for more examples on how to use tools.
## Types of Tool Group providers
There are three types of providers for tool groups that are supported by Llama Stack.
1. Built-in providers
2. Model Context Protocol (MCP) providers
3. Client provided tools
### Built-in providers
Built-in providers come packaged with Llama Stack. These providers offer common functionality such as web search, code interpretation, and computational capabilities.
#### Web Search providers
There are three web search providers that are supported by Llama Stack.
1. Brave Search
2. Bing Search
3. Tavily Search
Example client SDK call to register a "websearch" toolgroup that is provided by brave-search.
```python
# Register Brave Search tool group
client.toolgroups.register(
toolgroup_id="builtin::websearch",
provider_id="brave-search",
args={"max_results": 5}
)
```
The tool requires an API key which can be provided either in the configuration or through the request header `X-LlamaStack-Provider-Data`. The format of the header is `{"<provider_name>_api_key": <your api key>}`.
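For illustration, a minimal sketch of constructing that header in Python (the key name `brave_search_api_key` is an assumption that follows the `<provider_name>_api_key` pattern above; substitute your provider's name and key):
```python
import json

# Serialize the provider data into the X-LlamaStack-Provider-Data header.
# "brave_search_api_key" is a placeholder following the documented pattern.
provider_data = {"brave_search_api_key": "YOUR_API_KEY"}
headers = {"X-LlamaStack-Provider-Data": json.dumps(provider_data)}
# Attach `headers` to requests sent to the Llama Stack server, e.g. via your
# HTTP client's default-headers mechanism.
```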
#### Code Interpreter
The Code Interpreter allows execution of Python code within a controlled environment.
```python
# Register Code Interpreter tool group
client.toolgroups.register(
toolgroup_id="builtin::code_interpreter",
provider_id="code_interpreter"
)
```
Features:
- Secure execution environment using `bwrap` sandboxing
- Matplotlib support for generating plots
- Disabled dangerous system operations
- Configurable execution timeouts
#### WolframAlpha
The WolframAlpha tool provides access to computational knowledge through the WolframAlpha API.
```python
# Register WolframAlpha tool group
client.toolgroups.register(
toolgroup_id="builtin::wolfram_alpha",
provider_id="wolfram-alpha"
)
```
Example usage:
```python
result = client.tool_runtime.invoke_tool(
tool_name="wolfram_alpha",
args={"query": "solve x^2 + 2x + 1 = 0"}
)
```
#### Memory
The Memory tool enables retrieval of context from various types of memory banks (vector, key-value, keyword, and graph).
```python
# Register Memory tool group
client.toolgroups.register(
toolgroup_id="builtin::memory",
provider_id="memory",
args={
"max_chunks": 5,
"max_tokens_in_context": 4096
}
)
```
Features:
- Support for multiple memory bank types
- Configurable query generation
- Context retrieval with token limits
> **Note:** By default, the llama stack `run.yaml` defines toolgroups for web search, code interpreter, and memory, provided by the tavily-search, code-interpreter, and memory providers.
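When used from an agent, the memory toolgroup is typically attached to the agent along with the vector databases it should search over. Here is a sketch mirroring the RAG quickstart later in these docs; the model identifier and vector database id are placeholders:
```python
from llama_stack_client.types.agent_create_params import AgentConfig

agent_config = AgentConfig(
    model="meta-llama/Llama-3.2-3B-Instruct",  # placeholder model identifier
    instructions="You are a helpful assistant",
    enable_session_persistence=False,
    toolgroups=[
        {
            "name": "builtin::memory",
            # placeholder vector db id, registered via client.vector_dbs.register
            "args": {"vector_db_ids": ["my-vector-db"]},
        }
    ],
)
```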
## Model Context Protocol (MCP) Tools
MCP tools are special tools that can interact with Llama Stack over the Model Context Protocol. They are dynamically discovered from an MCP endpoint and can be used to extend the agent's capabilities.
Refer to https://github.com/modelcontextprotocol/server for available MCP servers.
```python
# Register MCP tools
client.toolgroups.register(
toolgroup_id="builtin::filesystem",
provider_id="model-context-protocol",
mcp_endpoint=URL(uri="http://localhost:8000/sse"),
)
```
MCP tools require:
- A valid MCP endpoint URL
- The endpoint must implement the Model Context Protocol
- Tools are discovered dynamically from the endpoint
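Once registered, the dynamically discovered tools show up like any other toolgroup and can be inspected with the standard listing API; a small sketch using the toolgroup id from the registration example above:
```python
# List the tools that were discovered from the MCP endpoint.
mcp_tools = client.tools.list_tools(toolgroup_id="builtin::filesystem")
for tool in mcp_tools:
    print(tool)
```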
## Tools provided by the client
These tools are registered along with the agent config and are specific to the agent for which they are registered. The main difference from the tools provided by built-in providers is that execution is handled by the client: the agent transfers the tool call to the client and waits for the client to return the result.
```python
# Example agent config with client provided tools
config = AgentConfig(
toolgroups=[
"builtin::websearch",
],
client_tools=[
ToolDef(name="client_tool", description="Client provided tool")
]
)
```
Refer to [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/blob/main/examples/agents/e2e_loop_with_custom_tools.py) for an example of how to use client provided tools.
## Tool Structure
Each tool has the following components:
- `name`: Unique identifier for the tool
- `description`: Human-readable description of the tool's functionality
- `parameters`: List of parameters the tool accepts
- `name`: Parameter name
- `parameter_type`: Data type (string, number, etc.)
- `description`: Parameter description
- `required`: Whether the parameter is required (default: true)
- `default`: Default value if any
Example tool definition:
```python
{
"name": "web_search",
"description": "Search the web for information",
"parameters": [
{
"name": "query",
"parameter_type": "string",
"description": "The query to search for",
"required": True
}
]
}
```
## Tool Invocation
Tools can be invoked using the `invoke_tool` method:
```python
result = client.tool_runtime.invoke_tool(
tool_name="web_search",
kwargs={"query": "What is the capital of France?"}
)
```
The result contains:
- `content`: The tool's output
- `error_message`: Optional error message if the tool failed
- `error_code`: Optional error code if the tool failed
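Since these fields are part of every result, a small sketch of handling them:
```python
result = client.tool_runtime.invoke_tool(
    tool_name="web_search",
    kwargs={"query": "What is the capital of France?"},
)
# Check the documented error fields before using the output.
if result.error_message:
    print(f"Tool failed ({result.error_code}): {result.error_message}")
else:
    print(result.content)
```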
## Listing Available Tools
You can list all available tools or filter by tool group:
```python
# List all tools
all_tools = client.tools.list_tools()
# List tools in a specific group
group_tools = client.tools.list_tools(toolgroup_id="search_tools")
```

View file

@ -10,7 +10,6 @@ A Llama Stack API is described as a collection of REST endpoints. We currently s
- **Inference**: run inference with a LLM
- **Safety**: apply safety policies to the output at a Systems (not only model) level
- **Agents**: run multi-step agentic workflows with LLMs with tool usage, memory (RAG), etc.
- **Memory**: store and retrieve data for RAG, chat history, etc.
- **DatasetIO**: interface with datasets and data loaders
- **Scoring**: evaluate outputs of the system
- **Eval**: generate outputs (via Inference or Agents) and perform scoring
@ -39,7 +38,6 @@ Some of these APIs are associated with a set of **Resources**. Here is the mappi
- **Inference**, **Eval** and **Post Training** are associated with `Model` resources.
- **Safety** is associated with `Shield` resources.
- **Memory** is associated with `Memory Bank` resources.
- **DatasetIO** is associated with `Dataset` resources.
- **Scoring** is associated with `ScoringFunction` resources.
- **Eval** is associated with `Model` and `EvalTask` resources.
@ -63,12 +61,9 @@ While there is a lot of flexibility to mix-and-match providers, often users will
**On-device Distro**: Finally, you may want to run Llama Stack directly on an edge device (mobile phone or a tablet.) We provide Distros for iOS and Android (coming soon.)
## More Concepts
- [Evaluation Concepts](evaluation_concepts.md)
```{toctree}
:maxdepth: 1
:hidden:
evaluation_concepts
distributions/index
```

View file

@ -1,26 +1,57 @@
# Adding a New API Provider
This guide contains references to walk you through adding a new API provider.
This guide will walk you through the process of adding a new API provider to Llama Stack.
1. First, decide which API your provider falls into (e.g. Inference, Safety, Agents, Memory).
2. Decide whether your provider is a remote provider or an inline implementation. A remote provider is a provider that makes a remote request to a service. An inline provider is a provider whose implementation is executed locally. Check out the examples, and follow the structure to add your own API provider. Please find the following code pointers:
## Getting Started
- {repopath}`Remote Providers::llama_stack/providers/remote`
- {repopath}`Inline Providers::llama_stack/providers/inline`
1. **Choose Your API Category**
- Determine which API category your provider belongs to (Inference, Safety, Agents, VectorIO)
- Review the core concepts of Llama Stack in the [concepts guide](../concepts/index.md)
3. [Build a Llama Stack distribution](https://llama-stack.readthedocs.io/en/latest/distributions/building_distro.html) with your API provider.
4. Test your code!
2. **Determine Provider Type**
- **Remote Provider**: Makes requests to external services
- **Inline Provider**: Executes implementation locally
## Testing your newly added API providers
Reference existing implementations:
- {repopath}`Remote Providers::llama_stack/providers/remote`
- {repopath}`Inline Providers::llama_stack/providers/inline`
1. Start with an _integration test_ for your provider. That means we will instantiate the real provider, pass it real configuration and if it is a remote service, we will actually hit the remote service. We **strongly** discourage mocking for these tests at the provider level. Llama Stack is first and foremost about integration so we need to make sure stuff works end-to-end. See {repopath}`llama_stack/providers/tests/inference/test_text_inference.py` for an example.
Example PRs:
- [Grok Inference Implementation](https://github.com/meta-llama/llama-stack/pull/609)
- [Nvidia Inference Implementation](https://github.com/meta-llama/llama-stack/pull/355)
- [Model context protocol Tool Runtime](https://github.com/meta-llama/llama-stack/pull/665)
2. In addition, if you want to unit test functionality within your provider, feel free to do so. You can find some tests in `tests/` but they aren't well-supported so far.
3. **Register Your Provider**
- Add your provider to the appropriate {repopath}`Registry::llama_stack/providers/registry/`
- Specify any required pip dependencies
3. Test with a client-server Llama Stack setup. (a) Start a Llama Stack server with your own distribution which includes the new provider. (b) Send a client request to the server. See `llama_stack/apis/<api>/client.py` for how this is done. These client scripts can serve as lightweight tests.
4. **Integration**
- Update the run.yaml file to include your provider
    - To make your provider a default option or create a new distribution, look at the templates in {repopath}`llama_stack/templates/` and run {repopath}`llama_stack/scripts/distro_codegen.py`
- Example PRs:
- [Adding Model Context Protocol Tool Runtime](https://github.com/meta-llama/llama-stack/pull/816)
You can find more complex client scripts in the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repo. Note down which scripts work and which do not work with your distribution.
## Testing Guidelines
## Submit your PR
### 1. Integration Testing
- Create integration tests that use real provider instances and configurations
- For remote services, test actual API interactions
- Avoid mocking at the provider level
- Reference examples in {repopath}`tests/client-sdk`
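As a rough illustration only (the fixture and environment variable names here are assumptions, not the project's actual test harness), an integration test against a running stack could look like:
```python
import os

import pytest
from llama_stack_client import LlamaStackClient


@pytest.fixture
def client():
    # Assumes a stack server with your new provider is already running locally.
    port = os.environ.get("LLAMA_STACK_PORT", "8321")
    return LlamaStackClient(base_url=f"http://localhost:{port}")


def test_chat_completion_returns_content(client):
    response = client.inference.chat_completion(
        model_id=os.environ["INFERENCE_MODEL"],
        messages=[{"role": "user", "content": "Say hello."}],
        stream=False,
    )
    assert response.completion_message.content
```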
After you have fully tested your newly added API provider, submit a PR with the attached test plan. You must have a Test Plan in the summary section of your PR.
### 2. Unit Testing (Optional)
- Add unit tests for provider-specific functionality
- See examples in {repopath}`llama_stack/providers/tests/inference/test_text_inference.py`
### 3. End-to-End Testing
1. Start a Llama Stack server with your new provider
2. Test using client requests
3. Verify compatibility with existing client scripts in the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repository
4. Document which scripts are compatible with your provider
## Submitting Your PR
1. Ensure all tests pass
2. Include a comprehensive test plan in your PR summary
3. Document any known limitations or considerations
4. Submit your pull request for review

View file

@ -4,7 +4,7 @@
This guide will walk you through the steps to get started with building a Llama Stack distribution from scratch with your choice of API providers.
## Llama Stack Build
### Llama Stack Build
In order to build your own distribution, we recommend you clone the `llama-stack` repository.
@ -13,29 +13,99 @@ In order to build your own distribution, we recommend you clone the `llama-stack
git clone git@github.com:meta-llama/llama-stack.git
cd llama-stack
pip install -e .
llama stack build -h
```
Use the CLI to build your distribution.
The main points to consider are:
1. **Image Type** - Do you want a Conda / venv environment or a Container (e.g. Docker)?
2. **Template** - Do you want to use a template to build your distribution, or start from scratch?
3. **Config** - Do you want to use a pre-existing config file to build your distribution?
We will start building our distribution (in the form of a Conda environment or Docker image). In this step, we will specify:
- `name`: the name for our distribution (e.g. `my-stack`)
- `image_type`: our build image type (`conda | docker`)
- `distribution_spec`: our distribution specs for specifying API providers
- `description`: a short description of the configurations for the distribution
- `providers`: specifies the underlying implementation for serving each API endpoint
- `image_type`: `conda` | `docker` to specify whether to build the distribution in the form of Docker image or Conda environment.
```
llama stack build -h
usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--list-templates | --no-list-templates] [--image-type {conda,container,venv}] [--image-name IMAGE_NAME]
Build a Llama stack container
options:
-h, --help show this help message and exit
--config CONFIG Path to a config file to use for the build. You can find example configs in llama_stack/distribution/**/build.yaml.
If this argument is not provided, you will be prompted to enter information interactively
--template TEMPLATE Name of the example template config to use for build. You may use `llama stack build --list-templates` to check out the available templates
--list-templates, --no-list-templates
Show the available templates for building a Llama Stack distribution (default: False)
--image-type {conda,container,venv}
Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config.
--image-name IMAGE_NAME
[for image-type=conda] Name of the conda environment to use for the build. If
not specified, currently active Conda environment will be used. If no Conda
environment is active, you must specify a name.
```
After this step is complete, a file named `<name>-build.yaml` and template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.
::::{tab-set}
:::{tab-item} Building from a template
To build from alternative API providers, we provide distribution templates for users to get started building a distribution backed by different providers.
The following command will allow you to see the available templates and their corresponding providers.
```
llama stack build --list-templates
```
```
------------------------------+-----------------------------------------------------------------------------+
| Template Name | Description |
+------------------------------+-----------------------------------------------------------------------------+
| hf-serverless | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| together | Use Together.AI for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| vllm-gpu | Use a built-in vLLM engine for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| experimental-post-training | Experimental template for post training |
+------------------------------+-----------------------------------------------------------------------------+
| remote-vllm | Use (an external) vLLM server for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| fireworks | Use Fireworks.AI for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| tgi | Use (an external) TGI server for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| bedrock | Use AWS Bedrock for running LLM inference and safety |
+------------------------------+-----------------------------------------------------------------------------+
| meta-reference-gpu | Use Meta Reference for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| nvidia | Use NVIDIA NIM for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| meta-reference-quantized-gpu | Use Meta Reference with fp8, int4 quantization for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| cerebras | Use Cerebras for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| ollama | Use (an external) Ollama server for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| hf-endpoint | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
```
You may then pick a template to build your distribution with providers fitted to your liking.
For example, to build a distribution with TGI as the inference provider, you can run:
```
$ llama stack build --template tgi
...
You can now edit ~/.llama/distributions/llamastack-tgi/tgi-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-tgi/tgi-run.yaml`
```
:::
:::{tab-item} Building from Scratch
- For a new user, we could start off by running `llama stack build`, which launches an interactive wizard that prompts you for the build configuration.
If the provided templates do not fit your use case, you can start off by running `llama stack build`, which launches an interactive wizard that prompts you for the build configuration.
It is best to start with a template and understand the structure of the config file and the various concepts (APIs, providers, resources, etc.) before starting from scratch.
```
llama stack build
> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
> Enter the image type you want your Llama Stack to be built as (docker or conda): conda
> Enter the image type you want your Llama Stack to be built as (container or conda): conda
Llama Stack is composed of several APIs working together. Let's select
the provider types (implementations) you want to use for these APIs.
@ -57,292 +127,6 @@ You can now edit ~/.llama/distributions/llamastack-my-local-stack/my-local-stack
```
:::
:::{tab-item} Building from a template
- To build from alternative API providers, we provide distribution templates for users to get started building a distribution backed by different providers.
The following command will allow you to see the available templates and their corresponding providers.
```
llama stack build --list-templates
```
```
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| Template Name | Providers | Description |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| tgi | { | Use (an external) TGI server for running LLM inference |
| | "inference": [ | |
| | "remote::tgi" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| remote-vllm | { | Use (an external) vLLM server for running LLM inference |
| | "inference": [ | |
| | "remote::vllm" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| vllm-gpu | { | Use a built-in vLLM engine for running LLM inference |
| | "inference": [ | |
| | "inline::vllm" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| meta-reference-quantized-gpu | { | Use Meta Reference with fp8, int4 quantization for running LLM inference |
| | "inference": [ | |
| | "inline::meta-reference-quantized" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| meta-reference-gpu | { | Use Meta Reference for running LLM inference |
| | "inference": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| hf-serverless | { | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
| | "inference": [ | |
| | "remote::hf::serverless" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| together | { | Use Together.AI for running LLM inference |
| | "inference": [ | |
| | "remote::together" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| ollama | { | Use (an external) Ollama server for running LLM inference |
| | "inference": [ | |
| | "remote::ollama" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| bedrock | { | Use AWS Bedrock for running LLM inference and safety |
| | "inference": [ | |
| | "remote::bedrock" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "remote::bedrock" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| hf-endpoint | { | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
| | "inference": [ | |
| | "remote::hf::endpoint" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| fireworks | { | Use Fireworks.AI for running LLM inference |
| | "inference": [ | |
| | "remote::fireworks" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| cerebras | { | Use Cerebras for running LLM inference |
| | "inference": [ | |
| | "remote::cerebras" | |
| | ], | |
| | "memory": [ | |
| | "inline::faiss", | |
| | "remote::chromadb", | |
| | "remote::pgvector" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
| cerebras | { | Use SambaNova.ai for running LLM inference |
| | "inference": [ | |
| | "remote::sambanova" | |
| | ], | |
| | "safety": [ | |
| | "inline::llama-guard" | |
| | ], | |
| | "memory": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "agents": [ | |
| | "inline::meta-reference" | |
| | ], | |
| | "telemetry": [ | |
| | "inline::meta-reference" | |
| | ] | |
| | } | |
+------------------------------+----------------------------------------+-----------------------------------------------------------------------------+
```
You may then pick a template to build your distribution with providers fitted to your liking.
For example, to build a distribution with TGI as the inference provider, you can run:
```
llama stack build --template tgi
```
```
$ llama stack build --template tgi
...
You can now edit ~/.llama/distributions/llamastack-tgi/tgi-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-tgi/tgi-run.yaml`
```
:::
:::{tab-item} Building from a pre-existing build config file
- In addition to templates, you may customize the build to your liking by editing config files and building from them with the following command.
@ -368,35 +152,39 @@ llama stack build --config llama_stack/templates/ollama/build.yaml
```
:::
:::{tab-item} Building Docker
:::{tab-item} Building Container
> [!TIP]
> Podman is supported as an alternative to Docker. Set `DOCKER_BINARY` to `podman` in your environment to use Podman.
> Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
To build a docker image, you may start off from a template and use the `--image-type docker` flag to specify `docker` as the build image type.
To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.
```
llama stack build --template ollama --image-type docker
llama stack build --template ollama --image-type container
```
```
$ llama stack build --template ollama --image-type docker
$ llama stack build --template ollama --image-type container
...
Dockerfile created successfully in /tmp/tmp.viA3a3Rdsg/DockerfileFROM python:3.10-slim
Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/ContainerfileFROM python:3.10-slim
...
You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml`
```
After this step is successful, you should be able to find the built docker image and test it with `llama stack run <path/to/run.yaml>`.
After this step is successful, you should be able to find the built container image and test it with `llama stack run <path/to/run.yaml>`.
:::
::::
## Running your Stack server
### Running your Stack server
Now, let's start the Llama Stack Distribution Server. You will need the YAML configuration file which was written out at the end by the `llama stack build` step.
```
# Start using template name
llama stack run tgi
# Start using config file
llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml
```
@ -422,14 +210,14 @@ Serving API agents
POST /agents/step/get
POST /agents/turn/get
Listening on ['::', '0.0.0.0']:5000
Listening on ['::', '0.0.0.0']:8321
INFO: Started server process [2935911]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://['::', '0.0.0.0']:5000 (Press CTRL+C to quit)
INFO: Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit)
INFO: 2401:db00:35c:2d2b:face:0:c9:0:54678 - "GET /models/list HTTP/1.1" 200 OK
```
### Troubleshooting
If you encounter any issues, search through our [GitHub Issues](https://github.com/meta-llama/llama-stack/issues), or file a new issue.
If you encounter any issues, ask questions in our Discord, search through our [GitHub Issues](https://github.com/meta-llama/llama-stack/issues), or file a new issue.

View file

@ -70,20 +70,27 @@ Next up is the most critical part: the set of providers that the stack will use
```yaml
providers:
inference:
# provider_id is a string you can choose freely
- provider_id: ollama
# provider_type is a string that specifies the type of provider.
# in this case, the provider for inference is ollama and it is run remotely (outside of the distribution)
provider_type: remote::ollama
# config is a dictionary that contains the configuration for the provider.
# in this case, the configuration is the url of the ollama server
config:
url: ${env.OLLAMA_URL:http://localhost:11434}
```
A few things to note:
- A _provider instance_ is identified with an (identifier, type, configuration) tuple. The identifier is a string you can choose freely.
- A _provider instance_ is identified with an (id, type, configuration) triplet.
- The id is a string you can choose freely.
- You can instantiate any number of provider instances of the same type.
- The configuration dictionary is provider-specific. Notice that configuration can reference environment variables (with default values), which are expanded at runtime. When you run a stack server (via docker or via `llama stack run`), you can specify `--env OLLAMA_URL=http://my-server:11434` to override the default value.
- The configuration dictionary is provider-specific.
- Notice that configuration can reference environment variables (with default values), which are expanded at runtime. When you run a stack server (via docker or via `llama stack run`), you can specify `--env OLLAMA_URL=http://my-server:11434` to override the default value.
## Resources
```
Finally, let's look at the `models` section:
```yaml
models:
- metadata: {}

View file

@ -1,11 +1,20 @@
# Using Llama Stack as a Library
If you are planning to use an external service for Inference (even Ollama or TGI counts as external), it is often easier to use Llama Stack as a library. This avoids the overhead of setting up a server. For [example](https://github.com/meta-llama/llama-stack-client-python/blob/main/src/llama_stack_client/lib/direct/test.py):
If you are planning to use an external service for Inference (even Ollama or TGI counts as external), it is often easier to use Llama Stack as a library. This avoids the overhead of setting up a server.
```python
# setup
pip install llama-stack
llama stack build --template together --image-type venv
```
```python
from llama_stack_client.lib.direct.direct import LlamaStackDirectClient
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
client = await LlamaStackDirectClient.from_template('ollama')
client = LlamaStackAsLibraryClient(
"ollama",
# provider_data is optional, but if you need to pass in any provider specific data, you can do so here.
provider_data = {"tavily_search_api_key": os.environ['TAVILY_SEARCH_API_KEY']}
)
await client.initialize()
```
@ -14,23 +23,12 @@ This will parse your config and set up any inline implementations and remote cli
Then, you can access the APIs like `models` and `inference` on the client and call their methods directly:
```python
response = await client.models.list()
print(response)
```
```python
response = await client.inference.chat_completion(
messages=[UserMessage(content="What is the capital of France?", role="user")],
model_id="Llama3.1-8B-Instruct",
stream=False,
)
print("\nChat completion response:")
print(response)
response = client.models.list()
```
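You can call other APIs the same way. For instance, a chat completion sketch; the model identifier below is a placeholder and must match a model registered in your distribution:
```python
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.1-8B-Instruct",  # placeholder; use a model from your stack
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    stream=False,
)
print(response.completion_message.content)
```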
If you've created a [custom distribution](https://llama-stack.readthedocs.io/en/latest/distributions/building_distro.html), you can also use the run.yaml configuration file directly:
```python
client = await LlamaStackDirectClient.from_config(config_path)
await client.initialize()
client = LlamaStackAsLibraryClient(config_path)
client.initialize()
```

View file

@ -1,40 +1,27 @@
# Starting a Llama Stack
# Starting a Llama Stack Server
You can run a Llama Stack server in one of the following ways:
**As a Library**:
This is the simplest way to get started. Using Llama Stack as a library means you do not need to start a server. This is especially useful when you are not running inference locally and relying on an external inference service (eg. fireworks, together, groq, etc.) See [Using Llama Stack as a Library](importing_as_library)
**Docker**:
Another simple way to start interacting with Llama Stack is to spin up a Docker container that is pre-built with all the providers you need. We provide a number of pre-built Docker containers so you can start a Llama Stack server instantly. You can also build your own custom Docker container. Which distribution to choose depends on the hardware you have. See [Selection of a Distribution](distributions/selection) for more details.
**Conda**:
Lastly, if you have a custom or advanced setup, or you are developing on Llama Stack, you can also build a custom Llama Stack server. Using `llama stack build` and `llama stack run` you can build/run a custom Llama Stack server containing the exact combination of providers you wish. We have also provided various templates to make getting started easier. See [Building a Custom Distribution](building_distro) for more details.
```{toctree}
:maxdepth: 3
:maxdepth: 1
:hidden:
importing_as_library
building_distro
configuration
```
You can instantiate a Llama Stack in one of the following ways:
- **As a Library**: this is the simplest, especially if you are using an external inference service. See [Using Llama Stack as a Library](importing_as_library)
- **Docker**: we provide a number of pre-built Docker containers so you can start a Llama Stack server instantly. You can also build your own custom Docker container.
- **Conda**: finally, you can build a custom Llama Stack server using `llama stack build` containing the exact combination of providers you wish. We have provided various templates to make getting started easier.
Which templates / distributions to choose depends on the hardware you have for running LLM inference.
- **Do you have access to a machine with powerful GPUs?**
If so, we suggest:
- {dockerhub}`distribution-remote-vllm` ([Guide](self_hosted_distro/remote-vllm))
- {dockerhub}`distribution-meta-reference-gpu` ([Guide](self_hosted_distro/meta-reference-gpu))
- {dockerhub}`distribution-tgi` ([Guide](self_hosted_distro/tgi))
- **Are you running on a "regular" desktop machine?**
If so, we suggest:
- {dockerhub}`distribution-ollama` ([Guide](self_hosted_distro/ollama))
- **Do you have an API key for a remote inference provider like Fireworks, Together, etc.?** If so, we suggest:
- {dockerhub}`distribution-together` ([Guide](self_hosted_distro/together))
- {dockerhub}`distribution-fireworks` ([Guide](self_hosted_distro/fireworks))
- **Do you want to run Llama Stack inference on your iOS / Android device?** If so, we suggest:
- [iOS SDK](ondevice_distro/ios_sdk)
- [Android](ondevice_distro/android_sdk)
- **Do you want a hosted Llama Stack endpoint?** If so, we suggest:
- [Remote-Hosted Llama Stack Endpoints](remote_hosted_distro/index)
You can also build your own [custom distribution](building_distro).

View file

@ -1,6 +1,3 @@
---
orphan: true
---
# iOS SDK
We offer both remote and on-device use of Llama Stack in Swift via two components:
@ -27,7 +24,7 @@ If you don't want to run inference on-device, then you can connect to any hosted
```swift
import LlamaStackClient
let agents = RemoteAgents(url: URL(string: "http://localhost:5000")!)
let agents = RemoteAgents(url: URL(string: "http://localhost:8321")!)
let request = Components.Schemas.CreateAgentTurnRequest(
agent_id: agentId,
messages: [

View file

@ -1,6 +1,3 @@
---
orphan: true
---
# Remote-Hosted Distributions
Remote-Hosted distributions are available endpoints serving Llama Stack API that you can directly connect to.

View file

@ -0,0 +1,73 @@
# NVIDIA Distribution
The `llamastack/distribution-nvidia` distribution consists of the following provider configurations.
| API | Provider(s) |
|-----|-------------|
| agents | `inline::meta-reference` |
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::nvidia` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss` |
### Environment Variables
The following environment variables can be configured:
- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
- `NVIDIA_API_KEY`: NVIDIA API Key (default: ``)
### Models
The following models are available by default:
- `meta-llama/Llama-3-8B-Instruct (meta/llama3-8b-instruct)`
- `meta-llama/Llama-3-70B-Instruct (meta/llama3-70b-instruct)`
- `meta-llama/Llama-3.1-8B-Instruct (meta/llama-3.1-8b-instruct)`
- `meta-llama/Llama-3.1-70B-Instruct (meta/llama-3.1-70b-instruct)`
- `meta-llama/Llama-3.1-405B-Instruct-FP8 (meta/llama-3.1-405b-instruct)`
- `meta-llama/Llama-3.2-1B-Instruct (meta/llama-3.2-1b-instruct)`
- `meta-llama/Llama-3.2-3B-Instruct (meta/llama-3.2-3b-instruct)`
- `meta-llama/Llama-3.2-11B-Vision-Instruct (meta/llama-3.2-11b-vision-instruct)`
- `meta-llama/Llama-3.2-90B-Vision-Instruct (meta/llama-3.2-90b-vision-instruct)`
### Prerequisite: API Keys
Make sure you have access to a NVIDIA API Key. You can get one by visiting [https://build.nvidia.com/](https://build.nvidia.com/).
## Running Llama Stack with NVIDIA
You can do this via Conda (build code) or Docker which has a pre-built image.
### Via Docker
This method allows you to get started quickly without having to build the distribution code.
```bash
LLAMA_STACK_PORT=5001
docker run \
-it \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-v ./run.yaml:/root/my-run.yaml \
llamastack/distribution-nvidia \
--yaml-config /root/my-run.yaml \
--port $LLAMA_STACK_PORT \
--env NVIDIA_API_KEY=$NVIDIA_API_KEY
```
### Via Conda
```bash
llama stack build --template nvidia --image-type conda
llama stack run ./run.yaml \
--port 5001 \
--env NVIDIA_API_KEY=$NVIDIA_API_KEY
--env INFERENCE_MODEL=$INFERENCE_MODEL
```

View file

@ -0,0 +1,56 @@
# List of Distributions
Here is a list of distributions provided out of the box that you can use to start a Llama Stack server.
## Selection of a Distribution / Template
Which templates / distributions to choose depends on the hardware you have for running LLM inference.
- **Do you want a hosted Llama Stack endpoint?** If so, we suggest leveraging our partners who host Llama Stack endpoints. Namely, _fireworks.ai_ and _together.xyz_.
- Read more about it here - [Remote-Hosted Endpoints](remote_hosted_distro/index).
- **Do you have access to machines with GPUs?** If you wish to run Llama Stack locally or on a cloud instance and host your own Llama Stack endpoint, we suggest:
- {dockerhub}`distribution-remote-vllm` ([Guide](self_hosted_distro/remote-vllm))
- {dockerhub}`distribution-meta-reference-gpu` ([Guide](self_hosted_distro/meta-reference-gpu))
- {dockerhub}`distribution-tgi` ([Guide](self_hosted_distro/tgi))
- {dockerhub}`distribution-nvidia` ([Guide](self_hosted_distro/nvidia))
- **Are you running on a "regular" desktop or laptop?** We suggest using the ollama template for quick prototyping so you can get started without having to worry about needing GPUs.
- {dockerhub}`distribution-ollama` ([link](self_hosted_distro/ollama))
- **Do you have an API key for a remote inference provider like Fireworks, Together, etc.?** If so, we suggest:
- {dockerhub}`distribution-together` ([Guide](self_hosted_distro/together))
- {dockerhub}`distribution-fireworks` ([Guide](self_hosted_distro/fireworks))
- **Do you want to run Llama Stack inference on your iOS / Android device?** Lastly, we also provide templates for running Llama Stack inference on your iOS / Android device:
- [iOS SDK](ondevice_distro/ios_sdk)
- [Android](ondevice_distro/android_sdk)
- **If none of the above fit your needs, you can also build your own [custom distribution](building_distro).**
### Distribution Details
```{toctree}
:maxdepth: 1
remote_hosted_distro/index
self_hosted_distro/remote-vllm
self_hosted_distro/meta-reference-gpu
self_hosted_distro/tgi
self_hosted_distro/nvidia
self_hosted_distro/ollama
self_hosted_distro/together
self_hosted_distro/fireworks
ondevice_distro/index
```
### On-Device Distributions
```{toctree}
:maxdepth: 1
ondevice_distro/ios_sdk
ondevice_distro/android_sdk
```

View file

@ -15,11 +15,11 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::bedrock` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `remote::bedrock` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

View file

@ -1,25 +1,18 @@
---
orphan: true
---
# Cerebras Distribution
```{toctree}
:maxdepth: 2
:hidden:
self
```
The `llamastack/distribution-cerebras` distribution consists of the following provider configurations.
| API | Provider(s) |
|-----|-------------|
| agents | `inline::meta-reference` |
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::cerebras` |
| memory | `inline::meta-reference` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
### Environment Variables

View file

@ -41,7 +41,7 @@ The script will first start up TGI server, then start up Llama Stack distributio
INFO: Started server process [1]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://[::]:5000 (Press CTRL+C to quit)
INFO: Uvicorn running on http://[::]:8321 (Press CTRL+C to quit)
```
To kill the server
@ -65,7 +65,7 @@ registry.dell.huggingface.co/enterprise-dell-inference-meta-llama-meta-llama-3.1
#### Start Llama Stack server pointing to TGI server
```
docker run --network host -it -p 5000:5000 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack/distribution-tgi --yaml_config /root/my-run.yaml
docker run --network host -it -p 8321:8321 -v ./run.yaml:/root/my-run.yaml --gpus=all llamastack/distribution-tgi --yaml_config /root/my-run.yaml
```
Make sure that in your `run.yaml` file, your inference provider is pointing to the correct TGI server endpoint. E.g.

View file

@ -18,11 +18,11 @@ The `llamastack/distribution-fireworks` distribution consists of the following p
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::fireworks` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
### Environment Variables

View file

@ -18,11 +18,11 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `inline::meta-reference` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
Note that you need access to nvidia GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs.

View file

@ -18,11 +18,11 @@ The `llamastack/distribution-meta-reference-quantized-gpu` distribution consists
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `inline::meta-reference-quantized` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
The only difference vs. the `meta-reference-gpu` distribution is that it has support for more efficient inference -- with fp8, int4 quantization, etc.

View file

@ -0,0 +1,60 @@
# NVIDIA Distribution
The `llamastack/distribution-nvidia` distribution consists of the following provider configurations.
| API | Provider(s) |
|-----|-------------|
| agents | `inline::meta-reference` |
| inference | `remote::nvidia` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| telemetry | `inline::meta-reference` |
### Environment Variables
The following environment variables can be configured:
- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
- `NVIDIA_API_KEY`: NVIDIA API Key (default: ``)
### Models
The following models are available by default:
- `${env.INFERENCE_MODEL} (None)`
### Prerequisite: API Keys
Make sure you have access to a NVIDIA API Key. You can get one by visiting [https://build.nvidia.com/](https://build.nvidia.com/).
## Running Llama Stack with NVIDIA
You can do this via Conda (build code) or Docker which has a pre-built image.
### Via Docker
This method allows you to get started quickly without having to build the distribution code.
```bash
LLAMA_STACK_PORT=5001
docker run \
-it \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
-v ./run.yaml:/root/my-run.yaml \
llamastack/distribution-nvidia \
--yaml-config /root/my-run.yaml \
--port $LLAMA_STACK_PORT \
--env NVIDIA_API_KEY=$NVIDIA_API_KEY
```
### Via Conda
```bash
llama stack build --template nvidia --image-type conda
llama stack run ./run.yaml \
--port 5001 \
--env NVIDIA_API_KEY=$NVIDIA_API_KEY
```

View file

@ -1,6 +1,3 @@
---
orphan: true
---
# Ollama Distribution
```{toctree}
@ -18,11 +15,11 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::ollama` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
You should use this distribution if you have a regular desktop machine without very powerful GPUs. Of course, if you have powerful GPUs, you can still continue using this distribution since Ollama supports GPU acceleration.
### Environment Variables

View file

@ -1,6 +1,3 @@
---
orphan: true
---
# Remote vLLM Distribution
```{toctree}
:maxdepth: 2
@ -14,11 +11,14 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following
| API | Provider(s) |
|-----|-------------|
| agents | `inline::meta-reference` |
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::vllm` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
You can use this distribution if you have GPUs and want to run an independent vLLM server container for running inference.

View file

@ -1,7 +1,3 @@
---
orphan: true
---
# TGI Distribution
```{toctree}
@ -19,11 +15,11 @@ The `llamastack/distribution-tgi` distribution consists of the following provide
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::tgi` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
You can use this distribution if you have GPUs and want to run an independent TGI server container for running inference.

View file

@ -1,6 +1,3 @@
---
orphan: true
---
# Together Distribution
```{toctree}
@ -18,11 +15,11 @@ The `llamastack/distribution-together` distribution consists of the following pr
| datasetio | `remote::huggingface`, `inline::localfs` |
| eval | `inline::meta-reference` |
| inference | `remote::together` |
| memory | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| telemetry | `inline::meta-reference` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::memory-runtime` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
### Environment Variables

View file

@ -1,26 +1,24 @@
# Quick Start
In this guide, we'll walk through how you can use the Llama Stack client SDK to build a simple RAG agent.
In this guide, we'll walk through how you can use the Llama Stack (server and client SDK) to test a simple RAG agent.
The most critical requirement for running the agent is running inference on the underlying Llama model. Depending on what hardware (GPUs) you have available, you have various options. We will use `Ollama` for this purpose as it is the easiest to get started with and yet robust.
A Llama Stack agent is a simple autonomous system that can perform tasks by combining a Llama model for reasoning with tools (e.g., RAG, web search, code execution, etc.) for taking actions.
First, let's set up some environment variables that we will use in the rest of the guide. Note that if you open up a new terminal, you will need to set these again.
At minimum, an agent requires a Llama model for inference and at least one tool that it can use.
In Llama Stack, we provide a server exposing multiple APIs. These APIs are backed by implementations from different providers. For this guide, we will use [Ollama](https://ollama.com/) as the inference provider.
```bash
export INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct"
# ollama names this model differently, and we must use the ollama name when loading the model
export OLLAMA_INFERENCE_MODEL="llama3.2:3b-instruct-fp16"
export LLAMA_STACK_PORT=5001
```
### 1. Start Ollama
```bash
ollama run $OLLAMA_INFERENCE_MODEL --keepalive 60m
ollama run llama3.2:3b-instruct-fp16 --keepalive 60m
```
By default, Ollama keeps the model loaded in memory for 5 minutes, which can be too short. We set the `--keepalive` flag to 60 minutes to ensure the model remains loaded for some time.
NOTE: If you do not have ollama, you can install it from [here](https://ollama.ai/docs/installation).
### 2. Start the Llama Stack server
@ -28,6 +26,13 @@ Llama Stack is based on a client-server architecture. It consists of a server wh
To get started quickly, we provide various Docker images for the server component that work with different inference providers out of the box. For this guide, we will use `llamastack/distribution-ollama` as the Docker image.
Let's set up some environment variables that we will use in the rest of the guide.
```bash
INFERENCE_MODEL="meta-llama/Llama-3.2-3B-Instruct"
LLAMA_STACK_PORT=8321
```
You can start the server using the following command:
```bash
docker run -it \
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
@ -45,6 +50,9 @@ Configuration for this is available at `distributions/ollama/run.yaml`.
You can interact with the Llama Stack server using various client SDKs. We will use the Python SDK which you can install using the following command. Note that you must be using Python 3.10 or newer:
```bash
yes | conda create -n stack-client python=3.10
conda activate stack-client
pip install llama-stack-client
```
@ -76,7 +84,10 @@ client = LlamaStackClient(base_url=f"http://localhost:{os.environ['LLAMA_STACK_P
# List available models
models = client.models.list()
print(models)
print("--- Available models: ---")
for m in models:
print(f"- {m.identifier}")
print()
response = client.inference.chat_completion(
model_id=os.environ["INFERENCE_MODEL"],
@ -93,59 +104,83 @@ print(response.completion_message.content)
Here is an example of a simple RAG agent that uses the Llama Stack client SDK.
```python
import os

from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger
from llama_stack_client.types.agent_create_params import AgentConfig
from llama_stack_client.types.tool_runtime import DocumentParam as Document
from termcolor import cprint

# Define the client and point it to the server URL
client = LlamaStackClient(base_url=f"http://localhost:{os.environ['LLAMA_STACK_PORT']}")

# Define the documents to be used for RAG
urls = ["chat.rst", "llama3.rst", "datasets.rst", "lora_finetune.rst"]
documents = [
    Document(
        document_id=f"num-{i}",
        content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}",
        mime_type="text/plain",
        metadata={},
    )
    for i, url in enumerate(urls)
]

# Register a vector database
vector_db_id = "test-vector-db"
client.vector_dbs.register(
    vector_db_id=vector_db_id,
    embedding_model="all-MiniLM-L6-v2",
    embedding_dimension=384,
)

# Insert the documents into the vector database
client.tool_runtime.rag_tool.insert(
    documents=documents,
    vector_db_id=vector_db_id,
    chunk_size_in_tokens=512,
)

# Create an agent
agent_config = AgentConfig(
    # Define the inference model to use
    model=os.environ["INFERENCE_MODEL"],
    # Define instructions for the agent (aka system prompt)
    instructions="You are a helpful assistant",
    # Enable session persistence
    enable_session_persistence=False,
    # Define the tools available to the agent
    toolgroups=[
        {
            "name": "builtin::memory",
            "args": {
                "vector_db_ids": [vector_db_id],
            },
        }
    ],
)

# Create an agent session
rag_agent = Agent(client, agent_config)
session_id = rag_agent.create_session("test-session")

# Define the user prompts
user_prompts = [
    "What are the top 5 topics that were explained? Only list succinct bullet points.",
]

# Run the agent loop by calling the `create_turn` method
for prompt in user_prompts:
    cprint(f"User> {prompt}", "green")
    response = rag_agent.create_turn(
        messages=[{"role": "user", "content": prompt}],
        session_id=session_id,
    )
    for log in EventLogger().log(response):
        log.print()
```
## Next Steps

View file

@ -1,17 +1,15 @@
# Llama Stack
Llama Stack defines and standardizes the set of core building blocks needed to bring generative AI applications to market. These building blocks are presented in the form of interoperable APIs with a broad set of Service Providers providing their implementations.
Llama Stack defines and standardizes the core building blocks needed to bring generative AI applications to market. It provides a unified set of APIs with implementations from leading service providers, enabling seamless transitions between development and production environments.
We focus on making it easy to build production applications with the Llama model family - from the latest Llama 3.3 to specialized models like Llama Guard for safety.
```{image} ../_static/llama-stack.png
:alt: Llama Stack
:width: 400px
```
Our goal is to provide pre-packaged implementations which can be operated in a variety of deployment environments: developers start iterating with Desktops or their mobile devices and can seamlessly transition to on-prem or public cloud deployments. At every point in this transition, the same set of APIs and the same developer experience is available.
```{note}
The Stack APIs are rapidly improving but still a work-in-progress. We invite feedback as well as direct contributions.
```
Our goal is to provide pre-packaged implementations (aka "distributions") which can be run in a variety of deployment environments. Llama Stack can assist you throughout your entire app development lifecycle - start iterating on local, mobile, or desktop environments and seamlessly transition to on-prem or public cloud deployments. At every point in this transition, the same set of APIs and the same developer experience is available.
## Quick Links
@ -45,7 +43,7 @@ A number of "adapters" are available for some popular Inference and Memory (Vect
| SambaNova | Hosted | | Y | | | |
| Ollama | Single Node | | Y | | |
| TGI | Hosted and Single Node | | Y | | |
| [NVIDIA NIM](https://build.nvidia.com/nim?filters=nimType%3Anim_type_run_anywhere&q=llama) | Hosted and Single Node | | Y | | |
| NVIDIA NIM | Hosted and Single Node | | Y | | |
| Chroma | Single Node | | | Y | | |
| Postgres | Single Node | | | Y | | |
| PyTorch ExecuTorch | On-device iOS | Y | Y | | |
@ -55,10 +53,12 @@ A number of "adapters" are available for some popular Inference and Memory (Vect
:hidden:
:maxdepth: 3
self
introduction/index
getting_started/index
concepts/index
distributions/index
distributions/selection
building_applications/index
benchmark_evaluations/index
playground/index

View file

@ -19,77 +19,41 @@ Building production AI applications today requires solving multiple challenges:
- Changing providers requires significant code changes.
### The Vision: A Universal Stack
### Our Solution: A Universal Stack
```{image} ../../_static/llama-stack.png
:alt: Llama Stack
:width: 400px
```
Llama Stack defines and standardizes the core building blocks needed to bring generative AI applications to market. These building blocks are presented as interoperable APIs with a broad set of Service Providers providing their implementations.
Llama Stack addresses these challenges through a service-oriented, API-first approach:
#### Service-oriented Design
Unlike other frameworks, Llama Stack is built with a service-oriented, REST API-first approach. Such a design not only allows for seamless transitions from local to remote deployments, it also forces the design to be more declarative. This constraint leads to a much simpler, more robust developer experience. The same code works across different environments, as the short sketch after the list below illustrates:
**Develop Anywhere, Deploy Everywhere**
- Start locally with CPU-only setups
- Move to GPU acceleration when needed
- Deploy to cloud or edge without code changes
- Same APIs and developer experience everywhere
- Local development with CPU-only setups
- Self-hosted with GPU acceleration
- Cloud-hosted on providers like AWS, Fireworks, Together
- On-device for iOS and Android
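To make this concrete, here is a minimal sketch of the "same code, different environment" idea. The `LLAMA_STACK_ENDPOINT` variable name is just an illustrative assumption; the only thing that changes between a laptop, a self-hosted server, or a hosted provider is the URL the client points at:
```python
import os

from llama_stack_client import LlamaStackClient

# The endpoint is the only thing that differs between environments, e.g.
# http://localhost:8321 for a local distro or a hosted URL in production.
base_url = os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321")
client = LlamaStackClient(base_url=base_url)

# The application code below stays identical regardless of where the server runs.
for model in client.models.list():
    print(model.identifier)
```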
#### Composability
The APIs we design are composable. An Agent abstractly depends on { Inference, Memory, Safety } APIs but does not care about the actual implementation details. Safety itself may require model inference and hence can depend on the Inference API.
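As a rough illustration of this layering, consider the sketch below. The class and method names are simplified stand-ins, not the actual Llama Stack protocol definitions; the point is only that the agent programs against abstract interfaces, and that Safety can itself be built on top of Inference:
```python
from typing import Protocol


class Inference(Protocol):
    def chat_completion(self, model_id: str, prompt: str) -> str: ...


class Safety(Protocol):
    def run_shield(self, text: str) -> bool: ...


class LlamaGuardSafety:
    """A safety provider that is itself implemented on top of Inference."""

    def __init__(self, inference: Inference, shield_model: str):
        self.inference = inference
        self.shield_model = shield_model

    def run_shield(self, text: str) -> bool:
        verdict = self.inference.chat_completion(self.shield_model, f"Is this safe? {text}")
        return "unsafe" not in verdict.lower()


class SimpleAgent:
    """The agent depends only on the abstract APIs, never on a concrete provider."""

    def __init__(self, inference: Inference, safety: Safety):
        self.inference = inference
        self.safety = safety

    def run(self, model_id: str, prompt: str) -> str:
        if not self.safety.run_shield(prompt):
            return "Sorry, I cannot help with that."
        return self.inference.chat_completion(model_id, prompt)
```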
#### Turnkey Solutions
We provide turnkey solutions for popular deployment scenarios. It should be easy to deploy a Llama Stack server on AWS or in a private data center. Either of these should allow a developer to get started with powerful agentic apps, model evaluations, or fine-tuning services in minutes.
We have built-in support for critical needs:
- Safety guardrails and content filtering
- Comprehensive evaluation capabilities
**Production-Ready Building Blocks**
- Pre-built safety guardrails and content filtering
- Built-in RAG and agent capabilities
- Comprehensive evaluation toolkit
- Full observability and monitoring
- Provider federation and fallback
#### Focus on Llama Models
As a Meta-initiated project, we explicitly focus on Meta's Llama series of models. Supporting the broad set of open models is no easy task and we want to start with models we understand best.
#### Supporting the Ecosystem
There is a vibrant ecosystem of Providers offering efficient inference, scalable vector stores, and powerful observability solutions. We want to make sure it is easy for developers to pick and choose the best implementations for their use cases. We also want to make sure it is easy for new Providers to onboard and participate in the ecosystem.
Additionally, we have designed every element of the Stack such that APIs as well as Resources (like Models) can be federated.
#### Rich Provider Ecosystem
```{list-table}
:header-rows: 1
* - Provider
- Local
- Self-hosted
- Cloud
* - Inference
- Ollama
- vLLM, TGI
- Fireworks, Together, AWS
* - Memory
- FAISS
- Chroma, pgvector
- Weaviate
* - Safety
- Llama Guard
- -
- AWS Bedrock
```
**True Provider Independence**
- Swap providers without application changes
- Mix and match best-in-class implementations
- Federation and fallback support
- No vendor lock-in
### Unified API Layer
### Our Philosophy
Llama Stack provides a consistent interface for:
- **Service-Oriented**: REST APIs enforce clean interfaces and enable seamless transitions across different environments.
- **Composability**: Components are independent but work together seamlessly
- **Production Ready**: Built for real-world applications, not just demos
- **Turnkey Solutions**: Easy-to-deploy, built-in solutions for popular deployment scenarios
- **Llama First**: Explicit focus on Meta's Llama models and their partner ecosystem
- **Inference**: Run LLM models efficiently
- **Safety**: Apply content filtering and safety policies
- **Memory**: Store and retrieve knowledge for RAG
- **Agents**: Build multi-step workflows
- **Evaluation**: Test and improve application quality
With Llama Stack, you can focus on building your application while we handle the infrastructure complexity, essential capabilities, and provider integrations.

View file

@ -92,9 +92,10 @@ response = client.eval.evaluate_rows(
"type": "model",
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
"sampling_params": {
"temperature": 0.0,
"strategy": {
"type": "greedy",
},
"max_tokens": 4096,
"top_p": 0.9,
"repeat_penalty": 1.0,
},
"system_message": system_message
@ -149,9 +150,10 @@ response = client.eval.evaluate_rows(
"type": "model",
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
"sampling_params": {
"temperature": 0.0,
"strategy": {
"type": "greedy",
},
"max_tokens": 4096,
"top_p": 0.9,
"repeat_penalty": 1.0,
},
}
@ -170,9 +172,9 @@ agent_config = {
"model": "meta-llama/Llama-3.1-405B-Instruct",
"instructions": "You are a helpful assistant",
"sampling_params": {
"strategy": "greedy",
"temperature": 0.0,
"top_p": 0.95,
"strategy": {
"type": "greedy",
},
},
"tools": [
{
@ -318,10 +320,9 @@ The `EvalTaskConfig` are user specified config to define:
"type": "model",
"model": "Llama3.2-3B-Instruct",
"sampling_params": {
"strategy": "greedy",
"temperature": 0,
"top_p": 0.95,
"top_k": 0,
"strategy": {
"type": "greedy",
},
"max_tokens": 0,
"repetition_penalty": 1.0
}
@ -337,10 +338,9 @@ The `EvalTaskConfig` are user specified config to define:
"type": "model",
"model": "Llama3.1-405B-Instruct",
"sampling_params": {
"strategy": "greedy",
"temperature": 0,
"top_p": 0.95,
"top_k": 0,
"strategy": {
"type": "greedy",
},
"max_tokens": 0,
"repetition_penalty": 1.0
}
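For reference, here is a minimal sketch of the new nested `strategy` shape for `sampling_params`, assembled from the examples in this commit (shown as Python dicts; the exact fields accepted by each strategy type may vary):
```python
# Greedy decoding: the strategy is a tagged object instead of a flat string.
greedy_sampling_params = {
    "strategy": {"type": "greedy"},
    "max_tokens": 4096,
    "repetition_penalty": 1.0,
}

# Nucleus sampling: temperature and top_p now live inside the strategy object.
top_p_sampling_params = {
    "strategy": {"type": "top_p", "temperature": 0.7, "top_p": 0.95},
    "seed": 42,
    "max_tokens": 512,
}
```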

View file

@ -214,7 +214,6 @@ llama model describe -m Llama3.2-3B-Instruct
| | } |
+-----------------------------+----------------------------------+
| Recommended sampling params | { |
| | "strategy": "top_p", |
| | "temperature": 1.0, |
| | "top_p": 0.9, |
| | "top_k": 0 |

View file

@ -23,8 +23,8 @@ subcommands:
```bash
$ llama-stack-client configure
> Enter the host name of the Llama Stack distribution server: localhost
> Enter the port number of the Llama Stack distribution server: 5000
Done! You can now use the Llama Stack Client CLI with endpoint http://localhost:5000
> Enter the port number of the Llama Stack distribution server: 8321
Done! You can now use the Llama Stack Client CLI with endpoint http://localhost:8321
```
### `llama-stack-client providers list`
@ -200,10 +200,9 @@ Example eval_task_config.json:
"type": "model",
"model": "Llama3.1-405B-Instruct",
"sampling_params": {
"strategy": "greedy",
"temperature": 0,
"top_p": 0.95,
"top_k": 0,
"strategy": {
"type": "greedy"
},
"max_tokens": 0,
"repetition_penalty": 1.0
}

View file

@ -1,41 +0,0 @@
# Llama Stack Developer Cookbook
Based on your developer needs, below are references to guides to help you get started.
### Hosted Llama Stack Endpoint
* Developer Need: I want to connect to a Llama Stack endpoint to build my applications.
* Effort: 1min
* Guide:
- Checkout our [DeepLearning course](https://www.deeplearning.ai/short-courses/introducing-multimodal-llama-3-2) on building with Llama Stack apps on pre-hosted Llama Stack endpoint.
### Local meta-reference Llama Stack Server
* Developer Need: I want to start a local Llama Stack server with my GPU using meta-reference implementations.
* Effort: 5min
* Guide:
- Please see our [meta-reference-gpu](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/meta-reference-gpu.html) on starting up a meta-reference Llama Stack server.
### Llama Stack Server with Remote Providers
* Developer need: I want a Llama Stack distribution with a remote provider.
* Effort: 10min
* Guide
- Please see our [Distributions Guide](https://llama-stack.readthedocs.io/en/latest/concepts/index.html#distributions) on starting up distributions with remote providers.
### On-Device (iOS) Llama Stack
* Developer Need: I want to use Llama Stack on-Device
* Effort: 1.5hr
* Guide:
- Please see our [iOS Llama Stack SDK](./ios_sdk.md) implementations
### Assemble your own Llama Stack Distribution
* Developer Need: I want to assemble my own distribution with API providers to my likings
* Effort: 30min
* Guide
- Please see our [Building Distribution](./building_distro.md) guide for assembling your own Llama Stack distribution with your choice of API providers.
### Adding a New API Provider
* Developer Need: I want to add a new API provider to Llama Stack.
* Effort: 3hr
* Guide
- Please see our [Adding a New API Provider](https://llama-stack.readthedocs.io/en/latest/contributing/new_api_provider.html) guide for adding a new API provider.

View file

@ -32,8 +32,8 @@
"outputs": [],
"source": [
"HOST = \"localhost\" # Replace with your host\n",
"LOCAL_PORT = 5000 # Replace with your local distro port\n",
"CLOUD_PORT = 5001 # Replace with your cloud distro port"
"LOCAL_PORT = 8321 # Replace with your local distro port\n",
"CLOUD_PORT = 8322 # Replace with your cloud distro port"
]
},
{
@ -43,7 +43,7 @@
"source": [
"#### 2. Set Up Local and Cloud Clients\n",
"\n",
"Initialize both clients, specifying the `base_url` for each instance. In this case, we have the local distribution running on `http://localhost:5000` and the cloud distribution running on `http://localhost:5001`.\n"
"Initialize both clients, specifying the `base_url` for each instance. In this case, we have the local distribution running on `http://localhost:8321` and the cloud distribution running on `http://localhost:5001`.\n"
]
},
{

View file

@ -26,27 +26,28 @@
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import requests\n",
"import json\n",
"import asyncio\n",
"import nest_asyncio\n",
"import json\n",
"import os\n",
"from typing import Dict, List\n",
"\n",
"import nest_asyncio\n",
"import requests\n",
"from dotenv import load_dotenv\n",
"from llama_stack_client import LlamaStackClient\n",
"from llama_stack_client.lib.agents.custom_tool import CustomTool\n",
"from llama_stack_client.types.shared.tool_response_message import ToolResponseMessage\n",
"from llama_stack_client.types import CompletionMessage\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.custom_tool import CustomTool\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types import CompletionMessage\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from llama_stack_client.types.shared.tool_response_message import ToolResponseMessage\n",
"\n",
"# Allow asyncio to run in Jupyter Notebook\n",
"nest_asyncio.apply()\n",
"\n",
"HOST='localhost'\n",
"PORT=5001\n",
"MODEL_NAME='meta-llama/Llama-3.2-3B-Instruct'"
"HOST = \"localhost\"\n",
"PORT = 5001\n",
"MODEL_NAME = \"meta-llama/Llama-3.2-3B-Instruct\"\n"
]
},
{
@ -69,7 +70,7 @@
"outputs": [],
"source": [
"load_dotenv()\n",
"BRAVE_SEARCH_API_KEY = os.environ['BRAVE_SEARCH_API_KEY']"
"BRAVE_SEARCH_API_KEY = os.environ[\"BRAVE_SEARCH_API_KEY\"]\n"
]
},
{
@ -118,7 +119,7 @@
" cleaned = {k: v for k, v in results[idx].items() if k in selected_keys}\n",
" clean_response.append(cleaned)\n",
"\n",
" return {\"query\": query, \"top_k\": clean_response}"
" return {\"query\": query, \"top_k\": clean_response}\n"
]
},
{
@ -157,25 +158,29 @@
" for message in messages:\n",
" if isinstance(message, CompletionMessage) and message.tool_calls:\n",
" for tool_call in message.tool_calls:\n",
" if 'query' in tool_call.arguments:\n",
" query = tool_call.arguments['query']\n",
" if \"query\" in tool_call.arguments:\n",
" query = tool_call.arguments[\"query\"]\n",
" call_id = tool_call.call_id\n",
"\n",
" if query:\n",
" search_result = await self.run_impl(query)\n",
" return [ToolResponseMessage(\n",
" call_id=call_id,\n",
" role=\"ipython\",\n",
" content=self._format_response_for_agent(search_result),\n",
" tool_name=\"brave_search\"\n",
" )]\n",
" return [\n",
" ToolResponseMessage(\n",
" call_id=call_id,\n",
" role=\"ipython\",\n",
" content=self._format_response_for_agent(search_result),\n",
" tool_name=\"brave_search\",\n",
" )\n",
" ]\n",
"\n",
" return [ToolResponseMessage(\n",
" call_id=\"no_call_id\",\n",
" role=\"ipython\",\n",
" content=\"No query provided.\",\n",
" tool_name=\"brave_search\"\n",
" )]\n",
" return [\n",
" ToolResponseMessage(\n",
" call_id=\"no_call_id\",\n",
" role=\"ipython\",\n",
" content=\"No query provided.\",\n",
" tool_name=\"brave_search\",\n",
" )\n",
" ]\n",
"\n",
" def _format_response_for_agent(self, search_result):\n",
" parsed_result = json.loads(search_result)\n",
@ -186,7 +191,7 @@
" f\" URL: {result.get('url', 'No URL')}\\n\"\n",
" f\" Description: {result.get('description', 'No Description')}\\n\\n\"\n",
" )\n",
" return formatted_result"
" return formatted_result\n"
]
},
{
@ -209,7 +214,7 @@
"async def execute_search(query: str):\n",
" web_search_tool = WebSearchTool(api_key=BRAVE_SEARCH_API_KEY)\n",
" result = await web_search_tool.run_impl(query)\n",
" print(\"Search Results:\", result)"
" print(\"Search Results:\", result)\n"
]
},
{
@ -236,7 +241,7 @@
],
"source": [
"query = \"Latest developments in quantum computing\"\n",
"asyncio.run(execute_search(query))"
"asyncio.run(execute_search(query))\n"
]
},
{
@ -288,19 +293,17 @@
"\n",
" # Initialize custom tool (ensure `WebSearchTool` is defined earlier in the notebook)\n",
" webSearchTool = WebSearchTool(api_key=BRAVE_SEARCH_API_KEY)\n",
" \n",
"\n",
" # Define the agent configuration, including the model and tool setup\n",
" agent_config = AgentConfig(\n",
" model=MODEL_NAME,\n",
" instructions=\"\"\"You are a helpful assistant that responds to user queries with relevant information and cites sources when available.\"\"\",\n",
" sampling_params={\n",
" \"strategy\": \"greedy\",\n",
" \"temperature\": 1.0,\n",
" \"top_p\": 0.9,\n",
" \"strategy\": {\n",
" \"type\": \"greedy\",\n",
" },\n",
" },\n",
" tools=[\n",
" webSearchTool.get_tool_definition()\n",
" ],\n",
" tools=[webSearchTool.get_tool_definition()],\n",
" tool_choice=\"auto\",\n",
" tool_prompt_format=\"python_list\",\n",
" input_shields=input_shields,\n",
@ -329,8 +332,9 @@
" async for log in EventLogger().log(response):\n",
" log.print()\n",
"\n",
"\n",
"# Run the function asynchronously in a Jupyter Notebook cell\n",
"await run_main(disable_safety=True)"
"await run_main(disable_safety=True)\n"
]
}
],

View file

@ -50,8 +50,8 @@
"outputs": [],
"source": [
"HOST = \"localhost\" # Replace with your host\n",
"PORT = 5001 # Replace with your port\n",
"MODEL_NAME='meta-llama/Llama-3.2-3B-Instruct'"
"PORT = 5001 # Replace with your port\n",
"MODEL_NAME = \"meta-llama/Llama-3.2-3B-Instruct\"\n"
]
},
{
@ -60,10 +60,12 @@
"metadata": {},
"outputs": [],
"source": [
"from dotenv import load_dotenv\n",
"import os\n",
"\n",
"from dotenv import load_dotenv\n",
"\n",
"load_dotenv()\n",
"BRAVE_SEARCH_API_KEY = os.environ['BRAVE_SEARCH_API_KEY']"
"BRAVE_SEARCH_API_KEY = os.environ[\"BRAVE_SEARCH_API_KEY\"]\n"
]
},
{
@ -104,20 +106,22 @@
],
"source": [
"import os\n",
"\n",
"from llama_stack_client import LlamaStackClient\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"\n",
"\n",
"async def agent_example():\n",
" client = LlamaStackClient(base_url=f\"http://{HOST}:{PORT}\")\n",
" agent_config = AgentConfig(\n",
" model=MODEL_NAME,\n",
" instructions=\"You are a helpful assistant! If you call builtin tools like brave search, follow the syntax brave_search.call(…)\",\n",
" sampling_params={\n",
" \"strategy\": \"greedy\",\n",
" \"temperature\": 1.0,\n",
" \"top_p\": 0.9,\n",
" \"strategy\": {\n",
" \"type\": \"greedy\",\n",
" },\n",
" },\n",
" tools=[\n",
" {\n",
@ -157,7 +161,7 @@
" log.print()\n",
"\n",
"\n",
"await agent_example()"
"await agent_example()\n"
]
},
{

View file

@ -157,7 +157,15 @@ curl http://localhost:$LLAMA_STACK_PORT/alpha/inference/chat-completion
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Write me a 2-sentence poem about the moon"}
],
"sampling_params": {"temperature": 0.7, "seed": 42, "max_tokens": 512}
"sampling_params": {
"strategy": {
"type": "top_p",
"temperatrue": 0.7,
"top_p": 0.95,
},
"seed": 42,
"max_tokens": 512
}
}
EOF
```

View file

@ -83,8 +83,8 @@
},
"outputs": [],
"source": [
"LLAMA_STACK_API_TOGETHER_URL=\"https://llama-stack.together.ai\"\n",
"LLAMA31_8B_INSTRUCT = \"Llama3.1-8B-Instruct\""
"LLAMA_STACK_API_TOGETHER_URL = \"https://llama-stack.together.ai\"\n",
"LLAMA31_8B_INSTRUCT = \"Llama3.1-8B-Instruct\"\n"
]
},
{
@ -107,12 +107,13 @@
" AgentConfigToolSearchToolDefinition,\n",
")\n",
"\n",
"\n",
"# Helper function to create an agent with tools\n",
"async def create_tool_agent(\n",
" client: LlamaStackClient,\n",
" tools: List[Dict],\n",
" instructions: str = \"You are a helpful assistant\",\n",
" model: str = LLAMA31_8B_INSTRUCT\n",
" model: str = LLAMA31_8B_INSTRUCT,\n",
") -> Agent:\n",
" \"\"\"Create an agent with specified tools.\"\"\"\n",
" print(\"Using the following model: \", model)\n",
@ -120,9 +121,9 @@
" model=model,\n",
" instructions=instructions,\n",
" sampling_params={\n",
" \"strategy\": \"greedy\",\n",
" \"temperature\": 1.0,\n",
" \"top_p\": 0.9,\n",
" \"strategy\": {\n",
" \"type\": \"greedy\",\n",
" },\n",
" },\n",
" tools=tools,\n",
" tool_choice=\"auto\",\n",
@ -130,7 +131,7 @@
" enable_session_persistence=True,\n",
" )\n",
"\n",
" return Agent(client, agent_config)"
" return Agent(client, agent_config)\n"
]
},
{
@ -172,7 +173,8 @@
],
"source": [
"# comment this if you don't have a BRAVE_SEARCH_API_KEY\n",
"os.environ[\"BRAVE_SEARCH_API_KEY\"] = 'YOUR_BRAVE_SEARCH_API_KEY'\n",
"os.environ[\"BRAVE_SEARCH_API_KEY\"] = \"YOUR_BRAVE_SEARCH_API_KEY\"\n",
"\n",
"\n",
"async def create_search_agent(client: LlamaStackClient) -> Agent:\n",
" \"\"\"Create an agent with Brave Search capability.\"\"\"\n",
@ -186,8 +188,8 @@
"\n",
" return await create_tool_agent(\n",
" client=client,\n",
" tools=[search_tool], # set this to [] if you don't have a BRAVE_SEARCH_API_KEY\n",
" model = LLAMA31_8B_INSTRUCT,\n",
" tools=[search_tool], # set this to [] if you don't have a BRAVE_SEARCH_API_KEY\n",
" model=LLAMA31_8B_INSTRUCT,\n",
" instructions=\"\"\"\n",
" You are a research assistant that can search the web.\n",
" Always cite your sources with URLs when providing information.\n",
@ -198,9 +200,10 @@
"\n",
" SOURCES:\n",
" - [Source title](URL)\n",
" \"\"\"\n",
" \"\"\",\n",
" )\n",
"\n",
"\n",
"# Example usage\n",
"async def search_example():\n",
" client = LlamaStackClient(base_url=LLAMA_STACK_API_TOGETHER_URL)\n",
@ -212,7 +215,7 @@
" # Example queries\n",
" queries = [\n",
" \"What are the latest developments in quantum computing?\",\n",
" #\"Who won the most recent Super Bowl?\",\n",
" # \"Who won the most recent Super Bowl?\",\n",
" ]\n",
"\n",
" for query in queries:\n",
@ -227,8 +230,9 @@
" async for log in EventLogger().log(response):\n",
" log.print()\n",
"\n",
"\n",
"# Run the example (in Jupyter, use asyncio.run())\n",
"await search_example()"
"await search_example()\n"
]
},
{
@ -286,12 +290,16 @@
}
],
"source": [
"from typing import TypedDict, Optional, Dict, Any\n",
"from datetime import datetime\n",
"import json\n",
"from llama_stack_client.types.tool_param_definition_param import ToolParamDefinitionParam\n",
"from llama_stack_client.types import CompletionMessage,ToolResponseMessage\n",
"from datetime import datetime\n",
"from typing import Any, Dict, Optional, TypedDict\n",
"\n",
"from llama_stack_client.lib.agents.custom_tool import CustomTool\n",
"from llama_stack_client.types import CompletionMessage, ToolResponseMessage\n",
"from llama_stack_client.types.tool_param_definition_param import (\n",
" ToolParamDefinitionParam,\n",
")\n",
"\n",
"\n",
"class WeatherTool(CustomTool):\n",
" \"\"\"Example custom tool for weather information.\"\"\"\n",
@ -305,16 +313,15 @@
" def get_params_definition(self) -> Dict[str, ToolParamDefinitionParam]:\n",
" return {\n",
" \"location\": ToolParamDefinitionParam(\n",
" param_type=\"str\",\n",
" description=\"City or location name\",\n",
" required=True\n",
" param_type=\"str\", description=\"City or location name\", required=True\n",
" ),\n",
" \"date\": ToolParamDefinitionParam(\n",
" param_type=\"str\",\n",
" description=\"Optional date (YYYY-MM-DD)\",\n",
" required=False\n",
" )\n",
" required=False,\n",
" ),\n",
" }\n",
"\n",
" async def run(self, messages: List[CompletionMessage]) -> List[ToolResponseMessage]:\n",
" assert len(messages) == 1, \"Expected single message\"\n",
"\n",
@ -337,20 +344,14 @@
" )\n",
" return [message]\n",
"\n",
" async def run_impl(self, location: str, date: Optional[str] = None) -> Dict[str, Any]:\n",
" async def run_impl(\n",
" self, location: str, date: Optional[str] = None\n",
" ) -> Dict[str, Any]:\n",
" \"\"\"Simulate getting weather data (replace with actual API call).\"\"\"\n",
" # Mock implementation\n",
" if date:\n",
" return {\n",
" \"temperature\": 90.1,\n",
" \"conditions\": \"sunny\",\n",
" \"humidity\": 40.0\n",
" }\n",
" return {\n",
" \"temperature\": 72.5,\n",
" \"conditions\": \"partly cloudy\",\n",
" \"humidity\": 65.0\n",
" }\n",
" return {\"temperature\": 90.1, \"conditions\": \"sunny\", \"humidity\": 40.0}\n",
" return {\"temperature\": 72.5, \"conditions\": \"partly cloudy\", \"humidity\": 65.0}\n",
"\n",
"\n",
"async def create_weather_agent(client: LlamaStackClient) -> Agent:\n",
@ -358,38 +359,33 @@
"\n",
" # Create the agent with the tool\n",
" weather_tool = WeatherTool()\n",
" \n",
"\n",
" agent_config = AgentConfig(\n",
" model=LLAMA31_8B_INSTRUCT,\n",
" #model=model_name,\n",
" # model=model_name,\n",
" instructions=\"\"\"\n",
" You are a weather assistant that can provide weather information.\n",
" Always specify the location clearly in your responses.\n",
" Include both temperature and conditions in your summaries.\n",
" \"\"\",\n",
" sampling_params={\n",
" \"strategy\": \"greedy\",\n",
" \"temperature\": 1.0,\n",
" \"top_p\": 0.9,\n",
" \"strategy\": {\n",
" \"type\": \"greedy\",\n",
" },\n",
" },\n",
" tools=[\n",
" weather_tool.get_tool_definition()\n",
" ],\n",
" tools=[weather_tool.get_tool_definition()],\n",
" tool_choice=\"auto\",\n",
" tool_prompt_format=\"json\",\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=True\n",
" enable_session_persistence=True,\n",
" )\n",
"\n",
" agent = Agent(\n",
" client=client,\n",
" agent_config=agent_config,\n",
" custom_tools=[weather_tool]\n",
" )\n",
" agent = Agent(client=client, agent_config=agent_config, custom_tools=[weather_tool])\n",
"\n",
" return agent\n",
"\n",
"\n",
"# Example usage\n",
"async def weather_example():\n",
" client = LlamaStackClient(base_url=LLAMA_STACK_API_TOGETHER_URL)\n",
@ -413,12 +409,14 @@
" async for log in EventLogger().log(response):\n",
" log.print()\n",
"\n",
"\n",
"# For Jupyter notebooks\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()\n",
"\n",
"# Run the example\n",
"await weather_example()"
"await weather_example()\n"
]
},
{

View file

@ -7,6 +7,7 @@
from datetime import datetime
from enum import Enum
from typing import (
Annotated,
Any,
AsyncIterator,
Dict,
@ -20,7 +21,6 @@ from typing import (
from llama_models.schema_utils import json_schema_type, register_schema, webmethod
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import Annotated
from llama_stack.apis.common.content_types import ContentDelta, InterleavedContent, URL
from llama_stack.apis.inference import (
@ -33,7 +33,6 @@ from llama_stack.apis.inference import (
ToolResponseMessage,
UserMessage,
)
from llama_stack.apis.memory import MemoryBank
from llama_stack.apis.safety import SafetyViolation
from llama_stack.apis.tools import ToolDef
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@ -89,7 +88,7 @@ class MemoryRetrievalStep(StepCommon):
step_type: Literal[StepType.memory_retrieval.value] = (
StepType.memory_retrieval.value
)
memory_bank_ids: List[str]
vector_db_ids: str
inserted_context: InterleavedContent
@ -133,8 +132,6 @@ class Session(BaseModel):
turns: List[Turn]
started_at: datetime
memory_bank: Optional[MemoryBank] = None
class AgentToolGroupWithArgs(BaseModel):
name: str
@ -296,13 +293,13 @@ class AgentStepResponse(BaseModel):
@runtime_checkable
@trace_protocol
class Agents(Protocol):
@webmethod(route="/agents/create")
@webmethod(route="/agents", method="POST")
async def create_agent(
self,
agent_config: AgentConfig,
) -> AgentCreateResponse: ...
@webmethod(route="/agents/turn/create")
@webmethod(route="/agents/{agent_id}/session/{session_id}/turn", method="POST")
async def create_agent_turn(
self,
agent_id: str,
@ -318,36 +315,52 @@ class Agents(Protocol):
toolgroups: Optional[List[AgentToolGroup]] = None,
) -> Union[Turn, AsyncIterator[AgentTurnResponseStreamChunk]]: ...
@webmethod(route="/agents/turn/get")
@webmethod(
route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}", method="GET"
)
async def get_agents_turn(
self, agent_id: str, session_id: str, turn_id: str
self,
agent_id: str,
session_id: str,
turn_id: str,
) -> Turn: ...
@webmethod(route="/agents/step/get")
@webmethod(
route="/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}",
method="GET",
)
async def get_agents_step(
self, agent_id: str, session_id: str, turn_id: str, step_id: str
self,
agent_id: str,
session_id: str,
turn_id: str,
step_id: str,
) -> AgentStepResponse: ...
@webmethod(route="/agents/session/create")
@webmethod(route="/agents/{agent_id}/session", method="POST")
async def create_agent_session(
self,
agent_id: str,
session_name: str,
) -> AgentSessionCreateResponse: ...
@webmethod(route="/agents/session/get")
@webmethod(route="/agents/{agent_id}/session/{session_id}", method="GET")
async def get_agents_session(
self,
agent_id: str,
session_id: str,
agent_id: str,
turn_ids: Optional[List[str]] = None,
) -> Session: ...
@webmethod(route="/agents/session/delete")
async def delete_agents_session(self, agent_id: str, session_id: str) -> None: ...
@webmethod(route="/agents/{agent_id}/session/{session_id}", method="DELETE")
async def delete_agents_session(
self,
session_id: str,
agent_id: str,
) -> None: ...
@webmethod(route="/agents/delete")
async def delete_agents(
@webmethod(route="/agents/{agent_id}", method="DELETE")
async def delete_agent(
self,
agent_id: str,
) -> None: ...

View file

@ -137,7 +137,7 @@ class EventLogger:
event,
LogEvent(
role=None,
content=delta.content,
content=delta.tool_call,
end="",
color="cyan",
),
@ -208,7 +208,7 @@ class EventLogger:
):
details = event.payload.step_details
inserted_context = interleaved_content_as_str(details.inserted_context)
content = f"fetched {len(inserted_context)} bytes from {details.memory_bank_ids}"
content = f"fetched {len(inserted_context)} bytes from {details.vector_db_ids}"
yield (
event,

View file

@ -54,7 +54,7 @@ class BatchChatCompletionResponse(BaseModel):
@runtime_checkable
class BatchInference(Protocol):
@webmethod(route="/batch-inference/completion")
@webmethod(route="/batch-inference/completion", method="POST")
async def batch_completion(
self,
model: str,
@ -63,7 +63,7 @@ class BatchInference(Protocol):
logprobs: Optional[LogProbConfig] = None,
) -> BatchCompletionResponse: ...
@webmethod(route="/batch-inference/chat-completion")
@webmethod(route="/batch-inference/chat-completion", method="POST")
async def batch_chat_completion(
self,
model: str,

View file

@ -38,8 +38,9 @@ class _URLOrData(BaseModel):
@json_schema_type
class ImageContentItem(_URLOrData):
class ImageContentItem(BaseModel):
type: Literal["image"] = "image"
image: _URLOrData
@json_schema_type
@ -64,14 +65,16 @@ InterleavedContent = register_schema(
)
@json_schema_type
class TextDelta(BaseModel):
type: Literal["text"] = "text"
text: str
@json_schema_type
class ImageDelta(BaseModel):
type: Literal["image"] = "image"
data: bytes
image: bytes
@json_schema_type
@ -89,7 +92,7 @@ class ToolCallDelta(BaseModel):
# you either send an in-progress tool call so the client can stream a long
# code generation or you send the final parsed tool call at the end of the
# stream
content: Union[str, ToolCall]
tool_call: Union[str, ToolCall]
parse_status: ToolCallParseStatus

View file

@ -6,54 +6,71 @@
from typing import Literal, Union
from llama_models.schema_utils import register_schema
from llama_models.schema_utils import json_schema_type, register_schema
from pydantic import BaseModel, Field
from typing_extensions import Annotated
@json_schema_type
class StringType(BaseModel):
type: Literal["string"] = "string"
@json_schema_type
class NumberType(BaseModel):
type: Literal["number"] = "number"
@json_schema_type
class BooleanType(BaseModel):
type: Literal["boolean"] = "boolean"
@json_schema_type
class ArrayType(BaseModel):
type: Literal["array"] = "array"
@json_schema_type
class ObjectType(BaseModel):
type: Literal["object"] = "object"
@json_schema_type
class JsonType(BaseModel):
type: Literal["json"] = "json"
@json_schema_type
class UnionType(BaseModel):
type: Literal["union"] = "union"
@json_schema_type
class ChatCompletionInputType(BaseModel):
# expects List[Message] for messages
type: Literal["chat_completion_input"] = "chat_completion_input"
@json_schema_type
class CompletionInputType(BaseModel):
# expects InterleavedTextMedia for content
type: Literal["completion_input"] = "completion_input"
@json_schema_type
class AgentTurnInputType(BaseModel):
# expects List[Message] for messages (may also include attachments?)
type: Literal["agent_turn_input"] = "agent_turn_input"
@json_schema_type
class DialogType(BaseModel):
# expects List[Message] for messages
# this type semantically contains the output label whereas ChatCompletionInputType does not
type: Literal["dialog"] = "dialog"
ParamType = register_schema(
Annotated[
Union[

View file

@ -29,7 +29,7 @@ class DatasetIO(Protocol):
# keeping for aligning with inference/safety, but this is not used
dataset_store: DatasetStore
@webmethod(route="/datasetio/get-rows-paginated", method="GET")
@webmethod(route="/datasetio/rows", method="GET")
async def get_rows_paginated(
self,
dataset_id: str,
@ -38,7 +38,7 @@ class DatasetIO(Protocol):
filter_condition: Optional[str] = None,
) -> PaginatedRowsResult: ...
@webmethod(route="/datasetio/append-rows", method="POST")
@webmethod(route="/datasetio/rows", method="POST")
async def append_rows(
self, dataset_id: str, rows: List[Dict[str, Any]]
) -> None: ...

View file

@ -7,11 +7,9 @@
from typing import Any, Dict, List, Literal, Optional, Protocol
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.common.type_system import ParamType
from llama_stack.apis.resource import Resource, ResourceType
@ -44,8 +42,12 @@ class DatasetInput(CommonDatasetFields, BaseModel):
provider_dataset_id: Optional[str] = None
class ListDatasetsResponse(BaseModel):
data: List[Dataset]
class Datasets(Protocol):
@webmethod(route="/datasets/register", method="POST")
@webmethod(route="/datasets", method="POST")
async def register_dataset(
self,
dataset_id: str,
@ -56,16 +58,16 @@ class Datasets(Protocol):
metadata: Optional[Dict[str, Any]] = None,
) -> None: ...
@webmethod(route="/datasets/get", method="GET")
@webmethod(route="/datasets/{dataset_id}", method="GET")
async def get_dataset(
self,
dataset_id: str,
) -> Optional[Dataset]: ...
@webmethod(route="/datasets/list", method="GET")
async def list_datasets(self) -> List[Dataset]: ...
@webmethod(route="/datasets", method="GET")
async def list_datasets(self) -> ListDatasetsResponse: ...
@webmethod(route="/datasets/unregister", method="POST")
@webmethod(route="/datasets/{dataset_id}", method="DELETE")
async def unregister_dataset(
self,
dataset_id: str,

View file

@ -0,0 +1,35 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from enum import Enum
from llama_models.schema_utils import json_schema_type
@json_schema_type
class Api(Enum):
inference = "inference"
safety = "safety"
agents = "agents"
vector_io = "vector_io"
datasetio = "datasetio"
scoring = "scoring"
eval = "eval"
post_training = "post_training"
tool_runtime = "tool_runtime"
telemetry = "telemetry"
models = "models"
shields = "shields"
vector_dbs = "vector_dbs"
datasets = "datasets"
scoring_functions = "scoring_functions"
eval_tasks = "eval_tasks"
tool_groups = "tool_groups"
# built-in API
inspect = "inspect"

View file

@ -7,9 +7,7 @@
from typing import Any, Dict, List, Literal, Optional, Protocol, Union
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from typing_extensions import Annotated
from llama_stack.apis.agents import AgentConfig
@ -76,14 +74,14 @@ class EvaluateResponse(BaseModel):
class Eval(Protocol):
@webmethod(route="/eval/run-eval", method="POST")
@webmethod(route="/eval/tasks/{task_id}/jobs", method="POST")
async def run_eval(
self,
task_id: str,
task_config: EvalTaskConfig,
) -> Job: ...
@webmethod(route="/eval/evaluate-rows", method="POST")
@webmethod(route="/eval/tasks/{task_id}/evaluations", method="POST")
async def evaluate_rows(
self,
task_id: str,
@ -92,11 +90,11 @@ class Eval(Protocol):
task_config: EvalTaskConfig,
) -> EvaluateResponse: ...
@webmethod(route="/eval/job/status", method="GET")
@webmethod(route="/eval/tasks/{task_id}/jobs/{job_id}", method="GET")
async def job_status(self, task_id: str, job_id: str) -> Optional[JobStatus]: ...
@webmethod(route="/eval/job/cancel", method="POST")
@webmethod(route="/eval/tasks/{task_id}/jobs/{job_id}", method="DELETE")
async def job_cancel(self, task_id: str, job_id: str) -> None: ...
@webmethod(route="/eval/job/result", method="GET")
async def job_result(self, task_id: str, job_id: str) -> EvaluateResponse: ...
@webmethod(route="/eval/tasks/{task_id}/jobs/{job_id}/result", method="GET")
async def job_result(self, job_id: str, task_id: str) -> EvaluateResponse: ...

View file

@ -6,7 +6,6 @@
from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from llama_stack.apis.resource import Resource, ResourceType
@ -40,15 +39,22 @@ class EvalTaskInput(CommonEvalTaskFields, BaseModel):
provider_eval_task_id: Optional[str] = None
class ListEvalTasksResponse(BaseModel):
data: List[EvalTask]
@runtime_checkable
class EvalTasks(Protocol):
@webmethod(route="/eval-tasks/list", method="GET")
async def list_eval_tasks(self) -> List[EvalTask]: ...
@webmethod(route="/eval-tasks", method="GET")
async def list_eval_tasks(self) -> ListEvalTasksResponse: ...
@webmethod(route="/eval-tasks/get", method="GET")
async def get_eval_task(self, name: str) -> Optional[EvalTask]: ...
@webmethod(route="/eval-tasks/{eval_task_id}", method="GET")
async def get_eval_task(
self,
eval_task_id: str,
) -> Optional[EvalTask]: ...
@webmethod(route="/eval-tasks/register", method="POST")
@webmethod(route="/eval-tasks", method="POST")
async def register_eval_task(
self,
eval_task_id: str,

View file

@ -291,7 +291,7 @@ class ModelStore(Protocol):
class Inference(Protocol):
model_store: ModelStore
@webmethod(route="/inference/completion")
@webmethod(route="/inference/completion", method="POST")
async def completion(
self,
model_id: str,
@ -302,7 +302,7 @@ class Inference(Protocol):
logprobs: Optional[LogProbConfig] = None,
) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]: ...
@webmethod(route="/inference/chat-completion")
@webmethod(route="/inference/chat-completion", method="POST")
async def chat_completion(
self,
model_id: str,
@ -319,7 +319,7 @@ class Inference(Protocol):
ChatCompletionResponse, AsyncIterator[ChatCompletionResponseStreamChunk]
]: ...
@webmethod(route="/inference/embeddings")
@webmethod(route="/inference/embeddings", method="POST")
async def embeddings(
self,
model_id: str,

View file

@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Dict, List, Protocol, runtime_checkable
from typing import List, Protocol, runtime_checkable
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel
@ -12,6 +12,7 @@ from pydantic import BaseModel
@json_schema_type
class ProviderInfo(BaseModel):
api: str
provider_id: str
provider_type: str
@ -34,13 +35,21 @@ class VersionInfo(BaseModel):
version: str
class ListProvidersResponse(BaseModel):
data: List[ProviderInfo]
class ListRoutesResponse(BaseModel):
data: List[RouteInfo]
@runtime_checkable
class Inspect(Protocol):
@webmethod(route="/providers/list", method="GET")
async def list_providers(self) -> Dict[str, ProviderInfo]: ...
@webmethod(route="/inspect/providers", method="GET")
async def list_providers(self) -> ListProvidersResponse: ...
@webmethod(route="/routes/list", method="GET")
async def list_routes(self) -> Dict[str, List[RouteInfo]]: ...
@webmethod(route="/inspect/routes", method="GET")
async def list_routes(self) -> ListRoutesResponse: ...
@webmethod(route="/health", method="GET")
async def health(self) -> HealthInfo: ...

View file

@ -1,152 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from enum import Enum
from typing import (
Annotated,
List,
Literal,
Optional,
Protocol,
runtime_checkable,
Union,
)
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@json_schema_type
class MemoryBankType(Enum):
vector = "vector"
keyvalue = "keyvalue"
keyword = "keyword"
graph = "graph"
# define params for each type of memory bank, this leads to a tagged union
# accepted as input from the API or from the config.
@json_schema_type
class VectorMemoryBankParams(BaseModel):
memory_bank_type: Literal[MemoryBankType.vector.value] = MemoryBankType.vector.value
embedding_model: str
chunk_size_in_tokens: int
overlap_size_in_tokens: Optional[int] = None
@json_schema_type
class KeyValueMemoryBankParams(BaseModel):
memory_bank_type: Literal[MemoryBankType.keyvalue.value] = (
MemoryBankType.keyvalue.value
)
@json_schema_type
class KeywordMemoryBankParams(BaseModel):
memory_bank_type: Literal[MemoryBankType.keyword.value] = (
MemoryBankType.keyword.value
)
@json_schema_type
class GraphMemoryBankParams(BaseModel):
memory_bank_type: Literal[MemoryBankType.graph.value] = MemoryBankType.graph.value
BankParams = Annotated[
Union[
VectorMemoryBankParams,
KeyValueMemoryBankParams,
KeywordMemoryBankParams,
GraphMemoryBankParams,
],
Field(discriminator="memory_bank_type"),
]
# Some common functionality for memory banks.
class MemoryBankResourceMixin(Resource):
type: Literal[ResourceType.memory_bank.value] = ResourceType.memory_bank.value
@property
def memory_bank_id(self) -> str:
return self.identifier
@property
def provider_memory_bank_id(self) -> str:
return self.provider_resource_id
@json_schema_type
class VectorMemoryBank(MemoryBankResourceMixin):
memory_bank_type: Literal[MemoryBankType.vector.value] = MemoryBankType.vector.value
embedding_model: str
chunk_size_in_tokens: int
embedding_dimension: Optional[int] = 384 # default to minilm-l6-v2
overlap_size_in_tokens: Optional[int] = None
@json_schema_type
class KeyValueMemoryBank(MemoryBankResourceMixin):
memory_bank_type: Literal[MemoryBankType.keyvalue.value] = (
MemoryBankType.keyvalue.value
)
# TODO: KeyValue and Keyword are so similar in name, oof. Get a better naming convention.
@json_schema_type
class KeywordMemoryBank(MemoryBankResourceMixin):
memory_bank_type: Literal[MemoryBankType.keyword.value] = (
MemoryBankType.keyword.value
)
@json_schema_type
class GraphMemoryBank(MemoryBankResourceMixin):
memory_bank_type: Literal[MemoryBankType.graph.value] = MemoryBankType.graph.value
MemoryBank = Annotated[
Union[
VectorMemoryBank,
KeyValueMemoryBank,
KeywordMemoryBank,
GraphMemoryBank,
],
Field(discriminator="memory_bank_type"),
]
class MemoryBankInput(BaseModel):
memory_bank_id: str
params: BankParams
provider_memory_bank_id: Optional[str] = None
@runtime_checkable
@trace_protocol
class MemoryBanks(Protocol):
@webmethod(route="/memory-banks/list", method="GET")
async def list_memory_banks(self) -> List[MemoryBank]: ...
@webmethod(route="/memory-banks/get", method="GET")
async def get_memory_bank(self, memory_bank_id: str) -> Optional[MemoryBank]: ...
@webmethod(route="/memory-banks/register", method="POST")
async def register_memory_bank(
self,
memory_bank_id: str,
params: BankParams,
provider_id: Optional[str] = None,
provider_memory_bank_id: Optional[str] = None,
) -> MemoryBank: ...
@webmethod(route="/memory-banks/unregister", method="POST")
async def unregister_memory_bank(self, memory_bank_id: str) -> None: ...

View file

@ -52,16 +52,23 @@ class ModelInput(CommonModelFields):
model_config = ConfigDict(protected_namespaces=())
class ListModelsResponse(BaseModel):
data: List[Model]
@runtime_checkable
@trace_protocol
class Models(Protocol):
@webmethod(route="/models/list", method="GET")
async def list_models(self) -> List[Model]: ...
@webmethod(route="/models", method="GET")
async def list_models(self) -> ListModelsResponse: ...
@webmethod(route="/models/get", method="GET")
async def get_model(self, identifier: str) -> Optional[Model]: ...
@webmethod(route="/models/{model_id}", method="GET")
async def get_model(
self,
model_id: str,
) -> Optional[Model]: ...
@webmethod(route="/models/register", method="POST")
@webmethod(route="/models", method="POST")
async def register_model(
self,
model_id: str,
@ -71,5 +78,8 @@ class Models(Protocol):
model_type: Optional[ModelType] = None,
) -> Model: ...
@webmethod(route="/models/unregister", method="POST")
async def unregister_model(self, model_id: str) -> None: ...
@webmethod(route="/models/{model_id}", method="DELETE")
async def unregister_model(
self,
model_id: str,
) -> None: ...

View file

@ -6,16 +6,13 @@
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Protocol, Union
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from typing_extensions import Annotated
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.common.job_types import JobStatus
from llama_stack.apis.common.training_types import Checkpoint
@ -27,11 +24,18 @@ class OptimizerType(Enum):
sgd = "sgd"
@json_schema_type
class DatasetFormat(Enum):
instruct = "instruct"
dialog = "dialog"
@json_schema_type
class DataConfig(BaseModel):
dataset_id: str
batch_size: int
shuffle: bool
data_format: DatasetFormat
validation_dataset_id: Optional[str] = None
packed: Optional[bool] = False
train_on_input: Optional[bool] = False
@ -152,6 +156,10 @@ class PostTrainingJobStatusResponse(BaseModel):
checkpoints: List[Checkpoint] = Field(default_factory=list)
class ListPostTrainingJobsResponse(BaseModel):
data: List[PostTrainingJob]
@json_schema_type
class PostTrainingJobArtifactsResponse(BaseModel):
"""Artifacts of a finetuning job."""
@ -190,7 +198,7 @@ class PostTraining(Protocol):
) -> PostTrainingJob: ...
@webmethod(route="/post-training/jobs", method="GET")
async def get_training_jobs(self) -> List[PostTrainingJob]: ...
async def get_training_jobs(self) -> ListPostTrainingJobsResponse: ...
@webmethod(route="/post-training/job/status", method="GET")
async def get_training_job_status(

View file

@ -14,7 +14,7 @@ from pydantic import BaseModel, Field
class ResourceType(Enum):
model = "model"
shield = "shield"
memory_bank = "memory_bank"
vector_db = "vector_db"
dataset = "dataset"
scoring_function = "scoring_function"
eval_task = "eval_task"
@ -37,5 +37,5 @@ class Resource(BaseModel):
provider_id: str = Field(description="ID of the provider that owns this resource")
type: ResourceType = Field(
description="Type of resource (e.g. 'model', 'shield', 'memory_bank', etc.)"
description="Type of resource (e.g. 'model', 'shield', 'vector_db', etc.)"
)

View file

@ -12,7 +12,6 @@ from pydantic import BaseModel, Field
from llama_stack.apis.inference import Message
from llama_stack.apis.shields import Shield
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@ -49,7 +48,7 @@ class ShieldStore(Protocol):
class Safety(Protocol):
shield_store: ShieldStore
@webmethod(route="/safety/run-shield")
@webmethod(route="/safety/run-shield", method="POST")
async def run_shield(
self,
shield_id: str,

View file

@ -11,7 +11,6 @@ from pydantic import BaseModel
from llama_stack.apis.scoring_functions import ScoringFn, ScoringFnParams
# mapping of metric to value
ScoringResultRow = Dict[str, Any]
@ -43,7 +42,7 @@ class ScoringFunctionStore(Protocol):
class Scoring(Protocol):
scoring_function_store: ScoringFunctionStore
@webmethod(route="/scoring/score-batch")
@webmethod(route="/scoring/score-batch", method="POST")
async def score_batch(
self,
dataset_id: str,
@ -51,7 +50,7 @@ class Scoring(Protocol):
save_results_dataset: bool = False,
) -> ScoreBatchResponse: ...
@webmethod(route="/scoring/score")
@webmethod(route="/scoring/score", method="POST")
async def score(
self,
input_rows: List[Dict[str, Any]],

View file

@ -21,7 +21,6 @@ from pydantic import BaseModel, Field
from typing_extensions import Annotated
from llama_stack.apis.common.type_system import ParamType
from llama_stack.apis.resource import Resource, ResourceType
@ -129,15 +128,21 @@ class ScoringFnInput(CommonScoringFnFields, BaseModel):
provider_scoring_fn_id: Optional[str] = None
class ListScoringFunctionsResponse(BaseModel):
data: List[ScoringFn]
@runtime_checkable
class ScoringFunctions(Protocol):
@webmethod(route="/scoring-functions/list", method="GET")
async def list_scoring_functions(self) -> List[ScoringFn]: ...
@webmethod(route="/scoring-functions", method="GET")
async def list_scoring_functions(self) -> ListScoringFunctionsResponse: ...
@webmethod(route="/scoring-functions/get", method="GET")
async def get_scoring_function(self, scoring_fn_id: str) -> Optional[ScoringFn]: ...
@webmethod(route="/scoring-functions/{scoring_fn_id}", method="GET")
async def get_scoring_function(
self, scoring_fn_id: str, /
) -> Optional[ScoringFn]: ...
@webmethod(route="/scoring-functions/register", method="POST")
@webmethod(route="/scoring-functions", method="POST")
async def register_scoring_function(
self,
scoring_fn_id: str,

View file

@ -38,16 +38,20 @@ class ShieldInput(CommonShieldFields):
provider_shield_id: Optional[str] = None
class ListShieldsResponse(BaseModel):
data: List[Shield]
@runtime_checkable
@trace_protocol
class Shields(Protocol):
@webmethod(route="/shields/list", method="GET")
async def list_shields(self) -> List[Shield]: ...
@webmethod(route="/shields", method="GET")
async def list_shields(self) -> ListShieldsResponse: ...
@webmethod(route="/shields/get", method="GET")
@webmethod(route="/shields/{identifier}", method="GET")
async def get_shield(self, identifier: str) -> Optional[Shield]: ...
@webmethod(route="/shields/register", method="POST")
@webmethod(route="/shields", method="POST")
async def register_shield(
self,
shield_id: str,

View file

@ -169,39 +169,57 @@ class QueryCondition(BaseModel):
value: Any
class QueryTracesResponse(BaseModel):
data: List[Trace]
class QuerySpansResponse(BaseModel):
data: List[Span]
class QuerySpanTreeResponse(BaseModel):
data: Dict[str, SpanWithStatus]
@runtime_checkable
class Telemetry(Protocol):
@webmethod(route="/telemetry/log-event")
@webmethod(route="/telemetry/events", method="POST")
async def log_event(
self, event: Event, ttl_seconds: int = DEFAULT_TTL_DAYS * 86400
) -> None: ...
@webmethod(route="/telemetry/query-traces", method="POST")
@webmethod(route="/telemetry/traces", method="GET")
async def query_traces(
self,
attribute_filters: Optional[List[QueryCondition]] = None,
limit: Optional[int] = 100,
offset: Optional[int] = 0,
order_by: Optional[List[str]] = None,
) -> List[Trace]: ...
) -> QueryTracesResponse: ...
@webmethod(route="/telemetry/get-span-tree", method="POST")
@webmethod(route="/telemetry/traces/{trace_id}", method="GET")
async def get_trace(self, trace_id: str) -> Trace: ...
@webmethod(route="/telemetry/traces/{trace_id}/spans/{span_id}", method="GET")
async def get_span(self, trace_id: str, span_id: str) -> Span: ...
@webmethod(route="/telemetry/spans/{span_id}/tree", method="GET")
async def get_span_tree(
self,
span_id: str,
attributes_to_return: Optional[List[str]] = None,
max_depth: Optional[int] = None,
) -> Dict[str, SpanWithStatus]: ...
) -> QuerySpanTreeResponse: ...
@webmethod(route="/telemetry/query-spans", method="POST")
@webmethod(route="/telemetry/spans", method="GET")
async def query_spans(
self,
attribute_filters: List[QueryCondition],
attributes_to_return: List[str],
max_depth: Optional[int] = None,
) -> List[Span]: ...
) -> QuerySpansResponse: ...
@webmethod(route="/telemetry/save-spans-to-dataset", method="POST")
@webmethod(route="/telemetry/spans/export", method="POST")
async def save_spans_to_dataset(
self,
attribute_filters: List[QueryCondition],

View file

@ -5,3 +5,4 @@
# the root directory of this source tree.
from .tools import * # noqa: F401 F403
from .rag_tool import * # noqa: F401 F403

View file

@ -0,0 +1,95 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Union
from llama_models.schema_utils import json_schema_type, register_schema, webmethod
from pydantic import BaseModel, Field
from typing_extensions import Annotated, Protocol, runtime_checkable
from llama_stack.apis.common.content_types import InterleavedContent, URL
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@json_schema_type
class RAGDocument(BaseModel):
document_id: str
content: InterleavedContent | URL
mime_type: str | None = None
metadata: Dict[str, Any] = Field(default_factory=dict)
@json_schema_type
class RAGQueryResult(BaseModel):
content: Optional[InterleavedContent] = None
@json_schema_type
class RAGQueryGenerator(Enum):
default = "default"
llm = "llm"
custom = "custom"
@json_schema_type
class DefaultRAGQueryGeneratorConfig(BaseModel):
type: Literal["default"] = "default"
separator: str = " "
@json_schema_type
class LLMRAGQueryGeneratorConfig(BaseModel):
type: Literal["llm"] = "llm"
model: str
template: str
RAGQueryGeneratorConfig = register_schema(
Annotated[
Union[
DefaultRAGQueryGeneratorConfig,
LLMRAGQueryGeneratorConfig,
],
Field(discriminator="type"),
],
name="RAGQueryGeneratorConfig",
)
@json_schema_type
class RAGQueryConfig(BaseModel):
# This config defines how a query is generated using the messages
# for memory bank retrieval.
query_generator_config: RAGQueryGeneratorConfig = Field(
default=DefaultRAGQueryGeneratorConfig()
)
max_tokens_in_context: int = 4096
max_chunks: int = 5
@runtime_checkable
@trace_protocol
class RAGToolRuntime(Protocol):
@webmethod(route="/tool-runtime/rag-tool/insert", method="POST")
async def insert(
self,
documents: List[RAGDocument],
vector_db_id: str,
chunk_size_in_tokens: int = 512,
) -> None:
"""Index documents so they can be used by the RAG system"""
...
@webmethod(route="/tool-runtime/rag-tool/query", method="POST")
async def query(
self,
content: InterleavedContent,
vector_db_ids: List[str],
query_config: Optional[RAGQueryConfig] = None,
) -> RAGQueryResult:
"""Query the RAG system for context; typically invoked by the agent"""
...
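A minimal usage sketch of the RAG tool runtime protocol defined above, assuming `runtime` is any concrete `RAGToolRuntime` implementation, that a plain string is accepted as `InterleavedContent`, and that the vector DB id is illustrative:

async def index_and_query(runtime: RAGToolRuntime) -> RAGQueryResult:
    # Index one document into an existing vector DB.
    doc = RAGDocument(
        document_id="doc-1",
        content="Llama Stack composes several APIs behind one server.",
        mime_type="text/plain",
    )
    await runtime.insert(
        documents=[doc],
        vector_db_id="my-vector-db",
        chunk_size_in_tokens=512,
    )
    # Retrieve context for a question using the default query generator.
    return await runtime.query(
        content="How is Llama Stack composed?",
        vector_db_ids=["my-vector-db"],
        query_config=RAGQueryConfig(),
    )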

View file

@@ -15,6 +15,8 @@ from llama_stack.apis.common.content_types import InterleavedContent, URL
from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from .rag_tool import RAGToolRuntime
@json_schema_type
class ToolParameter(BaseModel):
@@ -74,13 +76,21 @@ class ToolInvocationResult(BaseModel):
class ToolStore(Protocol):
def get_tool(self, tool_name: str) -> Tool: ...
def get_tool_group(self, tool_group_id: str) -> ToolGroup: ...
def get_tool_group(self, toolgroup_id: str) -> ToolGroup: ...
class ListToolGroupsResponse(BaseModel):
data: List[ToolGroup]
class ListToolsResponse(BaseModel):
data: List[Tool]
@runtime_checkable
@trace_protocol
class ToolGroups(Protocol):
@webmethod(route="/toolgroups/register", method="POST")
@webmethod(route="/toolgroups", method="POST")
async def register_tool_group(
self,
toolgroup_id: str,
@@ -91,36 +101,48 @@ class ToolGroups(Protocol):
"""Register a tool group"""
...
@webmethod(route="/toolgroups/get", method="GET")
@webmethod(route="/toolgroups/{toolgroup_id}", method="GET")
async def get_tool_group(
self,
toolgroup_id: str,
) -> ToolGroup: ...
@webmethod(route="/toolgroups/list", method="GET")
async def list_tool_groups(self) -> List[ToolGroup]:
@webmethod(route="/toolgroups", method="GET")
async def list_tool_groups(self) -> ListToolGroupsResponse:
"""List tool groups with optional provider"""
...
@webmethod(route="/tools/list", method="GET")
async def list_tools(self, tool_group_id: Optional[str] = None) -> List[Tool]:
@webmethod(route="/tools", method="GET")
async def list_tools(self, toolgroup_id: Optional[str] = None) -> ListToolsResponse:
"""List tools with optional tool group"""
...
@webmethod(route="/tools/get", method="GET")
async def get_tool(self, tool_name: str) -> Tool: ...
@webmethod(route="/tools/{tool_name}", method="GET")
async def get_tool(
self,
tool_name: str,
) -> Tool: ...
@webmethod(route="/toolgroups/unregister", method="POST")
async def unregister_tool_group(self, tool_group_id: str) -> None:
@webmethod(route="/toolgroups/{toolgroup_id}", method="DELETE")
async def unregister_toolgroup(
self,
toolgroup_id: str,
) -> None:
"""Unregister a tool group"""
...
class SpecialToolGroup(Enum):
rag_tool = "rag_tool"
@runtime_checkable
@trace_protocol
class ToolRuntime(Protocol):
tool_store: ToolStore
rag_tool: RAGToolRuntime
# TODO: This needs to be renamed once the OpenAPI generator name conflict issue is fixed.
@webmethod(route="/tool-runtime/list-tools", method="GET")
async def list_runtime_tools(
@@ -129,7 +151,7 @@ class ToolRuntime(Protocol):
@webmethod(route="/tool-runtime/invoke", method="POST")
async def invoke_tool(
self, tool_name: str, args: Dict[str, Any]
self, tool_name: str, kwargs: Dict[str, Any]
) -> ToolInvocationResult:
"""Run a tool with the given arguments"""
...
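A hedged client-side sketch of the reworked RESTful tool-group routes, assuming `client` implements the `ToolGroups` protocol above and that `ToolGroup` and `Tool` expose the `identifier` field inherited from `Resource`:

async def show_toolgroups(client: ToolGroups) -> None:
    # GET /toolgroups -> ListToolGroupsResponse
    groups = await client.list_tool_groups()
    for group in groups.data:
        # GET /tools?toolgroup_id=... -> ListToolsResponse
        tools = await client.list_tools(toolgroup_id=group.identifier)
        print(group.identifier, [tool.identifier for tool in tools.data])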

View file

@@ -4,4 +4,4 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from .memory_banks import * # noqa: F401 F403
from .vector_dbs import * # noqa: F401 F403

View file

@@ -0,0 +1,66 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import List, Literal, Optional, Protocol, runtime_checkable
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel
from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@json_schema_type
class VectorDB(Resource):
type: Literal[ResourceType.vector_db.value] = ResourceType.vector_db.value
embedding_model: str
embedding_dimension: int
@property
def vector_db_id(self) -> str:
return self.identifier
@property
def provider_vector_db_id(self) -> str:
return self.provider_resource_id
class VectorDBInput(BaseModel):
vector_db_id: str
embedding_model: str
embedding_dimension: int
provider_vector_db_id: Optional[str] = None
class ListVectorDBsResponse(BaseModel):
data: List[VectorDB]
@runtime_checkable
@trace_protocol
class VectorDBs(Protocol):
@webmethod(route="/vector-dbs", method="GET")
async def list_vector_dbs(self) -> ListVectorDBsResponse: ...
@webmethod(route="/vector-dbs/{vector_db_id}", method="GET")
async def get_vector_db(
self,
vector_db_id: str,
) -> Optional[VectorDB]: ...
@webmethod(route="/vector-dbs", method="POST")
async def register_vector_db(
self,
vector_db_id: str,
embedding_model: str,
embedding_dimension: Optional[int] = 384,
provider_id: Optional[str] = None,
provider_vector_db_id: Optional[str] = None,
) -> VectorDB: ...
@webmethod(route="/vector-dbs/{vector_db_id}", method="DELETE")
async def unregister_vector_db(self, vector_db_id: str) -> None: ...
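A short sketch exercising the `VectorDBs` protocol above; the embedding model name and ids are illustrative, and `registry` stands for any concrete implementation:

async def ensure_vector_db(registry: VectorDBs) -> VectorDB:
    # POST /vector-dbs: register a new vector DB.
    db = await registry.register_vector_db(
        vector_db_id="my-vector-db",
        embedding_model="all-MiniLM-L6-v2",
        embedding_dimension=384,
    )
    # GET /vector-dbs/{vector_db_id}: fetch it back.
    fetched = await registry.get_vector_db("my-vector-db")
    assert fetched is not None and fetched.vector_db_id == db.vector_db_id
    return db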

View file

@@ -0,0 +1,7 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from .vector_io import * # noqa: F401 F403

View file

@@ -13,55 +13,45 @@ from typing import Any, Dict, List, Optional, Protocol, runtime_checkable
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.inference import InterleavedContent
from llama_stack.apis.memory_banks import MemoryBank
from llama_stack.apis.vector_dbs import VectorDB
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
@json_schema_type
class MemoryBankDocument(BaseModel):
document_id: str
content: InterleavedContent | URL
mime_type: str | None = None
metadata: Dict[str, Any] = Field(default_factory=dict)
class Chunk(BaseModel):
content: InterleavedContent
token_count: int
document_id: str
metadata: Dict[str, Any] = Field(default_factory=dict)
@json_schema_type
class QueryDocumentsResponse(BaseModel):
class QueryChunksResponse(BaseModel):
chunks: List[Chunk]
scores: List[float]
class MemoryBankStore(Protocol):
def get_memory_bank(self, bank_id: str) -> Optional[MemoryBank]: ...
class VectorDBStore(Protocol):
def get_vector_db(self, vector_db_id: str) -> Optional[VectorDB]: ...
@runtime_checkable
@trace_protocol
class Memory(Protocol):
memory_bank_store: MemoryBankStore
class VectorIO(Protocol):
vector_db_store: VectorDBStore
# this will just block now until documents are inserted, but it should
# this will just block now until chunks are inserted, but it should
# probably return a Job instance which can be polled for completion
@webmethod(route="/memory/insert")
async def insert_documents(
@webmethod(route="/vector-io/insert", method="POST")
async def insert_chunks(
self,
bank_id: str,
documents: List[MemoryBankDocument],
vector_db_id: str,
chunks: List[Chunk],
ttl_seconds: Optional[int] = None,
) -> None: ...
@webmethod(route="/memory/query")
async def query_documents(
@webmethod(route="/vector-io/query", method="POST")
async def query_chunks(
self,
bank_id: str,
vector_db_id: str,
query: InterleavedContent,
params: Optional[Dict[str, Any]] = None,
) -> QueryDocumentsResponse: ...
) -> QueryChunksResponse: ...
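A minimal sketch of the renamed chunk-level surface, assuming `io` is a concrete `VectorIO` implementation, that a plain string is accepted as `InterleavedContent`, and that the ids are illustrative:

async def roundtrip(io: VectorIO) -> QueryChunksResponse:
    chunk = Chunk(
        content="hello world",
        token_count=2,
        document_id="doc-1",
    )
    # POST /vector-io/insert
    await io.insert_chunks(vector_db_id="my-vector-db", chunks=[chunk])
    # POST /vector-io/query
    return await io.query_chunks(vector_db_id="my-vector-db", query="hello")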

View file

@@ -4,4 +4,4 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
LLAMA_STACK_API_VERSION = "alpha"
LLAMA_STACK_API_VERSION = "v1"

View file

@@ -13,7 +13,6 @@ from termcolor import colored
from llama_stack.cli.subcommand import Subcommand
from llama_stack.cli.table import print_table
from llama_stack.distribution.utils.serialize import EnumEncoder
class ModelDescribe(Subcommand):
@@ -72,7 +71,7 @@ class ModelDescribe(Subcommand):
rows.append(
(
"Recommended sampling params",
json.dumps(sampling_params, cls=EnumEncoder, indent=4),
json.dumps(sampling_params, indent=4),
)
)

View file

@@ -0,0 +1,307 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import argparse
import importlib.resources
import json
import os
import shutil
import textwrap
from functools import lru_cache
from pathlib import Path
from typing import Dict, Optional
import yaml
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.validation import Validator
from termcolor import cprint
from llama_stack.cli.table import print_table
from llama_stack.distribution.build import build_image, ImageType
from llama_stack.distribution.datatypes import (
BuildConfig,
DistributionSpec,
Provider,
StackRunConfig,
)
from llama_stack.distribution.distribution import get_provider_registry
from llama_stack.distribution.resolver import InvalidProviderError
from llama_stack.distribution.utils.config_dirs import DISTRIBS_BASE_DIR
from llama_stack.distribution.utils.dynamic import instantiate_class_type
from llama_stack.providers.datatypes import Api
TEMPLATES_PATH = Path(__file__).parent.parent.parent / "templates"
@lru_cache()
def available_templates_specs() -> Dict[str, BuildConfig]:
import yaml
template_specs = {}
for p in TEMPLATES_PATH.rglob("*build.yaml"):
template_name = p.parent.name
with open(p, "r") as f:
build_config = BuildConfig(**yaml.safe_load(f))
template_specs[template_name] = build_config
return template_specs
def run_stack_build_command(
parser: argparse.ArgumentParser, args: argparse.Namespace
) -> None:
if args.list_templates:
return _run_template_list_cmd()
current_conda_env = os.environ.get("CONDA_DEFAULT_ENV")
image_name = args.image_name or current_conda_env
if args.template:
available_templates = available_templates_specs()
if args.template not in available_templates:
cprint(
f"Could not find template {args.template}. Please run `llama stack build --list-templates` to check out the available templates",
color="red",
)
return
build_config = available_templates[args.template]
if args.image_type:
build_config.image_type = args.image_type
else:
cprint(
f"Please specify a image-type (docker | conda | venv) for {args.template}",
color="red",
)
return
_run_stack_build_command_from_build_config(
build_config,
image_name=image_name,
template_name=args.template,
)
return
if not args.config and not args.template:
name = prompt(
"> Enter a name for your Llama Stack (e.g. my-local-stack): ",
validator=Validator.from_callable(
lambda x: len(x) > 0,
error_message="Name cannot be empty, please enter a name",
),
)
image_type = prompt(
"> Enter the image type you want your Llama Stack to be built as (docker or conda or venv): ",
validator=Validator.from_callable(
lambda x: x in ["docker", "conda", "venv"],
error_message="Invalid image type, please enter conda or docker or venv",
),
default="conda",
)
if image_type == "conda":
if not image_name:
cprint(
f"No current conda environment detected or specified, will create a new conda environment with the name `llamastack-{name}`",
color="yellow",
)
image_name = f"llamastack-{name}"
else:
cprint(
f"Using conda environment {image_name}",
color="green",
)
cprint(
textwrap.dedent(
"""
Llama Stack is composed of several APIs working together. Let's select
the provider types (implementations) you want to use for these APIs.
""",
),
color="green",
)
print("Tip: use <TAB> to see options for the providers.\n")
providers = dict()
for api, providers_for_api in get_provider_registry().items():
available_providers = [
x
for x in providers_for_api.keys()
if x not in ("remote", "remote::sample")
]
api_provider = prompt(
"> Enter provider for API {}: ".format(api.value),
completer=WordCompleter(available_providers),
complete_while_typing=True,
validator=Validator.from_callable(
lambda x: x in available_providers,
error_message="Invalid provider, use <TAB> to see options",
),
)
providers[api.value] = api_provider
description = prompt(
"\n > (Optional) Enter a short description for your Llama Stack: ",
default="",
)
distribution_spec = DistributionSpec(
providers=providers,
description=description,
)
build_config = BuildConfig(
image_type=image_type, distribution_spec=distribution_spec
)
else:
with open(args.config, "r") as f:
try:
build_config = BuildConfig(**yaml.safe_load(f))
except Exception as e:
cprint(
f"Could not parse config file {args.config}: {e}",
color="red",
)
return
_run_stack_build_command_from_build_config(build_config, image_name=image_name)
def _generate_run_config(
build_config: BuildConfig, build_dir: Path, image_name: str
) -> None:
"""
Generate a run.yaml template file for user to edit from a build.yaml file
"""
apis = list(build_config.distribution_spec.providers.keys())
run_config = StackRunConfig(
container_image=(
image_name if build_config.image_type == ImageType.container.value else None
),
image_name=image_name,
apis=apis,
providers={},
)
# build providers dict
provider_registry = get_provider_registry()
for api in apis:
run_config.providers[api] = []
provider_types = build_config.distribution_spec.providers[api]
if isinstance(provider_types, str):
provider_types = [provider_types]
for i, provider_type in enumerate(provider_types):
pid = provider_type.split("::")[-1]
p = provider_registry[Api(api)][provider_type]
if p.deprecation_error:
raise InvalidProviderError(p.deprecation_error)
config_type = instantiate_class_type(
provider_registry[Api(api)][provider_type].config_class
)
if hasattr(config_type, "sample_run_config"):
config = config_type.sample_run_config(
__distro_dir__=f"distributions/{image_name}"
)
else:
config = {}
p_spec = Provider(
provider_id=f"{pid}-{i}" if len(provider_types) > 1 else pid,
provider_type=provider_type,
config=config,
)
run_config.providers[api].append(p_spec)
run_config_file = build_dir / f"{image_name}-run.yaml"
with open(run_config_file, "w") as f:
to_write = json.loads(run_config.model_dump_json())
f.write(yaml.dump(to_write, sort_keys=False))
cprint(
f"You can now edit {run_config_file} and run `llama stack run {image_name}`",
color="green",
)
def _run_stack_build_command_from_build_config(
build_config: BuildConfig,
image_name: Optional[str] = None,
template_name: Optional[str] = None,
) -> None:
if build_config.image_type == ImageType.container.value:
if template_name:
image_name = f"distribution-{template_name}"
else:
if not image_name:
raise ValueError(
"Please specify an image name when building a docker image without a template"
)
elif build_config.image_type == ImageType.conda.value:
if not image_name:
raise ValueError("Please specify an image name when building a conda image")
if template_name:
build_dir = DISTRIBS_BASE_DIR / template_name
build_file_path = build_dir / f"{template_name}-build.yaml"
else:
build_dir = DISTRIBS_BASE_DIR / image_name
build_file_path = build_dir / f"{image_name}-build.yaml"
os.makedirs(build_dir, exist_ok=True)
with open(build_file_path, "w") as f:
to_write = json.loads(build_config.model_dump_json())
f.write(yaml.dump(to_write, sort_keys=False))
return_code = build_image(
build_config, build_file_path, image_name, template_name=template_name
)
if return_code != 0:
return
if template_name:
# copy run.yaml from template to build_dir instead of generating it again
template_path = (
importlib.resources.files("llama_stack")
/ f"templates/{template_name}/run.yaml"
)
with importlib.resources.as_file(template_path) as path:
run_config_file = build_dir / f"{template_name}-run.yaml"
shutil.copy(path, run_config_file)
# Find all ${env.VARIABLE} patterns
cprint("Build Successful!", color="green")
else:
_generate_run_config(build_config, build_dir, image_name)
def _run_template_list_cmd() -> None:
# eventually, this should query a registry at llama.meta.com/llamastack/distributions
headers = [
"Template Name",
# "Providers",
"Description",
]
rows = []
for template_name, spec in available_templates_specs().items():
rows.append(
[
template_name,
# json.dumps(spec.distribution_spec.providers, indent=2),
spec.distribution_spec.description,
]
)
print_table(
rows,
headers,
separate_rows=True,
)
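A hypothetical programmatic equivalent of `llama stack build --template fireworks --image-type container`, using only the helpers defined above; the template name is illustrative and is assumed to ship a `build.yaml` under `templates/`:

def build_fireworks_container() -> None:
    # Load every templates/*/build.yaml into BuildConfig objects.
    templates = available_templates_specs()
    build_config = templates["fireworks"]
    # Mirror the --image-type flag; ImageType is imported at the top of this file.
    build_config.image_type = ImageType.container.value
    # With a template name, the run.yaml is copied rather than regenerated.
    _run_stack_build_command_from_build_config(
        build_config,
        image_name=None,
        template_name="fireworks",
    )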

Some files were not shown because too many files have changed in this diff.