Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-21 16:07:16 +00:00)

Compare commits: v0.2.24-de ... main (41 commits)

9191005ca1, 0e96279bee, 5aaf1a8bca, 122de785c4, 444f6c88f3, 6a13a99e77,
407bade359, ffeb86385c, b215eb5944, d9274d199e, ec364499f5, 6a74894e22,
5aafce4ff3, 5678c25b9d, 7294385df3, 8943335e0b, e7f4ddcc86, ab2d5febb4,
94faec7bc5, 483d53cc37, 48581bf651, 2c43285e22, add64e8e2a, 1f38359d95,
08cbb69ef7, 112a974005, 9936f33f7e, 359df3a37c, 21772de5d3, 573e783ff0,
165b8b07f4, f675fdda0f, 7a256895aa, 83d2193077, 316b76db7a, b11bcfde11,
943558af36, 224c99560c, c9f0bebcb7, a701f68bd7, 4c9d944380
322 changed files with 46889 additions and 21521 deletions
.dockerignore (new file, 19 lines)

@@ -0,0 +1,19 @@
+ .venv
+ __pycache__
+ *.pyc
+ *.pyo
+ *.pyd
+ *.so
+ .git
+ .gitignore
+ htmlcov*
+ .coverage
+ coverage*
+ .cache
+ .mypy_cache
+ .pytest_cache
+ .ruff_cache
+ uv.lock
+ node_modules
+ build
+ /tmp
@@ -82,11 +82,13 @@ runs:
        echo "No recording changes"
      fi

-   - name: Write inference logs to file
+   - name: Write docker logs to file
      if: ${{ always() }}
      shell: bash
      run: |
-       sudo docker logs ollama > ollama-${{ inputs.inference-mode }}.log || true
+       # Ollama logs (if ollama container exists)
+       sudo docker logs ollama > ollama-${{ inputs.inference-mode }}.log 2>&1 || true
+       # Note: distro container logs are now dumped in integration-tests.sh before container is removed

    - name: Upload logs
      if: ${{ always() }}

@@ -57,7 +57,7 @@ runs:
        echo "Building Llama Stack"

        LLAMA_STACK_DIR=. \
-       uv run --no-sync llama stack build --template ci-tests --image-type venv
+       uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install

    - name: Configure git for commits
      shell: bash
.github/workflows/README.md (vendored, 1 line changed)

@@ -14,6 +14,7 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a tabl
  | Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks |
  | Pre-commit Bot | [precommit-trigger.yml](precommit-trigger.yml) | Pre-commit bot for PR |
  | Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build |
+ | Test llama stack list-deps | [providers-list-deps.yml](providers-list-deps.yml) | Test llama stack list-deps |
  | Python Package Build Test | [python-build-test.yml](python-build-test.yml) | Test building the llama-stack PyPI project |
  | Integration Tests (Record) | [record-integration-tests.yml](record-integration-tests.yml) | Run the integration test suite from tests/integration |
  | Check semantic PR titles | [semantic-pr.yml](semantic-pr.yml) | Ensure that PR titles follow the conventional commit spec |
.github/workflows/install-script-ci.yml (vendored, 7 lines changed)

@@ -30,8 +30,11 @@ jobs:
    - name: Build a single provider
      run: |
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync \
-         llama stack build --template starter --image-type container --image-name test
+       docker build . \
+         -f containers/Containerfile \
+         --build-arg INSTALL_MODE=editable \
+         --build-arg DISTRO_NAME=starter \
+         --tag llama-stack:starter-ci

    - name: Run installer end-to-end
      run: |
.github/workflows/integration-auth-tests.yml (vendored, 18 lines changed)

@@ -73,6 +73,24 @@ jobs:
    image_name: kube
    apis: []
    providers: {}
+   storage:
+     backends:
+       kv_default:
+         type: kv_sqlite
+         db_path: $run_dir/kvstore.db
+       sql_default:
+         type: sql_sqlite
+         db_path: $run_dir/sql_store.db
+     stores:
+       metadata:
+         namespace: registry
+         backend: kv_default
+       inference:
+         table_name: inference_store
+         backend: sql_default
+       conversations:
+         table_name: openai_conversations
+         backend: sql_default
    server:
      port: 8321
    EOF
.github/workflows/integration-tests.yml (vendored, 4 lines changed)

@@ -47,7 +47,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-       client-type: [library, server]
+       client-type: [library, server, docker]
        # Use Python 3.13 only on nightly schedule (daily latest client test), otherwise use 3.12
        python-version: ${{ github.event.schedule == '0 0 * * *' && fromJSON('["3.12", "3.13"]') || fromJSON('["3.12"]') }}
        client-version: ${{ (github.event.schedule == '0 0 * * *' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}

@@ -82,7 +82,7 @@ jobs:
      env:
        OPENAI_API_KEY: dummy
      with:
-       stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || 'server:ci-tests' }}
+       stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || matrix.client-type == 'server' && 'server:ci-tests' || 'docker:ci-tests' }}
        setup: ${{ matrix.config.setup }}
        inference-mode: 'replay'
        suite: ${{ matrix.config.suite }}

@@ -144,7 +144,7 @@ jobs:
    - name: Build Llama Stack
      run: |
-       uv run --no-sync llama stack build --template ci-tests --image-type venv
+       uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install

    - name: Check Storage and Memory Available Before Tests
      if: ${{ always() }}

@@ -169,9 +169,7 @@ jobs:
      run: |
        uv run --no-sync \
          pytest -sv --stack-config="files=inline::localfs,inference=inline::sentence-transformers,vector_io=${{ matrix.vector-io-provider }}" \
-         tests/integration/vector_io \
-         --embedding-model inline::sentence-transformers/nomic-ai/nomic-embed-text-v1.5 \
-         --embedding-dimension 768
+         tests/integration/vector_io

    - name: Check Storage and Memory Available After Tests
      if: ${{ always() }}
.github/workflows/pre-commit.yml (vendored, 2 lines changed)

@@ -37,7 +37,7 @@ jobs:
        .pre-commit-config.yaml

    - name: Set up Node.js
-     uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
+     uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
      with:
        node-version: '20'
        cache: 'npm'
.github/workflows/precommit-trigger.yml (vendored, 4 lines changed)

@@ -99,7 +99,7 @@ jobs:
      owner: context.repo.owner,
      repo: context.repo.repo,
      issue_number: ${{ steps.check_author.outputs.pr_number }},
-     body: `⏳ Running pre-commit hooks on PR #${{ steps.check_author.outputs.pr_number }}...`
+     body: `⏳ Running [pre-commit hooks](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}) on PR #${{ steps.check_author.outputs.pr_number }}...`
    });

    - name: Checkout PR branch (same-repo)

@@ -141,7 +141,7 @@ jobs:
    - name: Set up Node.js
      if: steps.check_author.outputs.authorized == 'true'
-     uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
+     uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
      with:
        node-version: '20'
        cache: 'npm'
.github/workflows/providers-build.yml (vendored, 57 lines changed)

@@ -14,6 +14,8 @@ on:
      - '.github/workflows/providers-build.yml'
      - 'llama_stack/distributions/**'
      - 'pyproject.toml'
+     - 'containers/Containerfile'
+     - '.dockerignore'

  pull_request:
    paths:

@@ -24,6 +26,8 @@ on:
      - '.github/workflows/providers-build.yml'
      - 'llama_stack/distributions/**'
      - 'pyproject.toml'
+     - 'containers/Containerfile'
+     - '.dockerignore'

  concurrency:
    group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}

@@ -60,15 +64,19 @@ jobs:
    - name: Install dependencies
      uses: ./.github/actions/setup-runner

-   - name: Print build dependencies
+   - name: Install distribution into venv
+     if: matrix.image-type == 'venv'
      run: |
-       uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only
+       uv run llama stack list-deps ${{ matrix.distro }} | xargs -L1 uv pip install

-   - name: Run Llama Stack Build
+   - name: Build container image
+     if: matrix.image-type == 'container'
      run: |
-       # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
-       # LLAMA_STACK_DIR is set to the current directory so we are building from the source
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test
+       docker build . \
+         -f containers/Containerfile \
+         --build-arg INSTALL_MODE=editable \
+         --build-arg DISTRO_NAME=${{ matrix.distro }} \
+         --tag llama-stack:${{ matrix.distro }}-ci

    - name: Print dependencies in the image
      if: matrix.image-type == 'venv'

@@ -86,8 +94,8 @@ jobs:
    - name: Build a single provider
      run: |
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama
+       uv pip install -e .
+       uv run --no-sync llama stack list-deps --providers inference=remote::ollama | xargs -L1 uv pip install

  build-custom-container-distribution:
    runs-on: ubuntu-latest
    steps:

@@ -97,11 +105,16 @@ jobs:
    - name: Install dependencies
      uses: ./.github/actions/setup-runner

-   - name: Build a single provider
+   - name: Build container image
      run: |
-       yq -i '.image_type = "container"' llama_stack/distributions/ci-tests/build.yaml
-       yq -i '.image_name = "test"' llama_stack/distributions/ci-tests/build.yaml
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
+       BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' llama_stack/distributions/ci-tests/build.yaml)
+       docker build . \
+         -f containers/Containerfile \
+         --build-arg INSTALL_MODE=editable \
+         --build-arg DISTRO_NAME=ci-tests \
+         --build-arg BASE_IMAGE="$BASE_IMAGE" \
+         --build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
+         -t llama-stack:ci-tests

    - name: Inspect the container image entrypoint
      run: |

@@ -112,7 +125,7 @@ jobs:
      fi
      entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
      echo "Entrypoint: $entrypoint"
-     if [ "$entrypoint" != "[llama stack run /app/run.yaml]" ]; then
+     if [ "$entrypoint" != "[/usr/local/bin/llama-stack-entrypoint.sh]" ]; then
        echo "Entrypoint is not correct"
        exit 1
      fi

@@ -129,17 +142,19 @@ jobs:
    - name: Pin distribution to UBI9 base
      run: |
        yq -i '
          .image_type = "container" |
          .image_name = "ubi9-test" |
          .distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest"
        ' llama_stack/distributions/ci-tests/build.yaml

-   - name: Build dev container (UBI9)
-     env:
-       USE_COPY_NOT_MOUNT: "true"
-       LLAMA_STACK_DIR: "."
+   - name: Build UBI9 container image
      run: |
-       uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
+       BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' llama_stack/distributions/ci-tests/build.yaml)
+       docker build . \
+         -f containers/Containerfile \
+         --build-arg INSTALL_MODE=editable \
+         --build-arg DISTRO_NAME=ci-tests \
+         --build-arg BASE_IMAGE="$BASE_IMAGE" \
+         --build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
+         -t llama-stack:ci-tests-ubi9

    - name: Inspect UBI9 image
      run: |

@@ -150,7 +165,7 @@ jobs:
      fi
      entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
      echo "Entrypoint: $entrypoint"
-     if [ "$entrypoint" != "[llama stack run /app/run.yaml]" ]; then
+     if [ "$entrypoint" != "[/usr/local/bin/llama-stack-entrypoint.sh]" ]; then
        echo "Entrypoint is not correct"
        exit 1
      fi
.github/workflows/providers-list-deps.yml (new file, vendored, 105 lines)

@@ -0,0 +1,105 @@
name: Test llama stack list-deps

run-name: Test llama stack list-deps

on:
  push:
    branches:
      - main
    paths:
      - 'llama_stack/cli/stack/list_deps.py'
      - 'llama_stack/cli/stack/_list_deps.py'
      - 'llama_stack/core/build.*'
      - 'llama_stack/core/*.sh'
      - '.github/workflows/providers-list-deps.yml'
      - 'llama_stack/templates/**'
      - 'pyproject.toml'

  pull_request:
    paths:
      - 'llama_stack/cli/stack/list_deps.py'
      - 'llama_stack/cli/stack/_list_deps.py'
      - 'llama_stack/core/build.*'
      - 'llama_stack/core/*.sh'
      - '.github/workflows/providers-list-deps.yml'
      - 'llama_stack/templates/**'
      - 'pyproject.toml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      distros: ${{ steps.set-matrix.outputs.distros }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Generate Distribution List
        id: set-matrix
        run: |
          distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
          echo "distros=$distros" >> "$GITHUB_OUTPUT"

  list-deps:
    needs: generate-matrix
    runs-on: ubuntu-latest
    strategy:
      matrix:
        distro: ${{ fromJson(needs.generate-matrix.outputs.distros) }}
        image-type: [venv, container]
      fail-fast: false # We want to run all jobs even if some fail

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: Print dependencies
        run: |
          uv run llama stack list-deps ${{ matrix.distro }}

      - name: Install Distro using llama stack list-deps
        run: |
          # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
          # LLAMA_STACK_DIR is set to the current directory so we are building from the source
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack list-deps ${{ matrix.distro }} | xargs -L1 uv pip install

      - name: Print dependencies in the image
        if: matrix.image-type == 'venv'
        run: |
          uv pip list

  show-single-provider:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: Show a single provider
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack list-deps --providers inference=remote::ollama

  list-deps-from-config:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: list-deps from Config
        env:
          USE_COPY_NOT_MOUNT: "true"
          LLAMA_STACK_DIR: "."
        run: |
          uv run llama stack list-deps llama_stack/distributions/ci-tests/build.yaml
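The `generate-matrix` job above turns a directory listing into a JSON array for the build matrix. As a standalone sketch of the same shell pattern (the paths are made up; only the pipeline itself comes from the workflow):

```bash
# awk -F'/' '{print $(NF-1)}' extracts the parent directory name of each
# build.yaml path; jq -R -s -c packs the names into a compact JSON array.
printf '%s\n' \
  llama_stack/distributions/starter/build.yaml \
  llama_stack/distributions/ci-tests/build.yaml \
  | awk -F'/' '{print $(NF-1)}' \
  | jq -R -s -c 'split("\n")[:-1]'
# -> ["starter","ci-tests"]
```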
.github/workflows/python-build-test.yml (vendored, 2 lines changed)

@@ -24,7 +24,7 @@ jobs:
      uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

    - name: Install uv
-     uses: astral-sh/setup-uv@eb1897b8dc4b5d5bfe39a428a8f2304605e0983c # v7.0.0
+     uses: astral-sh/setup-uv@3259c6206f993105e3a61b142c2d97bf4b9ef83d # v7.1.0
      with:
        python-version: ${{ matrix.python-version }}
        activate-environment: true
@@ -46,9 +46,9 @@ jobs:
        yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
        cat tests/external/ramalama-stack/run.yaml

-   - name: Build distro from config file
+   - name: Install distribution dependencies
      run: |
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/ramalama-stack/build.yaml
+       uv run llama stack list-deps tests/external/ramalama-stack/build.yaml | xargs -L1 uv pip install

    - name: Start Llama Stack server in background
      if: ${{ matrix.image-type }} == 'venv'
.github/workflows/test-external.yml (vendored, 7 lines changed)

@@ -44,11 +44,14 @@ jobs:
    - name: Print distro dependencies
      run: |
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync llama stack build --config tests/external/build.yaml --print-deps-only
+       uv run --no-sync llama stack list-deps tests/external/build.yaml

    - name: Build distro from config file
      run: |
-       USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync llama stack build --config tests/external/build.yaml
+       uv venv ci-test
+       source ci-test/bin/activate
+       uv pip install -e .
+       LLAMA_STACK_LOGGING=all=CRITICAL llama stack list-deps tests/external/build.yaml | xargs -L1 uv pip install

    - name: Start Llama Stack server in background
      if: ${{ matrix.image-type }} == 'venv'
.github/workflows/ui-unit-tests.yml (vendored, 2 lines changed)

@@ -29,7 +29,7 @@ jobs:
      uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

    - name: Setup Node.js
-     uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
+     uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
      with:
        node-version: ${{ matrix.node-version }}
        cache: 'npm'
@@ -167,9 +167,9 @@ under the LICENSE file in the root directory of this source tree.

  Some tips about common tasks you work on while contributing to Llama Stack:

- ### Using `llama stack build`
+ ### Installing dependencies of distributions

- Building a stack image will use the production version of the `llama-stack` and `llama-stack-client` packages. If you are developing with a llama-stack repository checked out and need your code to be reflected in the stack image, set `LLAMA_STACK_DIR` and `LLAMA_STACK_CLIENT_DIR` to the appropriate checked out directories when running any of the `llama` CLI commands.
+ When installing dependencies for a distribution, you can use `llama stack list-deps` to view and install the required packages.

  Example:
  ```bash

@@ -177,7 +177,12 @@ cd work/
  git clone https://github.com/llamastack/llama-stack.git
  git clone https://github.com/llamastack/llama-stack-client-python.git
  cd llama-stack
- LLAMA_STACK_DIR=$(pwd) LLAMA_STACK_CLIENT_DIR=../llama-stack-client-python llama stack build --distro <...>
+
+ # Show dependencies for a distribution
+ llama stack list-deps <distro-name>
+
+ # Install dependencies
+ llama stack list-deps <distro-name> | xargs -L1 uv pip install
  ```

  ### Updating distribution configurations
@@ -27,8 +27,11 @@ MODEL="Llama-4-Scout-17B-16E-Instruct"
  # get meta url from llama.com
  huggingface-cli download meta-llama/$MODEL --local-dir ~/.llama/$MODEL

+ # install dependencies for the distribution
+ llama stack list-deps meta-reference-gpu | xargs -L1 uv pip install
+
  # start a llama stack server
- INFERENCE_MODEL=meta-llama/$MODEL llama stack build --run --template meta-reference-gpu
+ INFERENCE_MODEL=meta-llama/$MODEL llama stack run meta-reference-gpu

  # install client to interact with the server
  pip install llama-stack-client

@@ -89,7 +92,7 @@ As more providers start supporting Llama 4, you can use them in Llama Stack as w
  To try Llama Stack locally, run:

  ```bash
- curl -LsSf https://github.com/meta-llama/llama-stack/raw/main/scripts/install.sh | bash
+ curl -LsSf https://github.com/llamastack/llama-stack/raw/main/scripts/install.sh | bash
  ```

  ### Overview
@@ -98,21 +98,30 @@ data:
      - provider_id: model-context-protocol
        provider_type: remote::model-context-protocol
        config: {}
-   metadata_store:
-     type: postgres
-     host: ${env.POSTGRES_HOST:=localhost}
-     port: ${env.POSTGRES_PORT:=5432}
-     db: ${env.POSTGRES_DB:=llamastack}
-     user: ${env.POSTGRES_USER:=llamastack}
-     password: ${env.POSTGRES_PASSWORD:=llamastack}
-     table_name: llamastack_kvstore
-   inference_store:
-     type: postgres
-     host: ${env.POSTGRES_HOST:=localhost}
-     port: ${env.POSTGRES_PORT:=5432}
-     db: ${env.POSTGRES_DB:=llamastack}
-     user: ${env.POSTGRES_USER:=llamastack}
-     password: ${env.POSTGRES_PASSWORD:=llamastack}
+   storage:
+     backends:
+       kv_default:
+         type: kv_postgres
+         host: ${env.POSTGRES_HOST:=localhost}
+         port: ${env.POSTGRES_PORT:=5432}
+         db: ${env.POSTGRES_DB:=llamastack}
+         user: ${env.POSTGRES_USER:=llamastack}
+         password: ${env.POSTGRES_PASSWORD:=llamastack}
+         table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
+       sql_default:
+         type: sql_postgres
+         host: ${env.POSTGRES_HOST:=localhost}
+         port: ${env.POSTGRES_PORT:=5432}
+         db: ${env.POSTGRES_DB:=llamastack}
+         user: ${env.POSTGRES_USER:=llamastack}
+         password: ${env.POSTGRES_PASSWORD:=llamastack}
+     references:
+       metadata:
+         backend: kv_default
+         namespace: registry
+       inference:
+         backend: sql_default
+         table_name: inference_store
    models:
      - metadata:
          embedding_dimension: 768

@@ -137,5 +146,4 @@ data:
      port: 8323
  kind: ConfigMap
  metadata:
-   creationTimestamp: null
    name: llama-stack-config
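To roll out a run configuration migrated to the new `storage` layout, the ConfigMap is applied like any other (a minimal sketch; the manifest filename is an assumption, the ConfigMap name comes from the hunk above):

```bash
# Apply the updated ConfigMap and spot-check the rendered run config.
kubectl apply -f llama-stack-configmap.yaml
kubectl get configmap llama-stack-config -o yaml | head -n 20
```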
@@ -95,21 +95,30 @@ providers:
    - provider_id: model-context-protocol
      provider_type: remote::model-context-protocol
      config: {}
- metadata_store:
-   type: postgres
-   host: ${env.POSTGRES_HOST:=localhost}
-   port: ${env.POSTGRES_PORT:=5432}
-   db: ${env.POSTGRES_DB:=llamastack}
-   user: ${env.POSTGRES_USER:=llamastack}
-   password: ${env.POSTGRES_PASSWORD:=llamastack}
-   table_name: llamastack_kvstore
- inference_store:
-   type: postgres
-   host: ${env.POSTGRES_HOST:=localhost}
-   port: ${env.POSTGRES_PORT:=5432}
-   db: ${env.POSTGRES_DB:=llamastack}
-   user: ${env.POSTGRES_USER:=llamastack}
-   password: ${env.POSTGRES_PASSWORD:=llamastack}
+ storage:
+   backends:
+     kv_default:
+       type: kv_postgres
+       host: ${env.POSTGRES_HOST:=localhost}
+       port: ${env.POSTGRES_PORT:=5432}
+       db: ${env.POSTGRES_DB:=llamastack}
+       user: ${env.POSTGRES_USER:=llamastack}
+       password: ${env.POSTGRES_PASSWORD:=llamastack}
+       table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
+     sql_default:
+       type: sql_postgres
+       host: ${env.POSTGRES_HOST:=localhost}
+       port: ${env.POSTGRES_PORT:=5432}
+       db: ${env.POSTGRES_DB:=llamastack}
+       user: ${env.POSTGRES_USER:=llamastack}
+       password: ${env.POSTGRES_PASSWORD:=llamastack}
+   references:
+     metadata:
+       backend: kv_default
+       namespace: registry
+     inference:
+       backend: sql_default
+       table_name: inference_store
  models:
    - metadata:
        embedding_dimension: 768
client-sdks/stainless/README.md (new file, 8 lines)

@@ -0,0 +1,8 @@
These are the source-of-truth configuration files used to generate the client SDKs via Stainless.

- `openapi.yml`: the OpenAPI specification for the Llama Stack API.
- `openapi.stainless.yml`: the Stainless _configuration_, which instructs Stainless how to generate the client SDKs.

A small side note: note the `.yml` suffixes; Stainless typically uses that suffix for its configuration files.

These files go hand-in-hand. As of now, only the `openapi.yml` file is automatically generated, using the `run_openapi_generator.sh` script.
client-sdks/stainless/openapi.stainless.yml (new file, 610 lines)

@@ -0,0 +1,610 @@
# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json

organization:
  # Name of your organization or company, used to determine the name of the client
  # and headings.
  name: llama-stack-client
  docs: https://llama-stack.readthedocs.io/en/latest/
  contact: llamastack@meta.com

security:
  - {}
  - BearerAuth: []
security_schemes:
  BearerAuth:
    type: http
    scheme: bearer

# `targets` define the output targets and their customization options, such as
# whether to emit the Node SDK and what its package name should be.
targets:
  node:
    package_name: llama-stack-client
    production_repo: llamastack/llama-stack-client-typescript
    publish:
      npm: false
  python:
    package_name: llama_stack_client
    production_repo: llamastack/llama-stack-client-python
    options:
      use_uv: true
    publish:
      pypi: true
    project_name: llama_stack_client
  kotlin:
    reverse_domain: com.llama_stack_client.api
    production_repo: null
    publish:
      maven: false
  go:
    package_name: llama-stack-client
    production_repo: llamastack/llama-stack-client-go
    options:
      enable_v2: true
      back_compat_use_shared_package: false

# `client_settings` define settings for the API client, such as extra constructor
# arguments (used for authentication), retry behavior, idempotency, etc.
client_settings:
  default_env_prefix: LLAMA_STACK_CLIENT
  opts:
    api_key:
      type: string
      read_env: LLAMA_STACK_CLIENT_API_KEY
      auth: { security_scheme: BearerAuth }
      nullable: true

# `environments` are a map of the name of the environment (e.g. "sandbox",
# "production") to the corresponding url to use.
environments:
  production: http://any-hosted-llama-stack.com

# `pagination` defines [pagination schemes] which provide a template to match
# endpoints and generate next-page and auto-pagination helpers in the SDKs.
pagination:
  - name: datasets_iterrows
    type: offset
    request:
      dataset_id:
        type: string
      start_index:
        type: integer
        x-stainless-pagination-property:
          purpose: offset_count_param
      limit:
        type: integer
    response:
      data:
        type: array
        items:
          type: object
      next_index:
        type: integer
        x-stainless-pagination-property:
          purpose: offset_count_start_field
  - name: openai_cursor_page
    type: cursor
    request:
      limit:
        type: integer
      after:
        type: string
        x-stainless-pagination-property:
          purpose: next_cursor_param
    response:
      data:
        type: array
        items: {}
      has_more:
        type: boolean
      last_id:
        type: string
        x-stainless-pagination-property:
          purpose: next_cursor_field

# `resources` define the structure and organization for your API, such as how
# methods and models are grouped together and accessed. See the [configuration
# guide] for more information.
#
# [configuration guide]:
# https://app.stainlessapi.com/docs/guides/configure#resources
resources:
  $shared:
    models:
      agent_config: AgentConfig
      interleaved_content_item: InterleavedContentItem
      interleaved_content: InterleavedContent
      param_type: ParamType
      safety_violation: SafetyViolation
      sampling_params: SamplingParams
      scoring_result: ScoringResult
      message: Message
      user_message: UserMessage
      completion_message: CompletionMessage
      tool_response_message: ToolResponseMessage
      system_message: SystemMessage
      tool_call: ToolCall
      query_result: RAGQueryResult
      document: RAGDocument
      query_config: RAGQueryConfig
      response_format: ResponseFormat

  toolgroups:
    models:
      tool_group: ToolGroup
      list_tool_groups_response: ListToolGroupsResponse
    methods:
      register: post /v1/toolgroups
      get: get /v1/toolgroups/{toolgroup_id}
      list: get /v1/toolgroups
      unregister: delete /v1/toolgroups/{toolgroup_id}

  tools:
    methods:
      get: get /v1/tools/{tool_name}
      list:
        endpoint: get /v1/tools
        paginated: false

  tool_runtime:
    models:
      tool_def: ToolDef
      tool_invocation_result: ToolInvocationResult
    methods:
      list_tools:
        endpoint: get /v1/tool-runtime/list-tools
        paginated: false
      invoke_tool: post /v1/tool-runtime/invoke
    subresources:
      rag_tool:
        methods:
          insert: post /v1/tool-runtime/rag-tool/insert
          query: post /v1/tool-runtime/rag-tool/query

  responses:
    models:
      response_object_stream: OpenAIResponseObjectStream
      response_object: OpenAIResponseObject
    methods:
      create:
        type: http
        endpoint: post /v1/responses
        streaming:
          stream_event_model: responses.response_object_stream
          param_discriminator: stream
      retrieve: get /v1/responses/{response_id}
      list:
        type: http
        endpoint: get /v1/responses
      delete:
        type: http
        endpoint: delete /v1/responses/{response_id}
    subresources:
      input_items:
        methods:
          list:
            type: http
            endpoint: get /v1/responses/{response_id}/input_items

  conversations:
    models:
      conversation_object: Conversation
    methods:
      create:
        type: http
        endpoint: post /v1/conversations
      retrieve: get /v1/conversations/{conversation_id}
      update:
        type: http
        endpoint: post /v1/conversations/{conversation_id}
      delete:
        type: http
        endpoint: delete /v1/conversations/{conversation_id}
    subresources:
      items:
        methods:
          get:
            type: http
            endpoint: get /v1/conversations/{conversation_id}/items/{item_id}
          list:
            type: http
            endpoint: get /v1/conversations/{conversation_id}/items
          create:
            type: http
            endpoint: post /v1/conversations/{conversation_id}/items

  inspect:
    models:
      healthInfo: HealthInfo
      providerInfo: ProviderInfo
      routeInfo: RouteInfo
      versionInfo: VersionInfo
    methods:
      health: get /v1/health
      version: get /v1/version

  embeddings:
    models:
      create_embeddings_response: OpenAIEmbeddingsResponse
    methods:
      create: post /v1/embeddings

  chat:
    models:
      chat_completion_chunk: OpenAIChatCompletionChunk
    subresources:
      completions:
        methods:
          create:
            type: http
            endpoint: post /v1/chat/completions
            streaming:
              stream_event_model: chat.chat_completion_chunk
              param_discriminator: stream
          list:
            type: http
            endpoint: get /v1/chat/completions
          retrieve:
            type: http
            endpoint: get /v1/chat/completions/{completion_id}

  completions:
    methods:
      create:
        type: http
        endpoint: post /v1/completions
        streaming:
          param_discriminator: stream

  vector_io:
    models:
      queryChunksResponse: QueryChunksResponse
    methods:
      insert: post /v1/vector-io/insert
      query: post /v1/vector-io/query

  vector_stores:
    models:
      vector_store: VectorStoreObject
      list_vector_stores_response: VectorStoreListResponse
      vector_store_delete_response: VectorStoreDeleteResponse
      vector_store_search_response: VectorStoreSearchResponsePage
    methods:
      create: post /v1/vector_stores
      list:
        endpoint: get /v1/vector_stores
      retrieve: get /v1/vector_stores/{vector_store_id}
      update: post /v1/vector_stores/{vector_store_id}
      delete: delete /v1/vector_stores/{vector_store_id}
      search: post /v1/vector_stores/{vector_store_id}/search
    subresources:
      files:
        models:
          vector_store_file: VectorStoreFileObject
        methods:
          list: get /v1/vector_stores/{vector_store_id}/files
          retrieve: get /v1/vector_stores/{vector_store_id}/files/{file_id}
          update: post /v1/vector_stores/{vector_store_id}/files/{file_id}
          delete: delete /v1/vector_stores/{vector_store_id}/files/{file_id}
          create: post /v1/vector_stores/{vector_store_id}/files
          content: get /v1/vector_stores/{vector_store_id}/files/{file_id}/content
      file_batches:
        models:
          vector_store_file_batches: VectorStoreFileBatchObject
          list_vector_store_files_in_batch_response: VectorStoreFilesListInBatchResponse
        methods:
          create: post /v1/vector_stores/{vector_store_id}/file_batches
          retrieve: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}
          list_files: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files
          cancel: post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel

  models:
    models:
      model: Model
      list_models_response: ListModelsResponse
    methods:
      retrieve: get /v1/models/{model_id}
      list:
        endpoint: get /v1/models
        paginated: false
      register: post /v1/models
      unregister: delete /v1/models/{model_id}
    subresources:
      openai:
        methods:
          list:
            endpoint: get /v1/models
            paginated: false

  providers:
    models:
      list_providers_response: ListProvidersResponse
    methods:
      list:
        endpoint: get /v1/providers
        paginated: false
      retrieve: get /v1/providers/{provider_id}

  routes:
    models:
      list_routes_response: ListRoutesResponse
    methods:
      list:
        endpoint: get /v1/inspect/routes
        paginated: false

  moderations:
    models:
      create_response: ModerationObject
    methods:
      create: post /v1/moderations

  safety:
    models:
      run_shield_response: RunShieldResponse
    methods:
      run_shield: post /v1/safety/run-shield

  shields:
    models:
      shield: Shield
      list_shields_response: ListShieldsResponse
    methods:
      retrieve: get /v1/shields/{identifier}
      list:
        endpoint: get /v1/shields
        paginated: false
      register: post /v1/shields
      delete: delete /v1/shields/{identifier}

  synthetic_data_generation:
    models:
      syntheticDataGenerationResponse: SyntheticDataGenerationResponse
    methods:
      generate: post /v1/synthetic-data-generation/generate

  telemetry:
    models:
      span_with_status: SpanWithStatus
      trace: Trace
      query_spans_response: QuerySpansResponse
      event: Event
      query_condition: QueryCondition
    methods:
      query_traces:
        endpoint: post /v1alpha/telemetry/traces
        skip_test_reason: 'unsupported query params in java / kotlin'
      get_span_tree: post /v1alpha/telemetry/spans/{span_id}/tree
      query_spans:
        endpoint: post /v1alpha/telemetry/spans
        skip_test_reason: 'unsupported query params in java / kotlin'
      query_metrics:
        endpoint: post /v1alpha/telemetry/metrics/{metric_name}
        skip_test_reason: 'unsupported query params in java / kotlin'
      # log_event: post /v1alpha/telemetry/events
      save_spans_to_dataset: post /v1alpha/telemetry/spans/export
      get_span: get /v1alpha/telemetry/traces/{trace_id}/spans/{span_id}
      get_trace: get /v1alpha/telemetry/traces/{trace_id}

  scoring:
    methods:
      score: post /v1/scoring/score
      score_batch: post /v1/scoring/score-batch
  scoring_functions:
    methods:
      retrieve: get /v1/scoring-functions/{scoring_fn_id}
      list:
        endpoint: get /v1/scoring-functions
        paginated: false
      register: post /v1/scoring-functions
    models:
      scoring_fn: ScoringFn
      scoring_fn_params: ScoringFnParams
      list_scoring_functions_response: ListScoringFunctionsResponse

  benchmarks:
    methods:
      retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
      list:
        endpoint: get /v1alpha/eval/benchmarks
        paginated: false
      register: post /v1alpha/eval/benchmarks
    models:
      benchmark: Benchmark
      list_benchmarks_response: ListBenchmarksResponse

  files:
    methods:
      create: post /v1/files
      list: get /v1/files
      retrieve: get /v1/files/{file_id}
      delete: delete /v1/files/{file_id}
      content: get /v1/files/{file_id}/content
    models:
      file: OpenAIFileObject
      list_files_response: ListOpenAIFileResponse
      delete_file_response: OpenAIFileDeleteResponse

  alpha:
    subresources:
      inference:
        methods:
          rerank: post /v1alpha/inference/rerank

      post_training:
        models:
          algorithm_config: AlgorithmConfig
          post_training_job: PostTrainingJob
          list_post_training_jobs_response: ListPostTrainingJobsResponse
        methods:
          preference_optimize: post /v1alpha/post-training/preference-optimize
          supervised_fine_tune: post /v1alpha/post-training/supervised-fine-tune
        subresources:
          job:
            methods:
              artifacts: get /v1alpha/post-training/job/artifacts
              cancel: post /v1alpha/post-training/job/cancel
              status: get /v1alpha/post-training/job/status
              list:
                endpoint: get /v1alpha/post-training/jobs
                paginated: false

      eval:
        methods:
          evaluate_rows: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
          run_eval: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
          evaluate_rows_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
          run_eval_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
        subresources:
          jobs:
            methods:
              cancel: delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
              status: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
              retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result
        models:
          evaluate_response: EvaluateResponse
          benchmark_config: BenchmarkConfig
          job: Job

      agents:
        methods:
          create: post /v1alpha/agents
          list: get /v1alpha/agents
          retrieve: get /v1alpha/agents/{agent_id}
          delete: delete /v1alpha/agents/{agent_id}
        models:
          inference_step: InferenceStep
          tool_execution_step: ToolExecutionStep
          tool_response: ToolResponse
          shield_call_step: ShieldCallStep
          memory_retrieval_step: MemoryRetrievalStep
        subresources:
          session:
            models:
              session: Session
            methods:
              list: get /v1alpha/agents/{agent_id}/sessions
              create: post /v1alpha/agents/{agent_id}/session
              delete: delete /v1alpha/agents/{agent_id}/session/{session_id}
              retrieve: get /v1alpha/agents/{agent_id}/session/{session_id}
          steps:
            methods:
              retrieve: get /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}
          turn:
            models:
              turn: Turn
              turn_response_event: AgentTurnResponseEvent
              agent_turn_response_stream_chunk: AgentTurnResponseStreamChunk
            methods:
              create:
                type: http
                endpoint: post /v1alpha/agents/{agent_id}/session/{session_id}/turn
                streaming:
                  stream_event_model: alpha.agents.turn.agent_turn_response_stream_chunk
                  param_discriminator: stream
              retrieve: get /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}
              resume:
                type: http
                endpoint: post /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume
                streaming:
                  stream_event_model: alpha.agents.turn.agent_turn_response_stream_chunk
                  param_discriminator: stream

  beta:
    subresources:
      datasets:
        models:
          list_datasets_response: ListDatasetsResponse
        methods:
          register: post /v1beta/datasets
          retrieve: get /v1beta/datasets/{dataset_id}
          list:
            endpoint: get /v1beta/datasets
            paginated: false
          unregister: delete /v1beta/datasets/{dataset_id}
          iterrows: get /v1beta/datasetio/iterrows/{dataset_id}
          appendrows: post /v1beta/datasetio/append-rows/{dataset_id}

settings:
  license: MIT
  unwrap_response_fields: [ data ]

openapi:
  transformations:
    - command: renameValue
      reason: pydantic reserved name
      args:
        filter:
          only:
            - '$.components.schemas.InferenceStep.properties.model_response'
        rename:
          python:
            property_name: 'inference_model_response'

    # - command: renameValue
    #   reason: pydantic reserved name
    #   args:
    #     filter:
    #       only:
    #         - '$.components.schemas.Model.properties.model_type'
    #     rename:
    #       python:
    #         property_name: 'type'
    - command: mergeObject
      reason: Better return_type using enum
      args:
        target:
          - '$.components.schemas'
        object:
          ReturnType:
            additionalProperties: false
            properties:
              type:
                enum:
                  - string
                  - number
                  - boolean
                  - array
                  - object
                  - json
                  - union
                  - chat_completion_input
                  - completion_input
                  - agent_turn_input
            required:
              - type
            type: object
    - command: replaceProperties
      reason: Replace return type properties with better model (see above)
      args:
        filter:
          only:
            - '$.components.schemas.ScoringFn.properties.return_type'
            - '$.components.schemas.RegisterScoringFunctionRequest.properties.return_type'
        value:
          $ref: '#/components/schemas/ReturnType'
    - command: oneOfToAnyOf
      reason: Prism (mock server) doesn't like one of our requests as it technically matches multiple variants
    - reason: For better names
      command: extractToRefs
      args:
        ref:
          target: '$.components.schemas.ToolCallDelta.properties.tool_call'
          name: '#/components/schemas/ToolCallOrString'

# `readme` is used to configure the code snippets that will be rendered in the
# README.md of various SDKs. In particular, you can change the `headline`
# snippet's endpoint and the arguments to call it with.
readme:
  example_requests:
    default:
      type: request
      endpoint: post /v1/chat/completions
      params: &ref_0 {}
    headline:
      type: request
      endpoint: post /v1/models
      params: *ref_0
    pagination:
      type: request
      endpoint: post /v1/chat/completions
      params: {}
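For orientation, the resource layout above maps directly onto the generated SDK surface. A minimal sketch with the Python target (the package name and env prefix come from the config above; the exact client API shape is assumed, not taken from this diff):

```python
from llama_stack_client import LlamaStackClient

# Reads LLAMA_STACK_CLIENT_API_KEY from the environment per
# `default_env_prefix` above; base_url is an example value.
client = LlamaStackClient(base_url="http://localhost:8321")

# `models.list` corresponds to `list: endpoint: get /v1/models` in the
# resources section of the config.
models = client.models.list()
print(models)
```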
client-sdks/stainless/openapi.yml (new file, 13322 lines)
File diff suppressed because it is too large.
containers/Containerfile (new file, 137 lines)

@@ -0,0 +1,137 @@
# syntax=docker/dockerfile:1.6
#
# This Dockerfile is used to build the Llama Stack container image.
# Example:
#   docker build \
#     -f containers/Containerfile \
#     --build-arg DISTRO_NAME=starter \
#     --tag llama-stack:starter .

ARG BASE_IMAGE=python:3.12-slim
FROM ${BASE_IMAGE}

ARG INSTALL_MODE="pypi"
ARG LLAMA_STACK_DIR="/workspace"
ARG LLAMA_STACK_CLIENT_DIR=""
ARG PYPI_VERSION=""
ARG TEST_PYPI_VERSION=""
ARG KEEP_WORKSPACE=""
ARG DISTRO_NAME="starter"
ARG RUN_CONFIG_PATH=""

ARG UV_HTTP_TIMEOUT=500
ENV UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT}
ENV PYTHONDONTWRITEBYTECODE=1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
WORKDIR /app

RUN set -eux; \
    if command -v dnf >/dev/null 2>&1; then \
        dnf -y update && \
        dnf install -y iputils git net-tools wget \
            vim-minimal python3.12 python3.12-pip python3.12-wheel \
            python3.12-setuptools python3.12-devel gcc gcc-c++ make && \
        ln -sf /usr/bin/pip3.12 /usr/local/bin/pip && \
        ln -sf /usr/bin/python3.12 /usr/local/bin/python && \
        dnf clean all; \
    elif command -v apt-get >/dev/null 2>&1; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            iputils-ping net-tools iproute2 dnsutils telnet \
            curl wget git procps psmisc lsof traceroute bubblewrap \
            gcc g++ && \
        rm -rf /var/lib/apt/lists/*; \
    else \
        echo "Unsupported base image: expected dnf or apt-get" >&2; \
        exit 1; \
    fi

RUN pip install --no-cache-dir uv
ENV UV_SYSTEM_PYTHON=1

ENV INSTALL_MODE=${INSTALL_MODE}
ENV LLAMA_STACK_DIR=${LLAMA_STACK_DIR}
ENV LLAMA_STACK_CLIENT_DIR=${LLAMA_STACK_CLIENT_DIR}
ENV PYPI_VERSION=${PYPI_VERSION}
ENV TEST_PYPI_VERSION=${TEST_PYPI_VERSION}
ENV KEEP_WORKSPACE=${KEEP_WORKSPACE}
ENV DISTRO_NAME=${DISTRO_NAME}
ENV RUN_CONFIG_PATH=${RUN_CONFIG_PATH}

# Copy the repository so editable installs and run configurations are available.
COPY . /workspace

# Install the client package if it is provided
# NOTE: this is installed before llama-stack since llama-stack depends on llama-stack-client-python
RUN set -eux; \
    if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then \
        if [ ! -d "$LLAMA_STACK_CLIENT_DIR" ]; then \
            echo "LLAMA_STACK_CLIENT_DIR is set but $LLAMA_STACK_CLIENT_DIR does not exist" >&2; \
            exit 1; \
        fi; \
        uv pip install --no-cache-dir -e "$LLAMA_STACK_CLIENT_DIR"; \
    fi;

# Install llama-stack
RUN set -eux; \
    if [ "$INSTALL_MODE" = "editable" ]; then \
        if [ ! -d "$LLAMA_STACK_DIR" ]; then \
            echo "INSTALL_MODE=editable requires LLAMA_STACK_DIR to point to a directory inside the build context" >&2; \
            exit 1; \
        fi; \
        uv pip install --no-cache-dir -e "$LLAMA_STACK_DIR"; \
    elif [ "$INSTALL_MODE" = "test-pypi" ]; then \
        uv pip install --no-cache-dir fastapi libcst; \
        if [ -n "$TEST_PYPI_VERSION" ]; then \
            uv pip install --no-cache-dir --extra-index-url https://test.pypi.org/simple/ --index-strategy unsafe-best-match "llama-stack==$TEST_PYPI_VERSION"; \
        else \
            uv pip install --no-cache-dir --extra-index-url https://test.pypi.org/simple/ --index-strategy unsafe-best-match llama-stack; \
        fi; \
    else \
        if [ -n "$PYPI_VERSION" ]; then \
            uv pip install --no-cache-dir "llama-stack==$PYPI_VERSION"; \
        else \
            uv pip install --no-cache-dir llama-stack; \
        fi; \
    fi;

# Install the dependencies for the distribution
RUN set -eux; \
    if [ -z "$DISTRO_NAME" ]; then \
        echo "DISTRO_NAME must be provided" >&2; \
        exit 1; \
    fi; \
    deps="$(llama stack list-deps "$DISTRO_NAME")"; \
    if [ -n "$deps" ]; then \
        printf '%s\n' "$deps" | xargs -L1 uv pip install --no-cache-dir; \
    fi

# Cleanup
RUN set -eux; \
    pip uninstall -y uv; \
    should_remove=1; \
    if [ -n "$KEEP_WORKSPACE" ]; then should_remove=0; fi; \
    if [ "$INSTALL_MODE" = "editable" ]; then should_remove=0; fi; \
    case "$RUN_CONFIG_PATH" in \
        /workspace*) should_remove=0 ;; \
    esac; \
    if [ "$should_remove" -eq 1 ] && [ -d /workspace ]; then rm -rf /workspace; fi

RUN cat <<'EOF' >/usr/local/bin/llama-stack-entrypoint.sh
#!/bin/sh
set -e

if [ -n "$RUN_CONFIG_PATH" ] && [ -f "$RUN_CONFIG_PATH" ]; then
  exec llama stack run "$RUN_CONFIG_PATH" "$@"
fi

if [ -n "$DISTRO_NAME" ]; then
  exec llama stack run "$DISTRO_NAME" "$@"
fi

exec llama stack run "$@"
EOF
RUN chmod +x /usr/local/bin/llama-stack-entrypoint.sh

RUN mkdir -p /.llama /.cache && chmod -R g+rw /app /.llama /.cache

ENTRYPOINT ["/usr/local/bin/llama-stack-entrypoint.sh"]
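The entrypoint script baked in above prefers an explicit `RUN_CONFIG_PATH` and falls back to the baked-in `DISTRO_NAME`. A minimal usage sketch (the build command repeats the example in the Containerfile header; the host port mapping assumes the default server port 8321 used elsewhere in this diff):

```bash
# Build the image from the repo root, then run it with the server port exposed.
docker build \
  -f containers/Containerfile \
  --build-arg DISTRO_NAME=starter \
  --tag llama-stack:starter .
docker run --rm -p 8321:8321 llama-stack:starter
```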
@@ -51,8 +51,8 @@ device: cpu
  You can access the HuggingFace trainer via the `starter` distribution:

  ```bash
- llama stack build --distro starter --image-type venv
- llama stack run ~/.llama/distributions/starter/starter-run.yaml
+ llama stack list-deps starter | xargs -L1 uv pip install
+ llama stack run starter
  ```

  ### Usage Example
@@ -175,8 +175,7 @@ llama-stack-client benchmarks register \
  **1. Start the Llama Stack API Server**

  ```bash
- # Build and run a distribution (example: together)
- llama stack build --distro together --image-type venv
+ llama stack list-deps together | xargs -L1 uv pip install
  llama stack run together
  ```

@@ -209,7 +208,7 @@ The playground works with any Llama Stack distribution. Popular options include:
  <TabItem value="together" label="Together AI">

  ```bash
- llama stack build --distro together --image-type venv
+ llama stack list-deps together | xargs -L1 uv pip install
  llama stack run together
  ```

@@ -222,7 +221,7 @@ llama stack run together
  <TabItem value="ollama" label="Ollama (Local)">

  ```bash
- llama stack build --distro ollama --image-type venv
+ llama stack list-deps ollama | xargs -L1 uv pip install
  llama stack run ollama
  ```

@@ -235,7 +234,7 @@ llama stack run ollama
  <TabItem value="meta-reference" label="Meta Reference">

  ```bash
- llama stack build --distro meta-reference --image-type venv
+ llama stack list-deps meta-reference | xargs -L1 uv pip install
  llama stack run meta-reference
  ```
@@ -20,7 +20,8 @@ RAG enables your applications to reference and recall information from external
  In one terminal, start the Llama Stack server:

  ```bash
- uv run llama stack build --distro starter --image-type venv --run
+ llama stack list-deps starter | xargs -L1 uv pip install
+ llama stack run starter
  ```

  ### 2. Connect with OpenAI Client

@@ -87,18 +88,19 @@ Llama Stack provides OpenAI-compatible RAG capabilities through:
  To enable automatic vector store creation without specifying embedding models, configure a default embedding model in your run.yaml like so:

  ```yaml
  models:
    - model_id: nomic-ai/nomic-embed-text-v1.5
      provider_id: inline::sentence-transformers
      metadata:
        embedding_dimension: 768
-       default_configured: true
+ vector_stores:
+   default_provider_id: faiss
+   default_embedding_model:
+     provider_id: sentence-transformers
+     model_id: nomic-ai/nomic-embed-text-v1.5
  ```

  With this configuration:
- - `client.vector_stores.create()` works without requiring embedding model parameters
- - The system automatically uses the default model and its embedding dimension for any newly created vector store
- - Only one model can be marked as `default_configured: true`
+ - `client.vector_stores.create()` works without requiring embedding model or provider parameters
+ - The system automatically uses the default vector store provider (`faiss`) when multiple providers are available
+ - The system automatically uses the default embedding model (`sentence-transformers/nomic-ai/nomic-embed-text-v1.5`) for any newly created vector store
+ - The `default_provider_id` specifies which vector storage backend to use
+ - The `default_embedding_model` specifies both the inference provider and model for embeddings

  ## Vector Store Operations

@@ -107,14 +109,15 @@ With this configuration:
  You can create vector stores with automatic or explicit embedding model selection:

  ```python
- # Automatic - uses default configured embedding model
+ # Automatic - uses default configured embedding model and vector store provider
  vs = client.vector_stores.create()

- # Explicit - specify embedding model when you need a specific one
+ # Explicit - specify embedding model and/or provider when you need specific ones
  vs = client.vector_stores.create(
      extra_body={
-         "embedding_model": "nomic-ai/nomic-embed-text-v1.5",
-         "embedding_dimension": 768
+         "provider_id": "faiss",  # Optional: specify vector store provider
+         "embedding_model": "sentence-transformers/nomic-ai/nomic-embed-text-v1.5",
+         "embedding_dimension": 768  # Optional: will be auto-detected if not provided
      }
  )
  ```
@@ -62,6 +62,10 @@ The new `/v2` API must be introduced alongside the existing `/v1` API and run in
  When a `/v2` API is introduced, a clear and generous deprecation policy for the `/v1` API must be published simultaneously. This policy must outline the timeline for the eventual removal of the `/v1` API, giving users ample time to migrate.

+ ### Deprecated APIs
+
+ Deprecated APIs are those that are no longer actively maintained or supported. Deprecated APIs are marked with the flag `deprecated = True` in the OpenAPI spec. These APIs will be removed in a future release.
+
  ### API Stability vs. Provider Stability

  The leveling introduced in this document relates to the stability of the API and not specifically the providers within the API.
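For reference, the `deprecated` flag mentioned in the hunk above looks like this in an OpenAPI document (an illustrative sketch; the path is made up and not taken from the Llama Stack spec):

```yaml
paths:
  /v1/example:
    get:
      deprecated: true
      summary: Old endpoint slated for removal in a future release
```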
@@ -158,17 +158,16 @@ under the LICENSE file in the root directory of this source tree.

  Some tips about common tasks you work on while contributing to Llama Stack:

- ### Using `llama stack build`
+ ### Setup for development

- Building a stack image will use the production version of the `llama-stack` and `llama-stack-client` packages. If you are developing with a llama-stack repository checked out and need your code to be reflected in the stack image, set `LLAMA_STACK_DIR` and `LLAMA_STACK_CLIENT_DIR` to the appropriate checked out directories when running any of the `llama` CLI commands.
-
- Example:
  ```bash
  cd work/
  git clone https://github.com/meta-llama/llama-stack.git
- git clone https://github.com/meta-llama/llama-stack-client-python.git
  cd llama-stack
- LLAMA_STACK_DIR=$(pwd) LLAMA_STACK_CLIENT_DIR=../llama-stack-client-python llama stack build --distro <...>
+ uv run llama stack list-deps <distro-name> | xargs -L1 uv pip install
+
+ # (Optional) If you are developing the llama-stack-client-python package, you can add it as an editable package.
+ git clone https://github.com/meta-llama/llama-stack-client-python.git
+ uv add --editable ../llama-stack-client-python
  ```

  ### Updating distribution configurations
@@ -67,7 +67,7 @@ def get_base_url(self) -> str:

## Testing the Provider

Before running tests, you must have required dependencies installed. This depends on the providers or distributions you are testing. For example, if you are testing the `together` distribution, you should install dependencies via `llama stack build --distro together`.
Before running tests, you must have required dependencies installed. This depends on the providers or distributions you are testing. For example, if you are testing the `together` distribution, install its dependencies with `llama stack list-deps together | xargs -L1 uv pip install`.

### 1. Integration Testing

@@ -5,225 +5,79 @@ sidebar_label: Build your own Distribution
sidebar_position: 3
---

This guide will walk you through the steps to get started with building a Llama Stack distribution from scratch with your choice of API providers.
This guide walks you through inspecting existing distributions, customising their configuration, and building runnable artefacts for your own deployment.

### Explore existing distributions

### Setting your log level
All first-party distributions live under `llama_stack/distributions/`. Each directory contains:

In order to specify the proper logging level users can apply the following environment variable `LLAMA_STACK_LOGGING` with the following format:
- `build.yaml` – the distribution specification (providers, additional dependencies, optional external provider directories).
- `run.yaml` – sample run configuration (when provided).
- Documentation fragments that power this site.

`LLAMA_STACK_LOGGING=server=debug;core=info`

Where each category is one of the following:

- all
- core
- server
- router
- inference
- agents
- safety
- eval
- tools
- client

Each category can be set to any of the following log levels:

- debug
- info
- warning
- error
- critical

The default global log level is `info`. `all` sets the log level for all components.

A user can also set `LLAMA_STACK_LOG_FILE`, which will pipe the logs to the specified path as well as to the terminal. An example would be: `export LLAMA_STACK_LOG_FILE=server.log`
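
For instance, the following shell setup (values are illustrative) raises the server component to debug, keeps everything else at warning, and mirrors output to a file:

```bash
# Verbose server logs, warnings everywhere else; also write logs to server.log
export LLAMA_STACK_LOGGING="server=debug;all=warning"
export LLAMA_STACK_LOG_FILE=server.log
llama stack run starter
```
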
### Llama Stack Build

In order to build your own distribution, we recommend you clone the `llama-stack` repository.


```
git clone git@github.com:meta-llama/llama-stack.git
cd llama-stack
pip install -e .
```
Use the CLI to build your distribution.
The main points to consider are:
1. **Image Type** - Do you want a venv environment or a Container (e.g. Docker)?
2. **Template** - Do you want to use a template to build your distribution, or start from scratch?
3. **Config** - Do you want to use a pre-existing config file to build your distribution?

```
llama stack build -h
usage: llama stack build [-h] [--config CONFIG] [--template TEMPLATE] [--distro DISTRIBUTION] [--list-distros] [--image-type {container,venv}] [--image-name IMAGE_NAME] [--print-deps-only]
                         [--run] [--providers PROVIDERS]

Build a Llama stack container

options:
  -h, --help            show this help message and exit
  --config CONFIG       Path to a config file to use for the build. You can find example configs in llama_stack.cores/**/build.yaml. If this argument is not provided, you will be prompted to
                        enter information interactively (default: None)
  --template TEMPLATE   (deprecated) Name of the example template config to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default:
                        None)
  --distro DISTRIBUTION, --distribution DISTRIBUTION
                        Name of the distribution to use for build. You may use `llama stack build --list-distros` to check out the available distributions (default: None)
  --list-distros, --list-distributions
                        Show the available distributions for building a Llama Stack distribution (default: False)
  --image-type {container,venv}
                        Image Type to use for the build. If not specified, will use the image type from the template config. (default: None)
  --image-name IMAGE_NAME
                        [for image-type=container|venv] Name of the virtual environment to use for the build. If not specified, currently active environment will be used if found. (default:
                        None)
  --print-deps-only     Print the dependencies for the stack only, without building the stack (default: False)
  --run                 Run the stack after building using the same image type, name, and other applicable arguments (default: False)
  --providers PROVIDERS
                        Build a config for a list of providers and only those providers. This list is formatted like: api1=provider1,api2=provider2. Where there can be multiple providers per
                        API. (default: None)
```

After this step is complete, a file named `<name>-build.yaml` and template file `<name>-run.yaml` will be generated and saved at the output file path specified at the end of the command.
Browse that folder to understand available providers and copy a distribution to use as a starting point. When creating a new stack, duplicate an existing directory, rename it, and adjust the `build.yaml` file to match your requirements.

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

<Tabs>
<TabItem value="template" label="Building from a template">
To build from alternative API providers, we provide distribution templates for users to get started building a distribution backed by different providers.
<TabItem value="container" label="Building a container">

The following command will allow you to see the available templates and their corresponding providers.
```
llama stack build --list-templates
Use the Containerfile at `containers/Containerfile`, which installs `llama-stack`, resolves distribution dependencies via `llama stack list-deps`, and sets the entrypoint to `llama stack run`.

```bash
docker build . \
  -f containers/Containerfile \
  --build-arg DISTRO_NAME=starter \
  --tag llama-stack:starter
```

```
+------------------------------+-----------------------------------------------------------------------------+
| Template Name                | Description                                                                 |
+------------------------------+-----------------------------------------------------------------------------+
| watsonx                      | Use watsonx for running LLM inference                                       |
+------------------------------+-----------------------------------------------------------------------------+
| vllm-gpu                     | Use a built-in vLLM engine for running LLM inference                        |
+------------------------------+-----------------------------------------------------------------------------+
| together                     | Use Together.AI for running LLM inference                                   |
+------------------------------+-----------------------------------------------------------------------------+
| tgi                          | Use (an external) TGI server for running LLM inference                      |
+------------------------------+-----------------------------------------------------------------------------+
| starter                      | Quick start template for running Llama Stack with several popular providers |
+------------------------------+-----------------------------------------------------------------------------+
| sambanova                    | Use SambaNova for running LLM inference and safety                          |
+------------------------------+-----------------------------------------------------------------------------+
| remote-vllm                  | Use (an external) vLLM server for running LLM inference                     |
+------------------------------+-----------------------------------------------------------------------------+
| postgres-demo                | Quick start template for running Llama Stack with several popular providers |
+------------------------------+-----------------------------------------------------------------------------+
| passthrough                  | Use Passthrough hosted llama-stack endpoint for LLM inference               |
+------------------------------+-----------------------------------------------------------------------------+
| open-benchmark               | Distribution for running open benchmarks                                    |
+------------------------------+-----------------------------------------------------------------------------+
| ollama                       | Use (an external) Ollama server for running LLM inference                   |
+------------------------------+-----------------------------------------------------------------------------+
| nvidia                       | Use NVIDIA NIM for running LLM inference, evaluation and safety             |
+------------------------------+-----------------------------------------------------------------------------+
| meta-reference-gpu           | Use Meta Reference for running LLM inference                                |
+------------------------------+-----------------------------------------------------------------------------+
| llama_api                    | Distribution for running e2e tests in CI                                    |
+------------------------------+-----------------------------------------------------------------------------+
| hf-serverless                | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| hf-endpoint                  | Use (an external) Hugging Face Inference Endpoint for running LLM inference |
+------------------------------+-----------------------------------------------------------------------------+
| groq                         | Use Groq for running LLM inference                                          |
+------------------------------+-----------------------------------------------------------------------------+
| fireworks                    | Use Fireworks.AI for running LLM inference                                  |
+------------------------------+-----------------------------------------------------------------------------+
| experimental-post-training   | Experimental template for post training                                     |
+------------------------------+-----------------------------------------------------------------------------+
| dell                         | Dell's distribution of Llama Stack. TGI inference via Dell's custom         |
|                              | container                                                                   |
+------------------------------+-----------------------------------------------------------------------------+
| ci-tests                     | Distribution for running e2e tests in CI                                    |
+------------------------------+-----------------------------------------------------------------------------+
| cerebras                     | Use Cerebras for running LLM inference                                      |
+------------------------------+-----------------------------------------------------------------------------+
| bedrock                      | Use AWS Bedrock for running LLM inference and safety                        |
+------------------------------+-----------------------------------------------------------------------------+
```
Handy build arguments:

You may then pick a template to build your distribution with providers fitted to your liking.
- `DISTRO_NAME` – distribution directory name (defaults to `starter`).
- `RUN_CONFIG_PATH` – absolute path inside the build context for a run config that should be baked into the image (e.g. `/workspace/run.yaml`).
- `INSTALL_MODE=editable` – install the repository copied into `/workspace` with `uv pip install -e`. Pair it with `--build-arg LLAMA_STACK_DIR=/workspace`.
- `LLAMA_STACK_CLIENT_DIR` – optional editable install of the Python client.
- `PYPI_VERSION` / `TEST_PYPI_VERSION` – pin specific releases when not using editable installs.
- `KEEP_WORKSPACE=1` – retain `/workspace` in the final image if you need to access additional files (such as sample configs or provider bundles).
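
As a sketch, a development build that combines several of these arguments (an editable install of the checked-out repository, plus a baked-in run config) might look like this; the paths and image tag are illustrative:

```bash
docker build . \
  -f containers/Containerfile \
  --build-arg DISTRO_NAME=starter \
  --build-arg INSTALL_MODE=editable \
  --build-arg LLAMA_STACK_DIR=/workspace \
  --build-arg RUN_CONFIG_PATH=/workspace/run.yaml \
  --tag llama-stack:starter-dev
```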

For example, to build a distribution with TGI as the inference provider, you can run:
```
$ llama stack build --distro starter
...
You can now edit ~/.llama/distributions/llamastack-starter/starter-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-starter/starter-run.yaml`
```
Make sure any custom `build.yaml`, run configs, or provider directories you reference are included in the Docker build context so the Containerfile can read them.

```{tip}
The generated `run.yaml` file is a starting point for your configuration. For comprehensive guidance on customizing it for your specific needs, infrastructure, and deployment scenarios, see [Customizing Your run.yaml Configuration](customizing_run_yaml.md).
```
</TabItem>
<TabItem value="scratch" label="Building from Scratch">
<TabItem value="external" label="Building with external providers">

If the provided templates do not fit your use case, you can start by running `llama stack build`, which will launch an interactive wizard that prompts you for the build configuration.
External providers live outside the main repository but can be bundled by pointing `external_providers_dir` to a directory that contains your provider packages.

It would be best to start with a template and understand the structure of the config file and the various concepts (APIs, providers, resources, etc.) before starting from scratch.
```
llama stack build
1. Copy providers into the build context, for example `cp -R path/to/providers providers.d`.
2. Update `build.yaml` with the directory and provider entries.
3. Adjust run configs to use the in-container path (usually `/.llama/providers.d`). Pass `--build-arg RUN_CONFIG_PATH=/workspace/run.yaml` if you want to bake the config; the sketch after this list pulls these steps together.
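
Pulled together, those steps might look like this sketch; the provider directory, distro name, and image tag are assumptions for illustration:

```bash
# 1. Copy the external provider package into the build context
cp -R path/to/providers providers.d

# 2. build.yaml should reference the bundled directory, e.g.
#    external_providers_dir: /workspace/providers.d

# 3. Build, baking in a run config that points at the in-container path
docker build . \
  -f containers/Containerfile \
  --build-arg DISTRO_NAME=my-distro \
  --build-arg RUN_CONFIG_PATH=/workspace/run.yaml \
  --tag llama-stack:external-providers
```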

> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
> Enter the image type you want your Llama Stack to be built as (container or venv): venv

Llama Stack is composed of several APIs working together. Let's select
the provider types (implementations) you want to use for these APIs.

Tip: use <TAB> to see options for the providers.

> Enter provider for API inference: inline::meta-reference
> Enter provider for API safety: inline::llama-guard
> Enter provider for API agents: inline::meta-reference
> Enter provider for API memory: inline::faiss
> Enter provider for API datasetio: inline::meta-reference
> Enter provider for API scoring: inline::meta-reference
> Enter provider for API eval: inline::meta-reference
> Enter provider for API telemetry: inline::meta-reference

> (Optional) Enter a short description for your Llama Stack:

You can now edit ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml and run `llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml`
```
</TabItem>
<TabItem value="config" label="Building from a pre-existing build config file">
- In addition to templates, you may customize the build to your liking through editing config files and build from config files with the following command.

- The config file will be of contents like the ones in `llama_stack/distributions/*build.yaml`.

```
llama stack build --config llama_stack/distributions/starter/build.yaml
```
</TabItem>
<TabItem value="external" label="Building with External Providers">

Llama Stack supports external providers that live outside of the main codebase. This allows you to create and maintain your own providers independently or use community-provided providers.

To build a distribution with external providers, you need to:

1. Configure the `external_providers_dir` in your build configuration file:
Example `build.yaml` excerpt for a custom Ollama provider:

```yaml
# Example my-external-stack.yaml with external providers
version: '2'
distribution_spec:
  description: Custom distro for CI tests
  providers:
    inference:
    - remote::custom_ollama
    # Add more providers as needed
image_type: container
image_name: ci-test
# Path to external provider implementations
external_providers_dir: ~/.llama/providers.d
    - remote::custom_ollama
external_providers_dir: /workspace/providers.d
```

Inside `providers.d/custom_ollama/provider.py`, define `get_provider_spec()` so the CLI can discover dependencies:

```python
from llama_stack.providers.datatypes import ProviderSpec


def get_provider_spec() -> ProviderSpec:
    return ProviderSpec(
        provider_type="remote::custom_ollama",
        module="llama_stack_ollama_provider",
        config_class="llama_stack_ollama_provider.config.OllamaImplConfig",
        pip_packages=[
            "ollama",
            "aiohttp",
            "llama-stack-provider-ollama",
        ],
    )
```

Here's an example for a custom Ollama provider:

@@ -232,9 +86,9 @@ Here's an example for a custom Ollama provider:
adapter:
  adapter_type: custom_ollama
  pip_packages:
  - ollama
  - aiohttp
  - llama-stack-provider-ollama # This is the provider package
    - ollama
    - aiohttp
    - llama-stack-provider-ollama # This is the provider package
  config_class: llama_stack_ollama_provider.config.OllamaImplConfig
  module: llama_stack_ollama_provider
api_dependencies: []

@@ -245,53 +99,22 @@ The `pip_packages` section lists the Python packages required by the provider, a
provider package itself. The package must be available on PyPI or can be provided from a local
directory or a git repository (git must be installed on the build environment).

2. Build your distribution using the config file:
For deeper guidance, see the [External Providers documentation](../providers/external/).

```
llama stack build --config my-external-stack.yaml
```

For more information on external providers, including directory structure, provider types, and implementation requirements, see the [External Providers documentation](../providers/external/).
</TabItem>
<TabItem value="container" label="Building Container">
</Tabs>

:::tip Podman Alternative
Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
:::
### Run your stack server

To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.

```
llama stack build --distro starter --image-type container
```

```
$ llama stack build --distro starter --image-type container
...
Containerfile created successfully in /tmp/tmp.viA3a3Rdsg/Containerfile

FROM python:3.10-slim
...

You can now edit ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml and run `llama stack run ~/meta-llama/llama-stack/tmp/configs/ollama-run.yaml`
```

Now set some environment variables for the inference model ID and Llama Stack Port and create a local directory to mount into the container's file system.
After building the image, launch it directly with Docker or Podman—the entrypoint calls `llama stack run` using the baked distribution or the bundled run config:

```bash
export INFERENCE_MODEL="llama3.2:3b"
export LLAMA_STACK_PORT=8321
mkdir -p ~/.llama
```

After this step is successful, you should be able to find the built container image and test it with the below Docker command:

```
docker run -d \
  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
  -v ~/.llama:/root/.llama \
  -e INFERENCE_MODEL=$INFERENCE_MODEL \
  -e OLLAMA_URL=http://host.docker.internal:11434 \
  localhost/distribution-ollama:dev \
  llama-stack:starter \
  --port $LLAMA_STACK_PORT
```

@@ -311,131 +134,14 @@ Here are the docker flags and their uses:

* `--port $LLAMA_STACK_PORT`: Port number for the server to listen on

</TabItem>
</Tabs>


### Running your Stack server
Now, let's start the Llama Stack Distribution Server. You will need the YAML configuration file which was written out at the end by the `llama stack build` step.
If you prepared a custom run config, mount it into the container and reference it explicitly:

```bash
docker run \
  -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
  -v $(pwd)/run.yaml:/app/run.yaml \
  llama-stack:starter \
  /app/run.yaml
```
llama stack run -h
usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME]
                       [--image-type {venv}] [--enable-ui]
                       [config | distro]

Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.

positional arguments:
  config | distro  Path to config file to use for the run or name of known distro (`llama stack list` for a list). (default: None)

options:
  -h, --help            show this help message and exit
  --port PORT           Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. (default: 8321)
  --image-name IMAGE_NAME
                        [DEPRECATED] This flag is no longer supported. Please activate your virtual environment before running. (default: None)
  --image-type {venv}
                        [DEPRECATED] This flag is no longer supported. Please activate your virtual environment before running. (default: None)
  --enable-ui           Start the UI server (default: False)
```

**Note:** Container images built with `llama stack build --image-type container` cannot be run using `llama stack run`. Instead, they must be run directly using Docker or Podman commands as shown in the container building section above.

```
# Start using template name
llama stack run tgi

# Start using config file
llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml
```

```
$ llama stack run ~/.llama/distributions/llamastack-my-local-stack/my-local-stack-run.yaml

Serving API inspect
GET /health
GET /providers/list
GET /routes/list
Serving API inference
POST /inference/chat_completion
POST /inference/completion
POST /inference/embeddings
...
Serving API agents
POST /agents/create
POST /agents/session/create
POST /agents/turn/create
POST /agents/delete
POST /agents/session/delete
POST /agents/session/get
POST /agents/step/get
POST /agents/turn/get

Listening on ['::', '0.0.0.0']:8321
INFO: Started server process [2935911]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://['::', '0.0.0.0']:8321 (Press CTRL+C to quit)
INFO: 2401:db00:35c:2d2b:face:0:c9:0:54678 - "GET /models/list HTTP/1.1" 200 OK
```
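
Once the server reports that it is listening, you can sanity-check it from another terminal. A minimal check, assuming the default port and the `GET /health` route shown in the startup log above:

```bash
curl http://localhost:8321/health
```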

### Listing Distributions
Using the list command, you can view all existing Llama Stack distributions, including stacks built from templates, from scratch, or using custom configuration files.

```
llama stack list -h
usage: llama stack list [-h]

list the build stacks

options:
  -h, --help  show this help message and exit
```

Example Usage

```
llama stack list
```

```
+------------------------------+---------------------------------------------+--------------+------------+
| Stack Name                   | Path                                        | Build Config | Run Config |
+------------------------------+---------------------------------------------+--------------+------------+
| together                     | ~/.llama/distributions/together             | Yes          | No         |
+------------------------------+---------------------------------------------+--------------+------------+
| bedrock                      | ~/.llama/distributions/bedrock              | Yes          | No         |
+------------------------------+---------------------------------------------+--------------+------------+
| starter                      | ~/.llama/distributions/starter              | Yes          | Yes        |
+------------------------------+---------------------------------------------+--------------+------------+
| remote-vllm                  | ~/.llama/distributions/remote-vllm          | Yes          | Yes        |
+------------------------------+---------------------------------------------+--------------+------------+
```

### Removing a Distribution
Use the remove command to delete a distribution you've previously built.

```
llama stack rm -h
usage: llama stack rm [-h] [--all] [name]

Remove the build stack

positional arguments:
  name        Name of the stack to delete (default: None)

options:
  -h, --help  show this help message and exit
  --all, -a   Delete all stacks (use with caution) (default: False)
```

Example
```
llama stack rm llamastack-test
```

To keep your environment organized and avoid clutter, consider using `llama stack list` to review old or unused distributions and `llama stack rm <name>` to delete them when they're no longer needed.

### Troubleshooting

If you encounter any issues, ask questions in our Discord, search through our [GitHub Issues](https://github.com/meta-llama/llama-stack/issues), or file a new issue.

@@ -44,18 +44,32 @@ providers:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence_store:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/agents_store.db
      persistence:
        agent_state:
          backend: kv_default
          namespace: agents
        responses:
          backend: sql_default
          table_name: responses
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config: {}
metadata_store:
  namespace: null
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/registry.db
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/kvstore.db
    sql_default:
      type: sql_sqlite
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/sqlstore.db
  references:
    metadata:
      backend: kv_default
      namespace: registry
    inference:
      backend: sql_default
      table_name: inference_store
models:
- metadata: {}
  model_id: ${env.INFERENCE_MODEL}

@@ -12,7 +12,7 @@ This avoids the overhead of setting up a server.
```bash
# setup
uv pip install llama-stack
llama stack build --distro starter --image-type venv
llama stack list-deps starter | xargs -L1 uv pip install
```

```python

@@ -1,56 +1,155 @@
apiVersion: v1
data:
  stack_run_config.yaml: "version: '2'\nimage_name: kubernetes-demo\napis:\n- agents\n-
    inference\n- files\n- safety\n- telemetry\n- tool_runtime\n- vector_io\nproviders:\n
    \ inference:\n  - provider_id: vllm-inference\n    provider_type: remote::vllm\n
    \   config:\n      url: ${env.VLLM_URL:=http://localhost:8000/v1}\n      max_tokens:
    ${env.VLLM_MAX_TOKENS:=4096}\n      api_token: ${env.VLLM_API_TOKEN:=fake}\n      tls_verify:
    ${env.VLLM_TLS_VERIFY:=true}\n  - provider_id: vllm-safety\n    provider_type:
    remote::vllm\n    config:\n      url: ${env.VLLM_SAFETY_URL:=http://localhost:8000/v1}\n
    \     max_tokens: ${env.VLLM_MAX_TOKENS:=4096}\n      api_token: ${env.VLLM_API_TOKEN:=fake}\n
    \     tls_verify: ${env.VLLM_TLS_VERIFY:=true}\n  - provider_id: sentence-transformers\n
    \   provider_type: inline::sentence-transformers\n    config: {}\n  vector_io:\n
    \ - provider_id: ${env.ENABLE_CHROMADB:+chromadb}\n    provider_type: remote::chromadb\n
    \   config:\n      url: ${env.CHROMADB_URL:=}\n      kvstore:\n        type: postgres\n
    \       host: ${env.POSTGRES_HOST:=localhost}\n        port: ${env.POSTGRES_PORT:=5432}\n
    \       db: ${env.POSTGRES_DB:=llamastack}\n        user: ${env.POSTGRES_USER:=llamastack}\n
    \       password: ${env.POSTGRES_PASSWORD:=llamastack}\n  files:\n  - provider_id:
    meta-reference-files\n    provider_type: inline::localfs\n    config:\n      storage_dir:
    ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}\n      metadata_store:\n
    \       type: sqlite\n        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db
    \ \n  safety:\n  - provider_id: llama-guard\n    provider_type: inline::llama-guard\n
    \   config:\n      excluded_categories: []\n  agents:\n  - provider_id: meta-reference\n
    \   provider_type: inline::meta-reference\n    config:\n      persistence_store:\n
    \       type: postgres\n        host: ${env.POSTGRES_HOST:=localhost}\n        port:
    ${env.POSTGRES_PORT:=5432}\n        db: ${env.POSTGRES_DB:=llamastack}\n        user:
    ${env.POSTGRES_USER:=llamastack}\n        password: ${env.POSTGRES_PASSWORD:=llamastack}\n
    \     responses_store:\n        type: postgres\n        host: ${env.POSTGRES_HOST:=localhost}\n
    \       port: ${env.POSTGRES_PORT:=5432}\n        db: ${env.POSTGRES_DB:=llamastack}\n
    \       user: ${env.POSTGRES_USER:=llamastack}\n        password: ${env.POSTGRES_PASSWORD:=llamastack}\n
    \ telemetry:\n  - provider_id: meta-reference\n    provider_type: inline::meta-reference\n
    \   config:\n      service_name: \"${env.OTEL_SERVICE_NAME:=\\u200B}\"\n      sinks:
    ${env.TELEMETRY_SINKS:=console}\n  tool_runtime:\n  - provider_id: brave-search\n
    \   provider_type: remote::brave-search\n    config:\n      api_key: ${env.BRAVE_SEARCH_API_KEY:+}\n
    \     max_results: 3\n  - provider_id: tavily-search\n    provider_type: remote::tavily-search\n
    \   config:\n      api_key: ${env.TAVILY_SEARCH_API_KEY:+}\n      max_results:
    3\n  - provider_id: rag-runtime\n    provider_type: inline::rag-runtime\n    config:
    {}\n  - provider_id: model-context-protocol\n    provider_type: remote::model-context-protocol\n
    \   config: {}\nmetadata_store:\n  type: postgres\n  host: ${env.POSTGRES_HOST:=localhost}\n
    \ port: ${env.POSTGRES_PORT:=5432}\n  db: ${env.POSTGRES_DB:=llamastack}\n  user:
    ${env.POSTGRES_USER:=llamastack}\n  password: ${env.POSTGRES_PASSWORD:=llamastack}\n
    \ table_name: llamastack_kvstore\ninference_store:\n  type: postgres\n  host:
    ${env.POSTGRES_HOST:=localhost}\n  port: ${env.POSTGRES_PORT:=5432}\n  db: ${env.POSTGRES_DB:=llamastack}\n
    \ user: ${env.POSTGRES_USER:=llamastack}\n  password: ${env.POSTGRES_PASSWORD:=llamastack}\nmodels:\n-
    metadata:\n    embedding_dimension: 384\n  model_id: all-MiniLM-L6-v2\n  provider_id:
    sentence-transformers\n  model_type: embedding\n- metadata: {}\n  model_id: ${env.INFERENCE_MODEL}\n
    \ provider_id: vllm-inference\n  model_type: llm\n- metadata: {}\n  model_id:
    ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}\n  provider_id: vllm-safety\n
    \ model_type: llm\nshields:\n- shield_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}\nvector_dbs:
    []\ndatasets: []\nscoring_fns: []\nbenchmarks: []\ntool_groups:\n- toolgroup_id:
    builtin::websearch\n  provider_id: tavily-search\n- toolgroup_id: builtin::rag\n
    \ provider_id: rag-runtime\nserver:\n  port: 8321\n  auth:\n    provider_config:\n
    \     type: github_token\n"
  stack_run_config.yaml: |
    version: '2'
    image_name: kubernetes-demo
    apis:
    - agents
    - inference
    - files
    - safety
    - telemetry
    - tool_runtime
    - vector_io
    providers:
      inference:
      - provider_id: vllm-inference
        provider_type: remote::vllm
        config:
          url: ${env.VLLM_URL:=http://localhost:8000/v1}
          max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
          api_token: ${env.VLLM_API_TOKEN:=fake}
          tls_verify: ${env.VLLM_TLS_VERIFY:=true}
      - provider_id: vllm-safety
        provider_type: remote::vllm
        config:
          url: ${env.VLLM_SAFETY_URL:=http://localhost:8000/v1}
          max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
          api_token: ${env.VLLM_API_TOKEN:=fake}
          tls_verify: ${env.VLLM_TLS_VERIFY:=true}
      - provider_id: sentence-transformers
        provider_type: inline::sentence-transformers
        config: {}
      vector_io:
      - provider_id: ${env.ENABLE_CHROMADB:+chromadb}
        provider_type: remote::chromadb
        config:
          url: ${env.CHROMADB_URL:=}
          kvstore:
            type: postgres
            host: ${env.POSTGRES_HOST:=localhost}
            port: ${env.POSTGRES_PORT:=5432}
            db: ${env.POSTGRES_DB:=llamastack}
            user: ${env.POSTGRES_USER:=llamastack}
            password: ${env.POSTGRES_PASSWORD:=llamastack}
      files:
      - provider_id: meta-reference-files
        provider_type: inline::localfs
        config:
          storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
          metadata_store:
            type: sqlite
            db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db
      safety:
      - provider_id: llama-guard
        provider_type: inline::llama-guard
        config:
          excluded_categories: []
      agents:
      - provider_id: meta-reference
        provider_type: inline::meta-reference
        config:
          persistence_store:
            type: postgres
            host: ${env.POSTGRES_HOST:=localhost}
            port: ${env.POSTGRES_PORT:=5432}
            db: ${env.POSTGRES_DB:=llamastack}
            user: ${env.POSTGRES_USER:=llamastack}
            password: ${env.POSTGRES_PASSWORD:=llamastack}
          responses_store:
            type: postgres
            host: ${env.POSTGRES_HOST:=localhost}
            port: ${env.POSTGRES_PORT:=5432}
            db: ${env.POSTGRES_DB:=llamastack}
            user: ${env.POSTGRES_USER:=llamastack}
            password: ${env.POSTGRES_PASSWORD:=llamastack}
      telemetry:
      - provider_id: meta-reference
        provider_type: inline::meta-reference
        config:
          service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
          sinks: ${env.TELEMETRY_SINKS:=console}
      tool_runtime:
      - provider_id: brave-search
        provider_type: remote::brave-search
        config:
          api_key: ${env.BRAVE_SEARCH_API_KEY:+}
          max_results: 3
      - provider_id: tavily-search
        provider_type: remote::tavily-search
        config:
          api_key: ${env.TAVILY_SEARCH_API_KEY:+}
          max_results: 3
      - provider_id: rag-runtime
        provider_type: inline::rag-runtime
        config: {}
      - provider_id: model-context-protocol
        provider_type: remote::model-context-protocol
        config: {}
    storage:
      backends:
        kv_default:
          type: kv_postgres
          host: ${env.POSTGRES_HOST:=localhost}
          port: ${env.POSTGRES_PORT:=5432}
          db: ${env.POSTGRES_DB:=llamastack}
          user: ${env.POSTGRES_USER:=llamastack}
          password: ${env.POSTGRES_PASSWORD:=llamastack}
          table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
        sql_default:
          type: sql_postgres
          host: ${env.POSTGRES_HOST:=localhost}
          port: ${env.POSTGRES_PORT:=5432}
          db: ${env.POSTGRES_DB:=llamastack}
          user: ${env.POSTGRES_USER:=llamastack}
          password: ${env.POSTGRES_PASSWORD:=llamastack}
      references:
        metadata:
          backend: kv_default
          namespace: registry
        inference:
          backend: sql_default
          table_name: inference_store
    models:
    - metadata:
        embedding_dimension: 768
      model_id: nomic-embed-text-v1.5
      provider_id: sentence-transformers
      model_type: embedding
    - metadata: {}
      model_id: ${env.INFERENCE_MODEL}
      provider_id: vllm-inference
      model_type: llm
    - metadata: {}
      model_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}
      provider_id: vllm-safety
      model_type: llm
    shields:
    - shield_id: ${env.SAFETY_MODEL:=meta-llama/Llama-Guard-3-1B}
    vector_dbs: []
    datasets: []
    scoring_fns: []
    benchmarks: []
    tool_groups:
    - toolgroup_id: builtin::websearch
      provider_id: tavily-search
    - toolgroup_id: builtin::rag
      provider_id: rag-runtime
    server:
      port: 8321
      auth:
        provider_config:
          type: github_token
kind: ConfigMap
metadata:
  creationTimestamp: null
  name: llama-stack-config

@@ -93,21 +93,30 @@ providers:
  - provider_id: model-context-protocol
    provider_type: remote::model-context-protocol
    config: {}
metadata_store:
  type: postgres
  host: ${env.POSTGRES_HOST:=localhost}
  port: ${env.POSTGRES_PORT:=5432}
  db: ${env.POSTGRES_DB:=llamastack}
  user: ${env.POSTGRES_USER:=llamastack}
  password: ${env.POSTGRES_PASSWORD:=llamastack}
  table_name: llamastack_kvstore
inference_store:
  type: postgres
  host: ${env.POSTGRES_HOST:=localhost}
  port: ${env.POSTGRES_PORT:=5432}
  db: ${env.POSTGRES_DB:=llamastack}
  user: ${env.POSTGRES_USER:=llamastack}
  password: ${env.POSTGRES_PASSWORD:=llamastack}
storage:
  backends:
    kv_default:
      type: kv_postgres
      host: ${env.POSTGRES_HOST:=localhost}
      port: ${env.POSTGRES_PORT:=5432}
      db: ${env.POSTGRES_DB:=llamastack}
      user: ${env.POSTGRES_USER:=llamastack}
      password: ${env.POSTGRES_PASSWORD:=llamastack}
      table_name: ${env.POSTGRES_TABLE_NAME:=llamastack_kvstore}
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST:=localhost}
      port: ${env.POSTGRES_PORT:=5432}
      db: ${env.POSTGRES_DB:=llamastack}
      user: ${env.POSTGRES_USER:=llamastack}
      password: ${env.POSTGRES_PASSWORD:=llamastack}
  references:
    metadata:
      backend: kv_default
      namespace: registry
    inference:
      backend: sql_default
      table_name: inference_store
models:
- metadata:
    embedding_dimension: 768

@@ -59,7 +59,7 @@ Start a Llama Stack server on localhost. Here is an example of how you can do th
uv venv starter --python 3.12
source starter/bin/activate  # On Windows: starter\Scripts\activate
pip install --no-cache llama-stack==0.2.2
llama stack build --distro starter --image-type venv
llama stack list-deps starter | xargs -L1 uv pip install
export FIREWORKS_API_KEY=<SOME_KEY>
llama stack run starter --port 5050
```

@@ -166,10 +166,10 @@ docker run \

### Via venv

Make sure you have done `pip install llama-stack` and have the Llama Stack CLI available.
Install the distribution dependencies before launching:

```bash
llama stack build --distro dell --image-type venv
llama stack list-deps dell | xargs -L1 uv pip install
INFERENCE_MODEL=$INFERENCE_MODEL \
DEH_URL=$DEH_URL \
CHROMA_URL=$CHROMA_URL \

@@ -21,7 +21,7 @@ The `llamastack/distribution-meta-reference-gpu` distribution consists of the fo
| inference | `inline::meta-reference` |
| safety | `inline::llama-guard` |
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::model-context-protocol` |
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |

@@ -81,10 +81,10 @@ docker run \

### Via venv

Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available.
Make sure you have the Llama Stack CLI available.

```bash
llama stack build --distro meta-reference-gpu --image-type venv
llama stack list-deps meta-reference-gpu | xargs -L1 uv pip install
INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \
llama stack run distributions/meta-reference-gpu/run.yaml \
  --port 8321

@@ -16,7 +16,7 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
| post_training | `remote::nvidia` |
| safety | `remote::nvidia` |
| scoring | `inline::basic` |
| tool_runtime | `inline::rag-runtime` |
| tool_runtime | |
| vector_io | `inline::faiss` |

@@ -136,11 +136,11 @@ docker run \

### Via venv

If you've set up your local development environment, you can also build the image using your local virtual environment.
If you've set up your local development environment, you can also install the distribution dependencies using your local virtual environment.

```bash
INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
llama stack build --distro nvidia --image-type venv
llama stack list-deps nvidia | xargs -L1 uv pip install
NVIDIA_API_KEY=$NVIDIA_API_KEY \
INFERENCE_MODEL=$INFERENCE_MODEL \
llama stack run ./run.yaml \

@@ -169,7 +169,11 @@ docker run \
Ensure you have configured the starter distribution using the environment variables explained above.

```bash
uv run --with llama-stack llama stack build --distro starter --image-type venv --run
# Install dependencies for the starter distribution
uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install

# Run the server
uv run --with llama-stack llama stack run starter
```

## Example Usage

@@ -23,6 +23,17 @@ Another simple way to start interacting with Llama Stack is to just spin up a co
If you have built a container image and want to deploy it in a Kubernetes cluster instead of starting the Llama Stack server locally, see the [Kubernetes Deployment Guide](../deploying/kubernetes_deployment) for more details.


## Configure logging

Control log output via environment variables before starting the server.

- `LLAMA_STACK_LOGGING` sets per-component levels, e.g. `LLAMA_STACK_LOGGING=server=debug;core=info`.
- Supported categories: `all`, `core`, `server`, `router`, `inference`, `agents`, `safety`, `eval`, `tools`, `client`.
- Levels: `debug`, `info`, `warning`, `error`, `critical` (default is `info`). Use `all=<level>` to apply globally.
- `LLAMA_STACK_LOG_FILE=/path/to/log` mirrors logs to a file while still printing to stdout.

Export these variables prior to running `llama stack run`, launching a container, or starting the server through any other pathway.
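
A minimal sketch, assuming a global `all` level can be combined with a per-component override in the same string:

```bash
# Warnings everywhere, but debug-level detail from the router component
export LLAMA_STACK_LOGGING="all=warning;router=debug"
llama stack run starter
```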

```{toctree}
:maxdepth: 1
:hidden:

@@ -58,15 +58,19 @@ Llama Stack is a server that exposes multiple APIs, you connect with it using th

<Tabs>
<TabItem value="venv" label="Using venv">
You can use Python to build and run the Llama Stack server, which is useful for testing and development.
You can use Python to install dependencies and run the Llama Stack server, which is useful for testing and development.

Llama Stack uses a [YAML configuration file](../distributions/configuration) to specify the stack setup,
which defines the providers and their settings. The generated configuration serves as a starting point that you can [customize for your specific needs](../distributions/customizing_run_yaml).
Now let's build and run the Llama Stack config for Ollama.
Now let's install dependencies and run the Llama Stack config for Ollama.
We use `starter` as the template. By default all providers are disabled, so enable Ollama by passing environment variables.

```bash
llama stack build --distro starter --image-type venv --run
# Install dependencies for the starter distribution
uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install

# Run the server
llama stack run starter
```
</TabItem>
<TabItem value="container" label="Using a Container">

@@ -304,7 +308,7 @@ stream = agent.create_turn(
for event in AgentEventLogger().log(stream):
    event.print()
```
### ii. Run the Script
#### ii. Run the Script
Let's run the script using `uv`:
```bash
uv run python agent.py

@@ -24,10 +24,13 @@ ollama run llama3.2:3b --keepalive 60m

#### Step 2: Run the Llama Stack server

We will use `uv` to run the Llama Stack server.
We will use `uv` to install dependencies and run the Llama Stack server.
```bash
OLLAMA_URL=http://localhost:11434 \
uv run --with llama-stack llama stack build --distro starter --image-type venv --run
# Install dependencies for the starter distribution
uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install

# Run the server
OLLAMA_URL=http://localhost:11434 uv run --with llama-stack llama stack run starter
```
#### Step 3: Run the demo
Now open up a new terminal and copy the following script into a file named `demo_script.py`.

@@ -14,16 +14,18 @@ Meta's reference implementation of an agent system that can use tools, access ve

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `persistence_store` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `responses_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | |
| `persistence` | `<class 'inline.agents.meta_reference.config.AgentPersistenceConfig'>` | No | | |

## Sample Configuration

```yaml
persistence_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/agents_store.db
responses_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/responses_store.db
persistence:
  agent_state:
    namespace: agents
    backend: kv_default
  responses:
    table_name: responses
    backend: sql_default
    max_write_queue_size: 10000
    num_writers: 4
```

@@ -14,7 +14,7 @@ Reference implementation of batches API with KVStore persistence.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Configuration for the key-value store backend. |
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Configuration for the key-value store backend. |
| `max_concurrent_batches` | `<class 'int'>` | No | 1 | Maximum number of concurrent batches to process simultaneously. |
| `max_concurrent_requests_per_batch` | `<class 'int'>` | No | 10 | Maximum number of concurrent requests to process per batch. |

@@ -22,6 +22,6 @@ Reference implementation of batches API with KVStore persistence.

```yaml
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/batches.db
  namespace: batches
  backend: kv_default
```

@@ -14,12 +14,12 @@ Local filesystem-based dataset I/O provider for reading and writing datasets to

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/localfs_datasetio.db
  namespace: datasetio::localfs
  backend: kv_default
```

@@ -14,12 +14,12 @@ HuggingFace datasets provider for accessing and managing datasets from the Huggi

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/huggingface_datasetio.db
  namespace: datasetio::huggingface
  backend: kv_default
```

@@ -1,5 +1,7 @@
---
description: "Llama Stack Evaluation API for running evaluations on model and agent candidates."
description: "Evaluations

Llama Stack Evaluation API for running evaluations on model and agent candidates."
sidebar_label: Eval
title: Eval
---

@@ -8,6 +10,8 @@ title: Eval

## Overview

Llama Stack Evaluation API for running evaluations on model and agent candidates.
Evaluations

Llama Stack Evaluation API for running evaluations on model and agent candidates.

This section contains documentation for all available providers for the **eval** API.

@@ -14,12 +14,12 @@ Meta's reference implementation of evaluation tasks with support for multiple la

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/meta_reference_eval.db
  namespace: eval
  backend: kv_default
```

@@ -240,6 +240,6 @@ additional_pip_packages:
- sqlalchemy[asyncio]
```

No other steps are required other than `llama stack build` and `llama stack run`. The build process will use `module` to install all of the provider dependencies, retrieve the spec, etc.
No other steps are required beyond installing dependencies with `llama stack list-deps <distro> | xargs -L1 uv pip install` and then running `llama stack run`. The CLI will use `module` to install the provider dependencies, retrieve the spec, etc.

The provider will now be available in Llama Stack with the type `remote::ramalama`.

@@ -15,7 +15,7 @@ Local filesystem-based file storage provider for managing files and documents lo
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `storage_dir` | `<class 'str'>` | No | | Directory to store uploaded files |
| `metadata_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | SQL store configuration for file metadata |
| `metadata_store` | `<class 'llama_stack.core.storage.datatypes.SqlStoreReference'>` | No | | SQL store configuration for file metadata |
| `ttl_secs` | `<class 'int'>` | No | 31536000 | |

## Sample Configuration

@@ -23,6 +23,6 @@ Local filesystem-based file storage provider for managing files and documents lo
```yaml
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/dummy/files}
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/files_metadata.db
  table_name: files_metadata
  backend: sql_default
```

@@ -20,7 +20,7 @@ AWS S3-based file storage provider for scalable cloud file management with metad
| `aws_secret_access_key` | `str \| None` | No | | AWS secret access key (optional if using IAM roles) |
| `endpoint_url` | `str \| None` | No | | Custom S3 endpoint URL (for MinIO, LocalStack, etc.) |
| `auto_create_bucket` | `<class 'bool'>` | No | False | Automatically create the S3 bucket if it doesn't exist |
| `metadata_store` | `utils.sqlstore.sqlstore.SqliteSqlStoreConfig \| utils.sqlstore.sqlstore.PostgresSqlStoreConfig` | No | sqlite | SQL store configuration for file metadata |
| `metadata_store` | `<class 'llama_stack.core.storage.datatypes.SqlStoreReference'>` | No | | SQL store configuration for file metadata |

## Sample Configuration

@@ -32,6 +32,6 @@ aws_secret_access_key: ${env.AWS_SECRET_ACCESS_KEY:=}
endpoint_url: ${env.S3_ENDPOINT_URL:=}
auto_create_bucket: ${env.S3_AUTO_CREATE_BUCKET:=false}
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/s3_files_metadata.db
  table_name: s3_files_metadata
  backend: sql_default
```

@@ -79,13 +79,13 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti
| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `db_path` | `<class 'str'>` | No | | |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Config for KV store backend |

## Sample Configuration

```yaml
db_path: ${env.CHROMADB_PATH}
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/chroma_inline_registry.db
persistence:
  namespace: vector_io::chroma
  backend: kv_default
```

@@ -95,12 +95,12 @@ more details about Faiss in general.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/faiss_store.db
persistence:
  namespace: vector_io::faiss
  backend: kv_default
```
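
Across all of these vector_io providers the change is the same: the inline `kvstore` block is replaced by a `persistence` reference that is resolved against the stack-level storage backends. A minimal sketch of what the updated provider entry might look like (the surrounding `providers`/`run.yaml` layout here is an assumption; the `namespace` and `backend` values are taken from the diff above):

```bash
# Hypothetical run.yaml fragment: the old inline kvstore block gives way to a
# persistence reference into the stack-level kv_default backend.
cat <<'EOF' >> run.yaml
  vector_io:
    - provider_id: faiss
      provider_type: inline::faiss
      config:
        persistence:
          namespace: vector_io::faiss
          backend: kv_default
EOF
```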
@@ -14,14 +14,14 @@ Meta's reference implementation of a vector database.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/faiss_store.db
persistence:
  namespace: vector_io::faiss
  backend: kv_default
```
## Deprecation Notice
@@ -17,14 +17,14 @@ Please refer to the remote provider documentation.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `db_path` | `<class 'str'>` | No | | |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend (SQLite only for now) |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Config for KV store backend (SQLite only for now) |
| `consistency_level` | `<class 'str'>` | No | Strong | The consistency level of the Milvus server |

## Sample Configuration

```yaml
db_path: ${env.MILVUS_DB_PATH:=~/.llama/dummy}/milvus.db
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/milvus_registry.db
persistence:
  namespace: vector_io::milvus
  backend: kv_default
```
@@ -98,13 +98,13 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `path` | `<class 'str'>` | No | | |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
path: ${env.QDRANT_PATH:=~/.llama/~/.llama/dummy}/qdrant.db
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/qdrant_registry.db
persistence:
  namespace: vector_io::qdrant
  backend: kv_default
```
@@ -28,7 +28,7 @@ description: |

#### Empirical Example

Consider the histogram below in which 10,000 randomly generated strings were inserted
in batches of 100 into both Faiss and sqlite-vec using `client.tool_runtime.rag_tool.insert()`.
in batches of 100 into both Faiss and sqlite-vec.

```{image} ../../../../_static/providers/vector_io/write_time_comparison_sqlite-vec-faiss.png
:alt: Comparison of SQLite-Vec and Faiss write times

@@ -233,7 +233,7 @@ Datasets that can fit in memory, frequent reads | Faiss | Optimized for speed, i

#### Empirical Example

Consider the histogram below in which 10,000 randomly generated strings were inserted
in batches of 100 into both Faiss and sqlite-vec using `client.tool_runtime.rag_tool.insert()`.
in batches of 100 into both Faiss and sqlite-vec.

```{image} ../../../../_static/providers/vector_io/write_time_comparison_sqlite-vec-faiss.png
:alt: Comparison of SQLite-Vec and Faiss write times
@@ -408,13 +408,13 @@ See [sqlite-vec's GitHub repo](https://github.com/asg017/sqlite-vec/tree/main) f

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `db_path` | `<class 'str'>` | No | | Path to the SQLite database file |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend (SQLite only for now) |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Config for KV store backend (SQLite only for now) |

## Sample Configuration

```yaml
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec.db
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec_registry.db
persistence:
  namespace: vector_io::sqlite_vec
  backend: kv_default
```
@@ -17,15 +17,15 @@ Please refer to the sqlite-vec provider documentation.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `db_path` | `<class 'str'>` | No | | Path to the SQLite database file |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend (SQLite only for now) |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Config for KV store backend (SQLite only for now) |

## Sample Configuration

```yaml
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec.db
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/sqlite_vec_registry.db
persistence:
  namespace: vector_io::sqlite_vec
  backend: kv_default
```
## Deprecation Notice
@@ -78,13 +78,13 @@ See [Chroma's documentation](https://docs.trychroma.com/docs/overview/introducti

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `url` | `str \| None` | No | | |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Config for KV store backend |

## Sample Configuration

```yaml
url: ${env.CHROMADB_URL}
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/chroma_remote_registry.db
persistence:
  namespace: vector_io::chroma_remote
  backend: kv_default
```
@@ -408,7 +408,7 @@ For more details on TLS configuration, refer to the [TLS setup guide](https://mi

| `uri` | `<class 'str'>` | No | | The URI of the Milvus server |
| `token` | `str \| None` | No | | The token of the Milvus server |
| `consistency_level` | `<class 'str'>` | No | Strong | The consistency level of the Milvus server |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | Config for KV store backend |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Config for KV store backend |
| `config` | `dict` | No | `{}` | This configuration allows additional fields to be passed through to the underlying Milvus client. See the [Milvus](https://milvus.io/docs/install-overview.md) documentation for more details about Milvus in general. |

:::note

@@ -420,7 +420,7 @@ This configuration class accepts additional fields beyond those listed above. Yo

```yaml
uri: ${env.MILVUS_ENDPOINT}
token: ${env.MILVUS_TOKEN}
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/milvus_remote_registry.db
persistence:
  namespace: vector_io::milvus_remote
  backend: kv_default
```
@@ -218,7 +218,7 @@ See [PGVector's documentation](https://github.com/pgvector/pgvector) for more de

| `db` | `str \| None` | No | postgres | |
| `user` | `str \| None` | No | postgres | |
| `password` | `str \| None` | No | mysecretpassword | |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig, annotation=NoneType, required=False, default='sqlite', discriminator='type'` | No | | Config for KV store backend (SQLite only for now) |
| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |

## Sample Configuration

@@ -228,7 +228,7 @@ port: ${env.PGVECTOR_PORT:=5432}

db: ${env.PGVECTOR_DB}
user: ${env.PGVECTOR_USER}
password: ${env.PGVECTOR_PASSWORD}
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/pgvector_registry.db
persistence:
  namespace: vector_io::pgvector
  backend: kv_default
```
@@ -26,13 +26,13 @@ Please refer to the inline provider documentation.

| `prefix` | `str \| None` | No | | |
| `timeout` | `int \| None` | No | | |
| `host` | `str \| None` | No | | |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |
| `persistence` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |

## Sample Configuration

```yaml
api_key: ${env.QDRANT_API_KEY:=}
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/qdrant_registry.db
persistence:
  namespace: vector_io::qdrant_remote
  backend: kv_default
```
@@ -75,14 +75,14 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more

|-------|------|----------|---------|-------------|
| `weaviate_api_key` | `str \| None` | No | | The API key for the Weaviate instance |
| `weaviate_cluster_url` | `str \| None` | No | localhost:8080 | The URL of the Weaviate cluster |
| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig, annotation=NoneType, required=False, default='sqlite', discriminator='type'` | No | | Config for KV store backend (SQLite only for now) |
| `persistence` | `llama_stack.core.storage.datatypes.KVStoreReference \| None` | No | | Config for KV store backend (SQLite only for now) |

## Sample Configuration

```yaml
weaviate_api_key: null
weaviate_cluster_url: ${env.WEAVIATE_CLUSTER_URL:=localhost:8080}
kvstore:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/weaviate_registry.db
persistence:
  namespace: vector_io::weaviate
  backend: kv_default
```
@@ -32,7 +32,6 @@ Commands:

scoring_functions Manage scoring functions.
shields Manage safety shield services.
toolgroups Manage available tool groups.
vector_dbs Manage vector databases.
```

### `llama-stack-client configure`
@@ -211,53 +210,6 @@ Unregister a model from distribution endpoint

llama-stack-client models unregister <model_id>
```

## Vector DB Management
Manage vector databases.


### `llama-stack-client vector_dbs list`
Show available vector dbs on distribution endpoint
```bash
llama-stack-client vector_dbs list
```
```
┏━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ identifier ┃ provider_id ┃ provider_resource_id ┃ vector_db_type ┃ params ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ my_demo_vector_db │ faiss │ my_demo_vector_db │ │ embedding_dimension: 768 │
│ │ │ │ │ embedding_model: nomic-embed-text-v1.5 │
│ │ │ │ │ type: vector_db │
│ │ │ │ │ │
└──────────────────────────┴─────────────┴──────────────────────────┴────────────────┴───────────────────────────────────┘
```

### `llama-stack-client vector_dbs register`
Create a new vector db
```bash
llama-stack-client vector_dbs register <vector-db-id> [--provider-id <provider-id>] [--provider-vector-db-id <provider-vector-db-id>] [--embedding-model <embedding-model>] [--embedding-dimension <embedding-dimension>]
```

Required arguments:
- `VECTOR_DB_ID`: Vector DB ID

Optional arguments:
- `--provider-id`: Provider ID for the vector db
- `--provider-vector-db-id`: Provider's vector db ID
- `--embedding-model`: Embedding model to use. Default: `nomic-embed-text-v1.5`
- `--embedding-dimension`: Dimension of embeddings. Default: 768

### `llama-stack-client vector_dbs unregister`
Delete a vector db
```bash
llama-stack-client vector_dbs unregister <vector-db-id>
```

Required arguments:
- `VECTOR_DB_ID`: Vector DB ID


## Shield Management
Manage safety shield services.
### `llama-stack-client shields list`
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -2864,7 +2864,7 @@
|
|||
}
|
||||
],
|
||||
"source": [
|
||||
"!llama stack build --distro experimental-post-training --image-type venv --image-name __system__"
|
||||
"!llama stack list-deps experimental-post-training | xargs -L1 uv pip install"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
|
|
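
The same substitution recurs throughout the notebooks below; as a rule of thumb (the distro name is a placeholder):

```bash
# Old (removed):
#   llama stack build --distro <distro> --image-type venv
# New:
llama stack list-deps <distro> | xargs -L1 uv pip install
```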
@@ -38,7 +38,7 @@

"source": [
"# NBVAL_SKIP\n",
"!pip install -U llama-stack\n",
"!UV_SYSTEM_PYTHON=1 llama stack build --distro fireworks --image-type venv"
"llama stack list-deps fireworks | xargs -L1 uv pip install\n"
]
},
{
File diff suppressed because it is too large
@@ -136,7 +136,8 @@

"    \"\"\"Build and run LlamaStack server in one step using --run flag\"\"\"\n",
"    log_file = open(\"llama_stack_server.log\", \"w\")\n",
"    process = subprocess.Popen(\n",
"        \"uv run --with llama-stack llama stack build --distro starter --image-type venv --run\",\n",
"        \"uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install\",\n",
"        \"uv run --with llama-stack llama stack run starter\",\n",
"        shell=True,\n",
"        stdout=log_file,\n",
"        stderr=log_file,\n",

@@ -172,7 +173,7 @@

"\n",
"def kill_llama_stack_server():\n",
"    # Kill any existing llama stack server processes using pkill command\n",
"    os.system(\"pkill -f llama_stack.core.server.server\")"
"    os.system(\"pkill -f llama_stack.core.server.server\")\n"
]
},
{
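
Outside a notebook, the same two-step launch can be sketched as plain shell (the log path and backgrounding approach are illustrative, not prescribed by the diff):

```bash
# Install the distro's dependencies, then run the server in the background,
# capturing stdout/stderr the way the notebook helper does.
uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install
nohup uv run --with llama-stack llama stack run starter > llama_stack_server.log 2>&1 &

# Tear down later, matching kill_llama_stack_server():
pkill -f llama_stack.core.server.server
```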
@@ -105,7 +105,8 @@

"    \"\"\"Build and run LlamaStack server in one step using --run flag\"\"\"\n",
"    log_file = open(\"llama_stack_server.log\", \"w\")\n",
"    process = subprocess.Popen(\n",
"        \"uv run --with llama-stack llama stack build --distro starter --image-type venv --run\",\n",
"        \"uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install\",\n",
"        \"uv run --with llama-stack llama stack run starter\",\n",
"        shell=True,\n",
"        stdout=log_file,\n",
"        stderr=log_file,\n",
@@ -92,7 +92,7 @@

"metadata": {},
"source": [
"```bash\n",
"LLAMA_STACK_DIR=$(pwd) llama stack build --distro nvidia --image-type venv\n",
"uv run --with llama-stack llama stack list-deps nvidia | xargs -L1 uv pip install\n",
"```"
]
},

@@ -81,7 +81,7 @@

"metadata": {},
"source": [
"```bash\n",
"LLAMA_STACK_DIR=$(pwd) llama stack build --distro nvidia --image-type venv\n",
"uv run --with llama-stack llama stack list-deps nvidia | xargs -L1 uv pip install\n",
"```"
]
},
@@ -196,16 +196,10 @@ def _get_endpoint_functions(

def _get_defining_class(member_fn: str, derived_cls: type) -> type:
    "Find the class in which a member function is first defined in a class inheritance hierarchy."

    # This import must be dynamic here
    from llama_stack.apis.tools import RAGToolRuntime, ToolRuntime

    # iterate in reverse member resolution order to find most specific class first
    for cls in reversed(inspect.getmro(derived_cls)):
        for name, _ in inspect.getmembers(cls, inspect.isfunction):
            if name == member_fn:
                # HACK ALERT
                if cls == RAGToolRuntime:
                    return ToolRuntime
                return cls

    raise ValidationError(
@@ -30,3 +30,5 @@ fi

stack_dir=$(dirname $(dirname $THIS_DIR))
PYTHONPATH=$PYTHONPATH:$stack_dir \
python -m docs.openapi_generator.generate $(dirname $THIS_DIR)/static

cp $stack_dir/docs/static/stainless-llama-stack-spec.yaml $stack_dir/client-sdks/stainless/openapi.yml
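
Regenerating and syncing the specs by hand mirrors what this script does; a minimal sketch (the generator invocation and paths are taken from the script above, and running it from the repo root is an assumption):

```bash
# Regenerate the OpenAPI specs, then sync the Stainless copy (repo root assumed).
stack_dir=$(pwd)
PYTHONPATH=$PYTHONPATH:$stack_dir \
  python -m docs.openapi_generator.generate docs/static
cp docs/static/stainless-llama-stack-spec.yaml client-sdks/stainless/openapi.yml
```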
@@ -1,366 +1,366 @@

{
"cells": [
{
"cell_type": "markdown",
"id": "c1e7571c",
"metadata": {
"id": "c1e7571c"
},
"source": [
"[](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb)\n",
"\n",
"# Llama Stack - Building AI Applications\n",
"\n",
"<img src=\"https://llamastack.github.io/latest/_images/llama-stack.png\" alt=\"drawing\" width=\"500\"/>\n",
"\n",
"Get started with Llama Stack in minutes!\n",
"\n",
"[Llama Stack](https://github.com/meta-llama/llama-stack) is a stateful service with REST APIs to support the seamless transition of AI applications across different environments. You can build and test using a local server first and deploy to a hosted endpoint for production.\n",
"\n",
"In this guide, we'll walk through how to build a RAG application locally using Llama Stack with [Ollama](https://ollama.com/)\n",
"as the inference [provider](docs/source/providers/index.md#inference) for a Llama Model.\n"
]
},
{
"cell_type": "markdown",
"id": "4CV1Q19BDMVw",
"metadata": {
"id": "4CV1Q19BDMVw"
},
"source": [
"## Step 1: Install and setup"
]
},
{
"cell_type": "markdown",
"id": "K4AvfUAJZOeS",
"metadata": {
"id": "K4AvfUAJZOeS"
},
"source": [
"### 1.1. Install uv and test inference with Ollama\n",
"\n",
"We'll install [uv](https://docs.astral.sh/uv/) to set up the Python virtual environment, along with [colab-xterm](https://github.com/InfuseAI/colab-xterm) for running command-line tools, and [Ollama](https://ollama.com/download) as the inference provider."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7a2d7b85",
"metadata": {},
"outputs": [],
"source": [
"%pip install uv llama_stack llama-stack-client\n",
"\n",
"## If running on Colab:\n",
"# !pip install colab-xterm\n",
"# %load_ext colabxterm\n",
"\n",
"!curl https://ollama.ai/install.sh | sh"
]
},
{
"cell_type": "markdown",
"id": "39fa584b",
"metadata": {},
"source": [
"### 1.2. Test inference with Ollama"
]
},
{
"cell_type": "markdown",
"id": "3bf81522",
"metadata": {},
"source": [
"We’ll now launch a terminal and run inference on a Llama model with Ollama to verify that the model is working correctly."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a7e8e0f1",
"metadata": {},
"outputs": [],
"source": [
"## If running on Colab:\n",
"# %xterm\n",
"\n",
"## To be run in the terminal:\n",
"# ollama serve &\n",
"# ollama run llama3.2:3b --keepalive 60m"
]
},
{
"cell_type": "markdown",
"id": "f3c5f243",
"metadata": {},
"source": [
"If successful, you should see the model respond to a prompt.\n",
"\n",
"...\n",
"```\n",
">>> hi\n",
"Hello! How can I assist you today?\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "oDUB7M_qe-Gs",
"metadata": {
"id": "oDUB7M_qe-Gs"
},
"source": [
"## Step 2: Run the Llama Stack server\n",
"\n",
"In this showcase, we will start a Llama Stack server that is running locally."
]
},
{
"cell_type": "markdown",
"id": "732eadc6",
"metadata": {},
"source": [
"### 2.1. Setup the Llama Stack Server"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "J2kGed0R5PSf",
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"collapsed": true,
"id": "J2kGed0R5PSf",
"outputId": "2478ea60-8d35-48a1-b011-f233831740c5"
},
"outputs": [],
"source": [
"import os\n",
"import subprocess\n",
"\n",
"if \"UV_SYSTEM_PYTHON\" in os.environ:\n",
"    del os.environ[\"UV_SYSTEM_PYTHON\"]\n",
"\n",
"# this command installs all the dependencies needed for the llama stack server with the ollama inference provider\n",
"!uv run --with llama-stack llama stack build --distro starter\n",
"!uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install\n",
"\n",
"def run_llama_stack_server_background():\n",
"    log_file = open(\"llama_stack_server.log\", \"w\")\n",
"    process = subprocess.Popen(\n",
"        f\"OLLAMA_URL=http://localhost:11434 uv run --with llama-stack llama stack run starter\",\n",
"        shell=True,\n",
"        stdout=log_file,\n",
"        stderr=log_file,\n",
"        text=True\n",
"    )\n",
"\n",
"    print(f\"Starting Llama Stack server with PID: {process.pid}\")\n",
"    return process\n",
"\n",
"def wait_for_server_to_start():\n",
"    import requests\n",
"    from requests.exceptions import ConnectionError\n",
"    import time\n",
"\n",
"    url = \"http://0.0.0.0:8321/v1/health\"\n",
"    max_retries = 30\n",
"    retry_interval = 1\n",
"\n",
"    print(\"Waiting for server to start\", end=\"\")\n",
"    for _ in range(max_retries):\n",
"        try:\n",
"            response = requests.get(url)\n",
"            if response.status_code == 200:\n",
"                print(\"\\nServer is ready!\")\n",
"                return True\n",
"        except ConnectionError:\n",
"            print(\".\", end=\"\", flush=True)\n",
"        time.sleep(retry_interval)\n",
"\n",
"    print(\"\\nServer failed to start after\", max_retries * retry_interval, \"seconds\")\n",
"    return False\n",
"\n",
"\n",
"# use this helper if needed to kill the server\n",
"def kill_llama_stack_server():\n",
"    # Kill any existing llama stack server processes\n",
"    os.system(\"ps aux | grep -v grep | grep llama_stack.core.server.server | awk '{print $2}' | xargs kill -9\")\n"
]
},
{
"cell_type": "markdown",
"id": "c40e9efd",
"metadata": {},
"source": [
"### 2.2. Start the Llama Stack Server"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f779283d",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Starting Llama Stack server with PID: 787100\n",
"Waiting for server to start\n",
"Server is ready!\n"
]
}
],
"source": [
"server_process = run_llama_stack_server_background()\n",
"assert wait_for_server_to_start()"
]
},
{
"cell_type": "markdown",
"id": "28477c03",
"metadata": {},
"source": [
"## Step 3: Run the demo"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "7da71011",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"rag_tool> Ingesting document: https://www.paulgraham.com/greatwork.html\n",
"prompt> How do you do great work?\n",
"\u001b[33minference> \u001b[0m\u001b[33m[k\u001b[0m\u001b[33mnowledge\u001b[0m\u001b[33m_search\u001b[0m\u001b[33m(query\u001b[0m\u001b[33m=\"\u001b[0m\u001b[33mWhat\u001b[0m\u001b[33m is\u001b[0m\u001b[33m the\u001b[0m\u001b[33m key\u001b[0m\u001b[33m to\u001b[0m\u001b[33m doing\u001b[0m\u001b[33m great\u001b[0m\u001b[33m work\u001b[0m\u001b[33m\")]\u001b[0m\u001b[97m\u001b[0m\n",
"\u001b[32mtool_execution> Tool:knowledge_search Args:{'query': 'What is the key to doing great work'}\u001b[0m\n",
"\u001b[32mtool_execution> Tool:knowledge_search Response:[TextContentItem(text='knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n', type='text'), TextContentItem(text=\"Result 1:\\nDocument_id:docum\\nContent: work. Doing great work means doing something important\\nso well that you expand people's ideas of what's possible. But\\nthere's no threshold for importance. It's a matter of degree, and\\noften hard to judge at the time anyway.\\n\", type='text'), TextContentItem(text=\"Result 2:\\nDocument_id:docum\\nContent: work. Doing great work means doing something important\\nso well that you expand people's ideas of what's possible. But\\nthere's no threshold for importance. It's a matter of degree, and\\noften hard to judge at the time anyway.\\n\", type='text'), TextContentItem(text=\"Result 3:\\nDocument_id:docum\\nContent: work. Doing great work means doing something important\\nso well that you expand people's ideas of what's possible. But\\nthere's no threshold for importance. It's a matter of degree, and\\noften hard to judge at the time anyway.\\n\", type='text'), TextContentItem(text=\"Result 4:\\nDocument_id:docum\\nContent: work. Doing great work means doing something important\\nso well that you expand people's ideas of what's possible. But\\nthere's no threshold for importance. It's a matter of degree, and\\noften hard to judge at the time anyway.\\n\", type='text'), TextContentItem(text=\"Result 5:\\nDocument_id:docum\\nContent: work. Doing great work means doing something important\\nso well that you expand people's ideas of what's possible. But\\nthere's no threshold for importance. It's a matter of degree, and\\noften hard to judge at the time anyway.\\n\", type='text'), TextContentItem(text='END of knowledge_search tool results.\\n', type='text'), TextContentItem(text='The above results were retrieved to help answer the user\\'s query: \"What is the key to doing great work\". Use them as supporting information only in answering this query.\\n', type='text')]\u001b[0m\n",
"\u001b[33minference> \u001b[0m\u001b[33mDoing\u001b[0m\u001b[33m great\u001b[0m\u001b[33m work\u001b[0m\u001b[33m means\u001b[0m\u001b[33m doing\u001b[0m\u001b[33m something\u001b[0m\u001b[33m important\u001b[0m\u001b[33m so\u001b[0m\u001b[33m well\u001b[0m\u001b[33m that\u001b[0m\u001b[33m you\u001b[0m\u001b[33m expand\u001b[0m\u001b[33m people\u001b[0m\u001b[33m's\u001b[0m\u001b[33m ideas\u001b[0m\u001b[33m of\u001b[0m\u001b[33m what\u001b[0m\u001b[33m's\u001b[0m\u001b[33m possible\u001b[0m\u001b[33m.\u001b[0m\u001b[33m However\u001b[0m\u001b[33m,\u001b[0m\u001b[33m there\u001b[0m\u001b[33m's\u001b[0m\u001b[33m no\u001b[0m\u001b[33m threshold\u001b[0m\u001b[33m for\u001b[0m\u001b[33m importance\u001b[0m\u001b[33m,\u001b[0m\u001b[33m and\u001b[0m\u001b[33m it\u001b[0m\u001b[33m's\u001b[0m\u001b[33m often\u001b[0m\u001b[33m hard\u001b[0m\u001b[33m to\u001b[0m\u001b[33m judge\u001b[0m\u001b[33m at\u001b[0m\u001b[33m the\u001b[0m\u001b[33m time\u001b[0m\u001b[33m anyway\u001b[0m\u001b[33m.\u001b[0m\u001b[33m Great\u001b[0m\u001b[33m work\u001b[0m\u001b[33m is\u001b[0m\u001b[33m a\u001b[0m\u001b[33m matter\u001b[0m\u001b[33m of\u001b[0m\u001b[33m degree\u001b[0m\u001b[33m,\u001b[0m\u001b[33m and\u001b[0m\u001b[33m it\u001b[0m\u001b[33m can\u001b[0m\u001b[33m be\u001b[0m\u001b[33m difficult\u001b[0m\u001b[33m to\u001b[0m\u001b[33m determine\u001b[0m\u001b[33m whether\u001b[0m\u001b[33m someone\u001b[0m\u001b[33m has\u001b[0m\u001b[33m done\u001b[0m\u001b[33m great\u001b[0m\u001b[33m work\u001b[0m\u001b[33m until\u001b[0m\u001b[33m after\u001b[0m\u001b[33m the\u001b[0m\u001b[33m fact\u001b[0m\u001b[33m.\u001b[0m\u001b[97m\u001b[0m\n",
"\u001b[30m\u001b[0m"
]
}
],
"source": [
"from llama_stack_client import Agent, AgentEventLogger, RAGDocument, LlamaStackClient\n",
"\n",
"vector_db_id = \"my_demo_vector_db\"\n",
"client = LlamaStackClient(base_url=\"http://0.0.0.0:8321\")\n",
"\n",
"models = client.models.list()\n",
"\n",
"# Select the first ollama and first ollama's embedding model\n",
"model_id = next(m for m in models if m.model_type == \"llm\" and m.provider_id == \"ollama\").identifier\n",
"embedding_model = next(m for m in models if m.model_type == \"embedding\" and m.provider_id == \"ollama\")\n",
"embedding_model_id = embedding_model.identifier\n",
"embedding_dimension = embedding_model.metadata[\"embedding_dimension\"]\n",
"\n",
"_ = client.vector_dbs.register(\n",
"    vector_db_id=vector_db_id,\n",
"    embedding_model=embedding_model_id,\n",
"    embedding_dimension=embedding_dimension,\n",
"    provider_id=\"faiss\",\n",
")\n",
"source = \"https://www.paulgraham.com/greatwork.html\"\n",
"print(\"rag_tool> Ingesting document:\", source)\n",
"document = RAGDocument(\n",
"    document_id=\"document_1\",\n",
"    content=source,\n",
"    mime_type=\"text/html\",\n",
"    metadata={},\n",
")\n",
"client.tool_runtime.rag_tool.insert(\n",
"    documents=[document],\n",
"    vector_db_id=vector_db_id,\n",
"    chunk_size_in_tokens=50,\n",
")\n",
"agent = Agent(\n",
"    client,\n",
"    model=model_id,\n",
"    instructions=\"You are a helpful assistant\",\n",
"    tools=[\n",
"        {\n",
"            \"name\": \"builtin::rag/knowledge_search\",\n",
"            \"args\": {\"vector_db_ids\": [vector_db_id]},\n",
"        }\n",
"    ],\n",
")\n",
"\n",
"prompt = \"How do you do great work?\"\n",
"print(\"prompt>\", prompt)\n",
"\n",
"response = agent.create_turn(\n",
"    messages=[{\"role\": \"user\", \"content\": prompt}],\n",
"    session_id=agent.create_session(\"rag_session\"),\n",
"    stream=True,\n",
")\n",
"\n",
"for log in AgentEventLogger().log(response):\n",
"    log.print()"
]
},
{
"cell_type": "markdown",
"id": "341aaadf",
"metadata": {},
"source": [
"Congratulations! You've successfully built your first RAG application using Llama Stack! 🎉🥳"
]
},
{
"cell_type": "markdown",
"id": "e88e1185",
"metadata": {},
"source": [
"## Next Steps"
]
},
{
"cell_type": "markdown",
"id": "bcb73600",
"metadata": {},
"source": [
"Now you're ready to dive deeper into Llama Stack!\n",
"- Explore the [Detailed Tutorial](./detailed_tutorial.md).\n",
"- Try the [Getting Started Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb).\n",
"- Browse more [Notebooks on GitHub](https://github.com/meta-llama/llama-stack/tree/main/docs/notebooks).\n",
"- Learn about Llama Stack [Concepts](../concepts/index.md).\n",
"- Discover how to [Build Llama Stacks](../distributions/index.md).\n",
"- Refer to our [References](../references/index.md) for details on the Llama CLI and Python SDK.\n",
"- Check out the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main/examples) repository for example applications and tutorials."
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"gpuType": "T4",
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
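
A shell analogue of the notebook's `wait_for_server_to_start` helper, polling the same health endpoint (the loop itself is a sketch, not part of the diff):

```bash
# Poll the health endpoint until the server responds.
until curl -sf http://localhost:8321/v1/health > /dev/null; do
  printf '.'
  sleep 1
done
echo "Server is ready!"
```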
@@ -47,11 +47,11 @@ function QuickStart() {

<pre><code>{`# Install uv and start Ollama
ollama run llama3.2:3b --keepalive 60m

# Install server dependencies
uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install

# Run Llama Stack server
OLLAMA_URL=http://localhost:11434 \\
uv run --with llama-stack \\
llama stack build --distro starter \\
--image-type venv --run
OLLAMA_URL=http://localhost:11434 uv run --with llama-stack llama stack run starter

# Try the Python SDK
from llama_stack_client import LlamaStackClient
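
A quick smoke test once the quickstart server is up, using the SDK classes referenced above (the one-liner itself is an illustrative sketch):

```bash
# List the registered models through the Python SDK.
uv pip install llama-stack-client
python -c "from llama_stack_client import LlamaStackClient; \
print([m.identifier for m in LlamaStackClient(base_url='http://localhost:8321').models.list()])"
```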
16 docs/static/deprecated-llama-stack-spec.html vendored
@@ -5547,7 +5547,7 @@

"enum": [
"model",
"shield",
"vector_db",
"vector_store",
"dataset",
"scoring_function",
"benchmark",

@@ -5798,7 +5798,7 @@

"enum": [
"model",
"shield",
"vector_db",
"vector_store",
"dataset",
"scoring_function",
"benchmark",

@@ -9024,6 +9024,10 @@

"$ref": "#/components/schemas/OpenAIResponseUsage",
"description": "(Optional) Token usage information for the response"
},
"instructions": {
"type": "string",
"description": "(Optional) System message inserted into the model's context"
},
"input": {
"type": "array",
"items": {

@@ -9901,6 +9905,10 @@

"usage": {
"$ref": "#/components/schemas/OpenAIResponseUsage",
"description": "(Optional) Token usage information for the response"
},
"instructions": {
"type": "string",
"description": "(Optional) System message inserted into the model's context"
}
},
"additionalProperties": false,

@@ -13449,8 +13457,8 @@

},
{
"name": "Eval",
"description": "",
"x-displayName": "Llama Stack Evaluation API for running evaluations on model and agent candidates."
"description": "Llama Stack Evaluation API for running evaluations on model and agent candidates.",
"x-displayName": "Evaluations"
},
{
"name": "Files",
16 docs/static/deprecated-llama-stack-spec.yaml vendored
@@ -4114,7 +4114,7 @@ components:

enum:
- model
- shield
- vector_db
- vector_store
- dataset
- scoring_function
- benchmark

@@ -4303,7 +4303,7 @@ components:

enum:
- model
- shield
- vector_db
- vector_store
- dataset
- scoring_function
- benchmark

@@ -6734,6 +6734,10 @@ components:

$ref: '#/components/schemas/OpenAIResponseUsage'
description: >-
(Optional) Token usage information for the response
instructions:
type: string
description: >-
(Optional) System message inserted into the model's context
input:
type: array
items:

@@ -7403,6 +7407,10 @@ components:

$ref: '#/components/schemas/OpenAIResponseUsage'
description: >-
(Optional) Token usage information for the response
instructions:
type: string
description: >-
(Optional) System message inserted into the model's context
additionalProperties: false
required:
- created_at

@@ -10196,9 +10204,9 @@ tags:

- name: Datasets
description: ''
- name: Eval
description: ''
x-displayName: >-
description: >-
Llama Stack Evaluation API for running evaluations on model and agent candidates.
x-displayName: Evaluations
- name: Files
description: >-
This API is used to upload documents that can be used with other Llama Stack
@@ -1850,7 +1850,7 @@

"enum": [
"model",
"shield",
"vector_db",
"vector_store",
"dataset",
"scoring_function",
"benchmark",

@@ -3983,7 +3983,7 @@

"enum": [
"model",
"shield",
"vector_db",
"vector_store",
"dataset",
"scoring_function",
"benchmark",

@@ -5518,8 +5518,8 @@

},
{
"name": "Eval",
"description": "",
"x-displayName": "Llama Stack Evaluation API for running evaluations on model and agent candidates."
"description": "Llama Stack Evaluation API for running evaluations on model and agent candidates.",
"x-displayName": "Evaluations"
},
{
"name": "PostTraining (Coming Soon)",

@@ -1320,7 +1320,7 @@ components:

enum:
- model
- shield
- vector_db
- vector_store
- dataset
- scoring_function
- benchmark

@@ -2927,7 +2927,7 @@ components:

enum:
- model
- shield
- vector_db
- vector_store
- dataset
- scoring_function
- benchmark

@@ -4119,9 +4119,9 @@ tags:

- name: Datasets
description: ''
- name: Eval
description: ''
x-displayName: >-
description: >-
Llama Stack Evaluation API for running evaluations on model and agent candidates.
x-displayName: Evaluations
- name: PostTraining (Coming Soon)
description: ''
x-tagGroups:
473 docs/static/llama-stack-spec.html vendored
@@ -282,7 +282,7 @@
           "Conversations"
         ],
         "summary": "Create a conversation.",
-        "description": "Create a conversation.",
+        "description": "Create a conversation.\nCreate a conversation.",
         "parameters": [],
         "requestBody": {
           "content": {
@@ -326,8 +326,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Get a conversation with the given ID.",
-        "description": "Get a conversation with the given ID.",
+        "summary": "Retrieve a conversation.",
+        "description": "Retrieve a conversation.\nGet a conversation with the given ID.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -369,8 +369,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Update a conversation's metadata with the given ID.",
-        "description": "Update a conversation's metadata with the given ID.",
+        "summary": "Update a conversation.",
+        "description": "Update a conversation.\nUpdate a conversation's metadata with the given ID.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -422,8 +422,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Delete a conversation with the given ID.",
-        "description": "Delete a conversation with the given ID.",
+        "summary": "Delete a conversation.",
+        "description": "Delete a conversation.\nDelete a conversation with the given ID.",
        "parameters": [
           {
             "name": "conversation_id",
@@ -467,8 +467,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "List items in the conversation.",
-        "description": "List items in the conversation.",
+        "summary": "List items.",
+        "description": "List items.\nList items in the conversation.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -597,8 +597,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Create items in the conversation.",
-        "description": "Create items in the conversation.",
+        "summary": "Create items.",
+        "description": "Create items.\nCreate items in the conversation.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -652,8 +652,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Retrieve a conversation item.",
-        "description": "Retrieve a conversation item.",
+        "summary": "Retrieve an item.",
+        "description": "Retrieve an item.\nRetrieve a conversation item.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -704,8 +704,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Delete a conversation item.",
-        "description": "Delete a conversation item.",
+        "summary": "Delete an item.",
+        "description": "Delete an item.\nDelete a conversation item.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -2624,89 +2624,6 @@
         "deprecated": false
       }
     },
-    "/v1/tool-runtime/rag-tool/insert": {
-      "post": {
-        "responses": {
-          "200": {
-            "description": "OK"
-          },
-          "400": {
-            "$ref": "#/components/responses/BadRequest400"
-          },
-          "429": {
-            "$ref": "#/components/responses/TooManyRequests429"
-          },
-          "500": {
-            "$ref": "#/components/responses/InternalServerError500"
-          },
-          "default": {
-            "$ref": "#/components/responses/DefaultError"
-          }
-        },
-        "tags": [
-          "ToolRuntime"
-        ],
-        "summary": "Index documents so they can be used by the RAG system.",
-        "description": "Index documents so they can be used by the RAG system.",
-        "parameters": [],
-        "requestBody": {
-          "content": {
-            "application/json": {
-              "schema": {
-                "$ref": "#/components/schemas/InsertRequest"
-              }
-            }
-          },
-          "required": true
-        },
-        "deprecated": false
-      }
-    },
-    "/v1/tool-runtime/rag-tool/query": {
-      "post": {
-        "responses": {
-          "200": {
-            "description": "RAGQueryResult containing the retrieved content and metadata",
-            "content": {
-              "application/json": {
-                "schema": {
-                  "$ref": "#/components/schemas/RAGQueryResult"
-                }
-              }
-            }
-          },
-          "400": {
-            "$ref": "#/components/responses/BadRequest400"
-          },
-          "429": {
-            "$ref": "#/components/responses/TooManyRequests429"
-          },
-          "500": {
-            "$ref": "#/components/responses/InternalServerError500"
-          },
-          "default": {
-            "$ref": "#/components/responses/DefaultError"
-          }
-        },
-        "tags": [
-          "ToolRuntime"
-        ],
-        "summary": "Query the RAG system for context; typically invoked by the agent.",
-        "description": "Query the RAG system for context; typically invoked by the agent.",
-        "parameters": [],
-        "requestBody": {
-          "content": {
-            "application/json": {
-              "schema": {
-                "$ref": "#/components/schemas/QueryRequest"
-              }
-            }
-          },
-          "required": true
-        },
-        "deprecated": false
-      }
-    },
    "/v1/toolgroups": {
      "get": {
        "responses": {
@@ -6800,7 +6717,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -7600,6 +7517,10 @@
             "$ref": "#/components/schemas/OpenAIResponseUsage",
             "description": "(Optional) Token usage information for the response"
           },
+          "instructions": {
+            "type": "string",
+            "description": "(Optional) System message inserted into the model's context"
+          },
           "input": {
             "type": "array",
             "items": {
@@ -8148,6 +8069,10 @@
           "usage": {
             "$ref": "#/components/schemas/OpenAIResponseUsage",
             "description": "(Optional) Token usage information for the response"
           },
+          "instructions": {
+            "type": "string",
+            "description": "(Optional) System message inserted into the model's context"
+          }
         },
         "additionalProperties": false,
@@ -10197,7 +10122,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -10679,7 +10604,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -11375,346 +11300,6 @@
        "title": "ListToolDefsResponse",
        "description": "Response containing a list of tool definitions."
      },
-      "RAGDocument": {
-        "type": "object",
-        "properties": {
-          "document_id": {
-            "type": "string",
-            "description": "The unique identifier for the document."
-          },
-          "content": {
-            "oneOf": [
-              {
-                "type": "string"
-              },
-              {
-                "$ref": "#/components/schemas/InterleavedContentItem"
-              },
-              {
-                "type": "array",
-                "items": {
-                  "$ref": "#/components/schemas/InterleavedContentItem"
-                }
-              },
-              {
-                "$ref": "#/components/schemas/URL"
-              }
-            ],
-            "description": "The content of the document."
-          },
-          "mime_type": {
-            "type": "string",
-            "description": "The MIME type of the document."
-          },
-          "metadata": {
-            "type": "object",
-            "additionalProperties": {
-              "oneOf": [
-                {
-                  "type": "null"
-                },
-                {
-                  "type": "boolean"
-                },
-                {
-                  "type": "number"
-                },
-                {
-                  "type": "string"
-                },
-                {
-                  "type": "array"
-                },
-                {
-                  "type": "object"
-                }
-              ]
-            },
-            "description": "Additional metadata for the document."
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "document_id",
-          "content",
-          "metadata"
-        ],
-        "title": "RAGDocument",
-        "description": "A document to be used for document ingestion in the RAG Tool."
-      },
-      "InsertRequest": {
-        "type": "object",
-        "properties": {
-          "documents": {
-            "type": "array",
-            "items": {
-              "$ref": "#/components/schemas/RAGDocument"
-            },
-            "description": "List of documents to index in the RAG system"
-          },
-          "vector_db_id": {
-            "type": "string",
-            "description": "ID of the vector database to store the document embeddings"
-          },
-          "chunk_size_in_tokens": {
-            "type": "integer",
-            "description": "(Optional) Size in tokens for document chunking during indexing"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "documents",
-          "vector_db_id",
-          "chunk_size_in_tokens"
-        ],
-        "title": "InsertRequest"
-      },
-      "DefaultRAGQueryGeneratorConfig": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "default",
-            "default": "default",
-            "description": "Type of query generator, always 'default'"
-          },
-          "separator": {
-            "type": "string",
-            "default": " ",
-            "description": "String separator used to join query terms"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "separator"
-        ],
-        "title": "DefaultRAGQueryGeneratorConfig",
-        "description": "Configuration for the default RAG query generator."
-      },
-      "LLMRAGQueryGeneratorConfig": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "llm",
-            "default": "llm",
-            "description": "Type of query generator, always 'llm'"
-          },
-          "model": {
-            "type": "string",
-            "description": "Name of the language model to use for query generation"
-          },
-          "template": {
-            "type": "string",
-            "description": "Template string for formatting the query generation prompt"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "model",
-          "template"
-        ],
-        "title": "LLMRAGQueryGeneratorConfig",
-        "description": "Configuration for the LLM-based RAG query generator."
-      },
-      "RAGQueryConfig": {
-        "type": "object",
-        "properties": {
-          "query_generator_config": {
-            "oneOf": [
-              {
-                "$ref": "#/components/schemas/DefaultRAGQueryGeneratorConfig"
-              },
-              {
-                "$ref": "#/components/schemas/LLMRAGQueryGeneratorConfig"
-              }
-            ],
-            "discriminator": {
-              "propertyName": "type",
-              "mapping": {
-                "default": "#/components/schemas/DefaultRAGQueryGeneratorConfig",
-                "llm": "#/components/schemas/LLMRAGQueryGeneratorConfig"
-              }
-            },
-            "description": "Configuration for the query generator."
-          },
-          "max_tokens_in_context": {
-            "type": "integer",
-            "default": 4096,
-            "description": "Maximum number of tokens in the context."
-          },
-          "max_chunks": {
-            "type": "integer",
-            "default": 5,
-            "description": "Maximum number of chunks to retrieve."
-          },
-          "chunk_template": {
-            "type": "string",
-            "default": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n",
-            "description": "Template for formatting each retrieved chunk in the context. Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict). Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n\""
-          },
-          "mode": {
-            "$ref": "#/components/schemas/RAGSearchMode",
-            "default": "vector",
-            "description": "Search mode for retrieval—either \"vector\", \"keyword\", or \"hybrid\". Default \"vector\"."
-          },
-          "ranker": {
-            "$ref": "#/components/schemas/Ranker",
-            "description": "Configuration for the ranker to use in hybrid search. Defaults to RRF ranker."
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "query_generator_config",
-          "max_tokens_in_context",
-          "max_chunks",
-          "chunk_template"
-        ],
-        "title": "RAGQueryConfig",
-        "description": "Configuration for the RAG query generation."
-      },
-      "RAGSearchMode": {
-        "type": "string",
-        "enum": [
-          "vector",
-          "keyword",
-          "hybrid"
-        ],
-        "title": "RAGSearchMode",
-        "description": "Search modes for RAG query retrieval: - VECTOR: Uses vector similarity search for semantic matching - KEYWORD: Uses keyword-based search for exact matching - HYBRID: Combines both vector and keyword search for better results"
-      },
-      "RRFRanker": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "rrf",
-            "default": "rrf",
-            "description": "The type of ranker, always \"rrf\""
-          },
-          "impact_factor": {
-            "type": "number",
-            "default": 60.0,
-            "description": "The impact factor for RRF scoring. Higher values give more weight to higher-ranked results. Must be greater than 0"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "impact_factor"
-        ],
-        "title": "RRFRanker",
-        "description": "Reciprocal Rank Fusion (RRF) ranker configuration."
-      },
-      "Ranker": {
-        "oneOf": [
-          {
-            "$ref": "#/components/schemas/RRFRanker"
-          },
-          {
-            "$ref": "#/components/schemas/WeightedRanker"
-          }
-        ],
-        "discriminator": {
-          "propertyName": "type",
-          "mapping": {
-            "rrf": "#/components/schemas/RRFRanker",
-            "weighted": "#/components/schemas/WeightedRanker"
-          }
-        }
-      },
-      "WeightedRanker": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "weighted",
-            "default": "weighted",
-            "description": "The type of ranker, always \"weighted\""
-          },
-          "alpha": {
-            "type": "number",
-            "default": 0.5,
-            "description": "Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use vector scores, values in between blend both scores."
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "alpha"
-        ],
-        "title": "WeightedRanker",
-        "description": "Weighted ranker configuration that combines vector and keyword scores."
-      },
-      "QueryRequest": {
-        "type": "object",
-        "properties": {
-          "content": {
-            "$ref": "#/components/schemas/InterleavedContent",
-            "description": "The query content to search for in the indexed documents"
-          },
-          "vector_db_ids": {
-            "type": "array",
-            "items": {
-              "type": "string"
-            },
-            "description": "List of vector database IDs to search within"
-          },
-          "query_config": {
-            "$ref": "#/components/schemas/RAGQueryConfig",
-            "description": "(Optional) Configuration parameters for the query operation"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "content",
-          "vector_db_ids"
-        ],
-        "title": "QueryRequest"
-      },
-      "RAGQueryResult": {
-        "type": "object",
-        "properties": {
-          "content": {
-            "$ref": "#/components/schemas/InterleavedContent",
-            "description": "(Optional) The retrieved content from the query"
-          },
-          "metadata": {
-            "type": "object",
-            "additionalProperties": {
-              "oneOf": [
-                {
-                  "type": "null"
-                },
-                {
-                  "type": "boolean"
-                },
-                {
-                  "type": "number"
-                },
-                {
-                  "type": "string"
-                },
-                {
-                  "type": "array"
-                },
-                {
-                  "type": "object"
-                }
-              ]
-            },
-            "description": "Additional metadata about the query result"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "metadata"
-        ],
-        "title": "RAGQueryResult",
-        "description": "Result of a RAG query containing retrieved content and metadata."
-      },
      "ToolGroup": {
        "type": "object",
        "properties": {
@@ -11732,7 +11317,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -13251,8 +12836,8 @@
     },
     {
       "name": "Conversations",
-      "description": "",
-      "x-displayName": "Protocol for conversation management operations."
+      "description": "Protocol for conversation management operations.",
+      "x-displayName": "Conversations"
     },
     {
       "name": "Files",

docs/static/llama-stack-spec.yaml (403 changes, vendored)

@@ -192,7 +192,10 @@ paths:
       tags:
         - Conversations
       summary: Create a conversation.
-      description: Create a conversation.
+      description: >-
+        Create a conversation.
+
+        Create a conversation.
       parameters: []
       requestBody:
         content:
@@ -222,8 +225,11 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: Get a conversation with the given ID.
-      description: Get a conversation with the given ID.
+      summary: Retrieve a conversation.
+      description: >-
+        Retrieve a conversation.
+
+        Get a conversation with the given ID.
       parameters:
         - name: conversation_id
           in: path
@@ -252,9 +258,10 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: >-
-        Update a conversation's metadata with the given ID.
+      summary: Update a conversation.
       description: >-
+        Update a conversation.
+
         Update a conversation's metadata with the given ID.
       parameters:
         - name: conversation_id
@@ -290,8 +297,11 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: Delete a conversation with the given ID.
-      description: Delete a conversation with the given ID.
+      summary: Delete a conversation.
+      description: >-
+        Delete a conversation.
+
+        Delete a conversation with the given ID.
       parameters:
         - name: conversation_id
           in: path
@@ -321,8 +331,11 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: List items in the conversation.
-      description: List items in the conversation.
+      summary: List items.
+      description: >-
+        List items.
+
+        List items in the conversation.
       parameters:
         - name: conversation_id
           in: path
@@ -495,8 +508,11 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: Create items in the conversation.
-      description: Create items in the conversation.
+      summary: Create items.
+      description: >-
+        Create items.
+
+        Create items in the conversation.
       parameters:
         - name: conversation_id
           in: path
@@ -532,8 +548,11 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: Retrieve a conversation item.
-      description: Retrieve a conversation item.
+      summary: Retrieve an item.
+      description: >-
+        Retrieve an item.
+
+        Retrieve a conversation item.
       parameters:
         - name: conversation_id
           in: path
@@ -568,8 +587,11 @@ paths:
           $ref: '#/components/responses/DefaultError'
       tags:
         - Conversations
-      summary: Delete a conversation item.
-      description: Delete a conversation item.
+      summary: Delete an item.
+      description: >-
+        Delete an item.
+
+        Delete a conversation item.
       parameters:
         - name: conversation_id
           in: path
@@ -2014,69 +2036,6 @@ paths:
             schema:
               $ref: '#/components/schemas/URL'
       deprecated: false
-  /v1/tool-runtime/rag-tool/insert:
-    post:
-      responses:
-        '200':
-          description: OK
-        '400':
-          $ref: '#/components/responses/BadRequest400'
-        '429':
-          $ref: >-
-            #/components/responses/TooManyRequests429
-        '500':
-          $ref: >-
-            #/components/responses/InternalServerError500
-        default:
-          $ref: '#/components/responses/DefaultError'
-      tags:
-        - ToolRuntime
-      summary: >-
-        Index documents so they can be used by the RAG system.
-      description: >-
-        Index documents so they can be used by the RAG system.
-      parameters: []
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/InsertRequest'
-        required: true
-      deprecated: false
-  /v1/tool-runtime/rag-tool/query:
-    post:
-      responses:
-        '200':
-          description: >-
-            RAGQueryResult containing the retrieved content and metadata
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RAGQueryResult'
-        '400':
-          $ref: '#/components/responses/BadRequest400'
-        '429':
-          $ref: >-
-            #/components/responses/TooManyRequests429
-        '500':
-          $ref: >-
-            #/components/responses/InternalServerError500
-        default:
-          $ref: '#/components/responses/DefaultError'
-      tags:
-        - ToolRuntime
-      summary: >-
-        Query the RAG system for context; typically invoked by the agent.
-      description: >-
-        Query the RAG system for context; typically invoked by the agent.
-      parameters: []
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/QueryRequest'
-        required: true
-      deprecated: false
  /v1/toolgroups:
    get:
      responses:
@@ -5205,7 +5164,7 @@ components:
           enum:
             - model
             - shield
-            - vector_db
+            - vector_store
             - dataset
             - scoring_function
             - benchmark
@@ -5793,6 +5752,10 @@ components:
           $ref: '#/components/schemas/OpenAIResponseUsage'
           description: >-
             (Optional) Token usage information for the response
+        instructions:
+          type: string
+          description: >-
+            (Optional) System message inserted into the model's context
         input:
           type: array
           items:
@@ -6196,6 +6159,10 @@ components:
           $ref: '#/components/schemas/OpenAIResponseUsage'
           description: >-
             (Optional) Token usage information for the response
+        instructions:
+          type: string
+          description: >-
+            (Optional) System message inserted into the model's context
       additionalProperties: false
       required:
         - created_at
@@ -7889,7 +7856,7 @@ components:
           enum:
             - model
             - shield
-            - vector_db
+            - vector_store
             - dataset
             - scoring_function
             - benchmark
@@ -8197,7 +8164,7 @@ components:
           enum:
             - model
             - shield
-            - vector_db
+            - vector_store
             - dataset
             - scoring_function
             - benchmark
@@ -8678,274 +8645,6 @@ components:
      title: ListToolDefsResponse
      description: >-
        Response containing a list of tool definitions.
-    RAGDocument:
-      type: object
-      properties:
-        document_id:
-          type: string
-          description: The unique identifier for the document.
-        content:
-          oneOf:
-            - type: string
-            - $ref: '#/components/schemas/InterleavedContentItem'
-            - type: array
-              items:
-                $ref: '#/components/schemas/InterleavedContentItem'
-            - $ref: '#/components/schemas/URL'
-          description: The content of the document.
-        mime_type:
-          type: string
-          description: The MIME type of the document.
-        metadata:
-          type: object
-          additionalProperties:
-            oneOf:
-              - type: 'null'
-              - type: boolean
-              - type: number
-              - type: string
-              - type: array
-              - type: object
-          description: Additional metadata for the document.
-      additionalProperties: false
-      required:
-        - document_id
-        - content
-        - metadata
-      title: RAGDocument
-      description: >-
-        A document to be used for document ingestion in the RAG Tool.
-    InsertRequest:
-      type: object
-      properties:
-        documents:
-          type: array
-          items:
-            $ref: '#/components/schemas/RAGDocument'
-          description: >-
-            List of documents to index in the RAG system
-        vector_db_id:
-          type: string
-          description: >-
-            ID of the vector database to store the document embeddings
-        chunk_size_in_tokens:
-          type: integer
-          description: >-
-            (Optional) Size in tokens for document chunking during indexing
-      additionalProperties: false
-      required:
-        - documents
-        - vector_db_id
-        - chunk_size_in_tokens
-      title: InsertRequest
-    DefaultRAGQueryGeneratorConfig:
-      type: object
-      properties:
-        type:
-          type: string
-          const: default
-          default: default
-          description: >-
-            Type of query generator, always 'default'
-        separator:
-          type: string
-          default: ' '
-          description: >-
-            String separator used to join query terms
-      additionalProperties: false
-      required:
-        - type
-        - separator
-      title: DefaultRAGQueryGeneratorConfig
-      description: >-
-        Configuration for the default RAG query generator.
-    LLMRAGQueryGeneratorConfig:
-      type: object
-      properties:
-        type:
-          type: string
-          const: llm
-          default: llm
-          description: Type of query generator, always 'llm'
-        model:
-          type: string
-          description: >-
-            Name of the language model to use for query generation
-        template:
-          type: string
-          description: >-
-            Template string for formatting the query generation prompt
-      additionalProperties: false
-      required:
-        - type
-        - model
-        - template
-      title: LLMRAGQueryGeneratorConfig
-      description: >-
-        Configuration for the LLM-based RAG query generator.
-    RAGQueryConfig:
-      type: object
-      properties:
-        query_generator_config:
-          oneOf:
-            - $ref: '#/components/schemas/DefaultRAGQueryGeneratorConfig'
-            - $ref: '#/components/schemas/LLMRAGQueryGeneratorConfig'
-          discriminator:
-            propertyName: type
-            mapping:
-              default: '#/components/schemas/DefaultRAGQueryGeneratorConfig'
-              llm: '#/components/schemas/LLMRAGQueryGeneratorConfig'
-          description: Configuration for the query generator.
-        max_tokens_in_context:
-          type: integer
-          default: 4096
-          description: Maximum number of tokens in the context.
-        max_chunks:
-          type: integer
-          default: 5
-          description: Maximum number of chunks to retrieve.
-        chunk_template:
-          type: string
-          default: >
-            Result {index}
-
-            Content: {chunk.content}
-
-            Metadata: {metadata}
-          description: >-
-            Template for formatting each retrieved chunk in the context. Available
-            placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk
-            content string), {metadata} (chunk metadata dict). Default: "Result {index}\nContent:
-            {chunk.content}\nMetadata: {metadata}\n"
-        mode:
-          $ref: '#/components/schemas/RAGSearchMode'
-          default: vector
-          description: >-
-            Search mode for retrieval—either "vector", "keyword", or "hybrid". Default
-            "vector".
-        ranker:
-          $ref: '#/components/schemas/Ranker'
-          description: >-
-            Configuration for the ranker to use in hybrid search. Defaults to RRF
-            ranker.
-      additionalProperties: false
-      required:
-        - query_generator_config
-        - max_tokens_in_context
-        - max_chunks
-        - chunk_template
-      title: RAGQueryConfig
-      description: >-
-        Configuration for the RAG query generation.
-    RAGSearchMode:
-      type: string
-      enum:
-        - vector
-        - keyword
-        - hybrid
-      title: RAGSearchMode
-      description: >-
-        Search modes for RAG query retrieval: - VECTOR: Uses vector similarity search
-        for semantic matching - KEYWORD: Uses keyword-based search for exact matching
-        - HYBRID: Combines both vector and keyword search for better results
-    RRFRanker:
-      type: object
-      properties:
-        type:
-          type: string
-          const: rrf
-          default: rrf
-          description: The type of ranker, always "rrf"
-        impact_factor:
-          type: number
-          default: 60.0
-          description: >-
-            The impact factor for RRF scoring. Higher values give more weight to higher-ranked
-            results. Must be greater than 0
-      additionalProperties: false
-      required:
-        - type
-        - impact_factor
-      title: RRFRanker
-      description: >-
-        Reciprocal Rank Fusion (RRF) ranker configuration.
-    Ranker:
-      oneOf:
-        - $ref: '#/components/schemas/RRFRanker'
-        - $ref: '#/components/schemas/WeightedRanker'
-      discriminator:
-        propertyName: type
-        mapping:
-          rrf: '#/components/schemas/RRFRanker'
-          weighted: '#/components/schemas/WeightedRanker'
-    WeightedRanker:
-      type: object
-      properties:
-        type:
-          type: string
-          const: weighted
-          default: weighted
-          description: The type of ranker, always "weighted"
-        alpha:
-          type: number
-          default: 0.5
-          description: >-
-            Weight factor between 0 and 1. 0 means only use keyword scores, 1 means
-            only use vector scores, values in between blend both scores.
-      additionalProperties: false
-      required:
-        - type
-        - alpha
-      title: WeightedRanker
-      description: >-
-        Weighted ranker configuration that combines vector and keyword scores.
-    QueryRequest:
-      type: object
-      properties:
-        content:
-          $ref: '#/components/schemas/InterleavedContent'
-          description: >-
-            The query content to search for in the indexed documents
-        vector_db_ids:
-          type: array
-          items:
-            type: string
-          description: >-
-            List of vector database IDs to search within
-        query_config:
-          $ref: '#/components/schemas/RAGQueryConfig'
-          description: >-
-            (Optional) Configuration parameters for the query operation
-      additionalProperties: false
-      required:
-        - content
-        - vector_db_ids
-      title: QueryRequest
-    RAGQueryResult:
-      type: object
-      properties:
-        content:
-          $ref: '#/components/schemas/InterleavedContent'
-          description: >-
-            (Optional) The retrieved content from the query
-        metadata:
-          type: object
-          additionalProperties:
-            oneOf:
-              - type: 'null'
-              - type: boolean
-              - type: number
-              - type: string
-              - type: array
-              - type: object
-          description: >-
-            Additional metadata about the query result
-      additionalProperties: false
-      required:
-        - metadata
-      title: RAGQueryResult
-      description: >-
-        Result of a RAG query containing retrieved content and metadata.
    ToolGroup:
      type: object
      properties:
@@ -8960,7 +8659,7 @@ components:
           enum:
             - model
             - shield
-            - vector_db
+            - vector_store
             - dataset
             - scoring_function
             - benchmark
@@ -10146,9 +9845,9 @@ tags:
       - `background`
     x-displayName: Agents
 - name: Conversations
-  description: ''
-  x-displayName: >-
+  description: >-
     Protocol for conversation management operations.
+  x-displayName: Conversations
 - name: Files
   description: >-
     This API is used to upload documents that can be used with other Llama Stack

docs/static/stainless-llama-stack-spec.html (481 changes, vendored)

@@ -282,7 +282,7 @@
           "Conversations"
         ],
         "summary": "Create a conversation.",
-        "description": "Create a conversation.",
+        "description": "Create a conversation.\nCreate a conversation.",
         "parameters": [],
         "requestBody": {
           "content": {
@@ -326,8 +326,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Get a conversation with the given ID.",
-        "description": "Get a conversation with the given ID.",
+        "summary": "Retrieve a conversation.",
+        "description": "Retrieve a conversation.\nGet a conversation with the given ID.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -369,8 +369,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Update a conversation's metadata with the given ID.",
-        "description": "Update a conversation's metadata with the given ID.",
+        "summary": "Update a conversation.",
+        "description": "Update a conversation.\nUpdate a conversation's metadata with the given ID.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -422,8 +422,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Delete a conversation with the given ID.",
-        "description": "Delete a conversation with the given ID.",
+        "summary": "Delete a conversation.",
+        "description": "Delete a conversation.\nDelete a conversation with the given ID.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -467,8 +467,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "List items in the conversation.",
-        "description": "List items in the conversation.",
+        "summary": "List items.",
+        "description": "List items.\nList items in the conversation.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -597,8 +597,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Create items in the conversation.",
-        "description": "Create items in the conversation.",
+        "summary": "Create items.",
+        "description": "Create items.\nCreate items in the conversation.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -652,8 +652,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Retrieve a conversation item.",
-        "description": "Retrieve a conversation item.",
+        "summary": "Retrieve an item.",
+        "description": "Retrieve an item.\nRetrieve a conversation item.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -704,8 +704,8 @@
         "tags": [
           "Conversations"
         ],
-        "summary": "Delete a conversation item.",
-        "description": "Delete a conversation item.",
+        "summary": "Delete an item.",
+        "description": "Delete an item.\nDelete a conversation item.",
         "parameters": [
           {
             "name": "conversation_id",
@@ -2624,89 +2624,6 @@
         "deprecated": false
       }
     },
-    "/v1/tool-runtime/rag-tool/insert": {
-      "post": {
-        "responses": {
-          "200": {
-            "description": "OK"
-          },
-          "400": {
-            "$ref": "#/components/responses/BadRequest400"
-          },
-          "429": {
-            "$ref": "#/components/responses/TooManyRequests429"
-          },
-          "500": {
-            "$ref": "#/components/responses/InternalServerError500"
-          },
-          "default": {
-            "$ref": "#/components/responses/DefaultError"
-          }
-        },
-        "tags": [
-          "ToolRuntime"
-        ],
-        "summary": "Index documents so they can be used by the RAG system.",
-        "description": "Index documents so they can be used by the RAG system.",
-        "parameters": [],
-        "requestBody": {
-          "content": {
-            "application/json": {
-              "schema": {
-                "$ref": "#/components/schemas/InsertRequest"
-              }
-            }
-          },
-          "required": true
-        },
-        "deprecated": false
-      }
-    },
-    "/v1/tool-runtime/rag-tool/query": {
-      "post": {
-        "responses": {
-          "200": {
-            "description": "RAGQueryResult containing the retrieved content and metadata",
-            "content": {
-              "application/json": {
-                "schema": {
-                  "$ref": "#/components/schemas/RAGQueryResult"
-                }
-              }
-            }
-          },
-          "400": {
-            "$ref": "#/components/responses/BadRequest400"
-          },
-          "429": {
-            "$ref": "#/components/responses/TooManyRequests429"
-          },
-          "500": {
-            "$ref": "#/components/responses/InternalServerError500"
-          },
-          "default": {
-            "$ref": "#/components/responses/DefaultError"
-          }
-        },
-        "tags": [
-          "ToolRuntime"
-        ],
-        "summary": "Query the RAG system for context; typically invoked by the agent.",
-        "description": "Query the RAG system for context; typically invoked by the agent.",
-        "parameters": [],
-        "requestBody": {
-          "content": {
-            "application/json": {
-              "schema": {
-                "$ref": "#/components/schemas/QueryRequest"
-              }
-            }
-          },
-          "required": true
-        },
-        "deprecated": false
-      }
-    },
    "/v1/toolgroups": {
      "get": {
        "responses": {
@@ -8472,7 +8389,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -9272,6 +9189,10 @@
             "$ref": "#/components/schemas/OpenAIResponseUsage",
             "description": "(Optional) Token usage information for the response"
           },
+          "instructions": {
+            "type": "string",
+            "description": "(Optional) System message inserted into the model's context"
+          },
           "input": {
             "type": "array",
             "items": {
@@ -9820,6 +9741,10 @@
           "usage": {
             "$ref": "#/components/schemas/OpenAIResponseUsage",
             "description": "(Optional) Token usage information for the response"
           },
+          "instructions": {
+            "type": "string",
+            "description": "(Optional) System message inserted into the model's context"
+          }
         },
         "additionalProperties": false,
@@ -11869,7 +11794,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -12351,7 +12276,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -13047,346 +12972,6 @@
        "title": "ListToolDefsResponse",
        "description": "Response containing a list of tool definitions."
      },
-      "RAGDocument": {
-        "type": "object",
-        "properties": {
-          "document_id": {
-            "type": "string",
-            "description": "The unique identifier for the document."
-          },
-          "content": {
-            "oneOf": [
-              {
-                "type": "string"
-              },
-              {
-                "$ref": "#/components/schemas/InterleavedContentItem"
-              },
-              {
-                "type": "array",
-                "items": {
-                  "$ref": "#/components/schemas/InterleavedContentItem"
-                }
-              },
-              {
-                "$ref": "#/components/schemas/URL"
-              }
-            ],
-            "description": "The content of the document."
-          },
-          "mime_type": {
-            "type": "string",
-            "description": "The MIME type of the document."
-          },
-          "metadata": {
-            "type": "object",
-            "additionalProperties": {
-              "oneOf": [
-                {
-                  "type": "null"
-                },
-                {
-                  "type": "boolean"
-                },
-                {
-                  "type": "number"
-                },
-                {
-                  "type": "string"
-                },
-                {
-                  "type": "array"
-                },
-                {
-                  "type": "object"
-                }
-              ]
-            },
-            "description": "Additional metadata for the document."
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "document_id",
-          "content",
-          "metadata"
-        ],
-        "title": "RAGDocument",
-        "description": "A document to be used for document ingestion in the RAG Tool."
-      },
-      "InsertRequest": {
-        "type": "object",
-        "properties": {
-          "documents": {
-            "type": "array",
-            "items": {
-              "$ref": "#/components/schemas/RAGDocument"
-            },
-            "description": "List of documents to index in the RAG system"
-          },
-          "vector_db_id": {
-            "type": "string",
-            "description": "ID of the vector database to store the document embeddings"
-          },
-          "chunk_size_in_tokens": {
-            "type": "integer",
-            "description": "(Optional) Size in tokens for document chunking during indexing"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "documents",
-          "vector_db_id",
-          "chunk_size_in_tokens"
-        ],
-        "title": "InsertRequest"
-      },
-      "DefaultRAGQueryGeneratorConfig": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "default",
-            "default": "default",
-            "description": "Type of query generator, always 'default'"
-          },
-          "separator": {
-            "type": "string",
-            "default": " ",
-            "description": "String separator used to join query terms"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "separator"
-        ],
-        "title": "DefaultRAGQueryGeneratorConfig",
-        "description": "Configuration for the default RAG query generator."
-      },
-      "LLMRAGQueryGeneratorConfig": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "llm",
-            "default": "llm",
-            "description": "Type of query generator, always 'llm'"
-          },
-          "model": {
-            "type": "string",
-            "description": "Name of the language model to use for query generation"
-          },
-          "template": {
-            "type": "string",
-            "description": "Template string for formatting the query generation prompt"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "model",
-          "template"
-        ],
-        "title": "LLMRAGQueryGeneratorConfig",
-        "description": "Configuration for the LLM-based RAG query generator."
-      },
-      "RAGQueryConfig": {
-        "type": "object",
-        "properties": {
-          "query_generator_config": {
-            "oneOf": [
-              {
-                "$ref": "#/components/schemas/DefaultRAGQueryGeneratorConfig"
-              },
-              {
-                "$ref": "#/components/schemas/LLMRAGQueryGeneratorConfig"
-              }
-            ],
-            "discriminator": {
-              "propertyName": "type",
-              "mapping": {
-                "default": "#/components/schemas/DefaultRAGQueryGeneratorConfig",
-                "llm": "#/components/schemas/LLMRAGQueryGeneratorConfig"
-              }
-            },
-            "description": "Configuration for the query generator."
-          },
-          "max_tokens_in_context": {
-            "type": "integer",
-            "default": 4096,
-            "description": "Maximum number of tokens in the context."
-          },
-          "max_chunks": {
-            "type": "integer",
-            "default": 5,
-            "description": "Maximum number of chunks to retrieve."
-          },
-          "chunk_template": {
-            "type": "string",
-            "default": "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n",
-            "description": "Template for formatting each retrieved chunk in the context. Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict). Default: \"Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n\""
-          },
-          "mode": {
-            "$ref": "#/components/schemas/RAGSearchMode",
-            "default": "vector",
-            "description": "Search mode for retrieval—either \"vector\", \"keyword\", or \"hybrid\". Default \"vector\"."
-          },
-          "ranker": {
-            "$ref": "#/components/schemas/Ranker",
-            "description": "Configuration for the ranker to use in hybrid search. Defaults to RRF ranker."
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "query_generator_config",
-          "max_tokens_in_context",
-          "max_chunks",
-          "chunk_template"
-        ],
-        "title": "RAGQueryConfig",
-        "description": "Configuration for the RAG query generation."
-      },
-      "RAGSearchMode": {
-        "type": "string",
-        "enum": [
-          "vector",
-          "keyword",
-          "hybrid"
-        ],
-        "title": "RAGSearchMode",
-        "description": "Search modes for RAG query retrieval: - VECTOR: Uses vector similarity search for semantic matching - KEYWORD: Uses keyword-based search for exact matching - HYBRID: Combines both vector and keyword search for better results"
-      },
-      "RRFRanker": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "rrf",
-            "default": "rrf",
-            "description": "The type of ranker, always \"rrf\""
-          },
-          "impact_factor": {
-            "type": "number",
-            "default": 60.0,
-            "description": "The impact factor for RRF scoring. Higher values give more weight to higher-ranked results. Must be greater than 0"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "impact_factor"
-        ],
-        "title": "RRFRanker",
-        "description": "Reciprocal Rank Fusion (RRF) ranker configuration."
-      },
-      "Ranker": {
-        "oneOf": [
-          {
-            "$ref": "#/components/schemas/RRFRanker"
-          },
-          {
-            "$ref": "#/components/schemas/WeightedRanker"
-          }
-        ],
-        "discriminator": {
-          "propertyName": "type",
-          "mapping": {
-            "rrf": "#/components/schemas/RRFRanker",
-            "weighted": "#/components/schemas/WeightedRanker"
-          }
-        }
-      },
-      "WeightedRanker": {
-        "type": "object",
-        "properties": {
-          "type": {
-            "type": "string",
-            "const": "weighted",
-            "default": "weighted",
-            "description": "The type of ranker, always \"weighted\""
-          },
-          "alpha": {
-            "type": "number",
-            "default": 0.5,
-            "description": "Weight factor between 0 and 1. 0 means only use keyword scores, 1 means only use vector scores, values in between blend both scores."
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "type",
-          "alpha"
-        ],
-        "title": "WeightedRanker",
-        "description": "Weighted ranker configuration that combines vector and keyword scores."
-      },
-      "QueryRequest": {
-        "type": "object",
-        "properties": {
-          "content": {
-            "$ref": "#/components/schemas/InterleavedContent",
-            "description": "The query content to search for in the indexed documents"
-          },
-          "vector_db_ids": {
-            "type": "array",
-            "items": {
-              "type": "string"
-            },
-            "description": "List of vector database IDs to search within"
-          },
-          "query_config": {
-            "$ref": "#/components/schemas/RAGQueryConfig",
-            "description": "(Optional) Configuration parameters for the query operation"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "content",
-          "vector_db_ids"
-        ],
-        "title": "QueryRequest"
-      },
-      "RAGQueryResult": {
-        "type": "object",
-        "properties": {
-          "content": {
-            "$ref": "#/components/schemas/InterleavedContent",
-            "description": "(Optional) The retrieved content from the query"
-          },
-          "metadata": {
-            "type": "object",
-            "additionalProperties": {
-              "oneOf": [
-                {
-                  "type": "null"
-                },
-                {
-                  "type": "boolean"
-                },
-                {
-                  "type": "number"
-                },
-                {
-                  "type": "string"
-                },
-                {
-                  "type": "array"
-                },
-                {
-                  "type": "object"
-                }
-              ]
-            },
-            "description": "Additional metadata about the query result"
-          }
-        },
-        "additionalProperties": false,
-        "required": [
-          "metadata"
-        ],
-        "title": "RAGQueryResult",
-        "description": "Result of a RAG query containing retrieved content and metadata."
-      },
      "ToolGroup": {
        "type": "object",
        "properties": {
@@ -13404,7 +12989,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -14951,7 +14536,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -16696,7 +16281,7 @@
           "enum": [
             "model",
             "shield",
-            "vector_db",
+            "vector_store",
             "dataset",
             "scoring_function",
             "benchmark",
@@ -17928,8 +17513,8 @@
     },
     {
       "name": "Conversations",
-      "description": "",
-      "x-displayName": "Protocol for conversation management operations."
+      "description": "Protocol for conversation management operations.",
+      "x-displayName": "Conversations"
     },
     {
       "name": "DatasetIO",
@@ -17941,8 +17526,8 @@
     },
     {
       "name": "Eval",
-      "description": "",
-      "x-displayName": "Llama Stack Evaluation API for running evaluations on model and agent candidates."
+      "description": "Llama Stack Evaluation API for running evaluations on model and agent candidates.",
+      "x-displayName": "Evaluations"
     },
     {
       "name": "Files",

docs/static/stainless-llama-stack-spec.yaml (411 changes, vendored)

@ -195,7 +195,10 @@ paths:
|
|||
tags:
|
||||
- Conversations
|
||||
summary: Create a conversation.
|
||||
description: Create a conversation.
|
||||
description: >-
|
||||
Create a conversation.
|
||||
|
||||
Create a conversation.
|
||||
parameters: []
|
||||
requestBody:
|
||||
content:
|
||||
|
@ -225,8 +228,11 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: Get a conversation with the given ID.
|
||||
description: Get a conversation with the given ID.
|
||||
summary: Retrieve a conversation.
|
||||
description: >-
|
||||
Retrieve a conversation.
|
||||
|
||||
Get a conversation with the given ID.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
in: path
|
||||
|
@ -255,9 +261,10 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: >-
|
||||
Update a conversation's metadata with the given ID.
|
||||
summary: Update a conversation.
|
||||
description: >-
|
||||
Update a conversation.
|
||||
|
||||
Update a conversation's metadata with the given ID.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
|
@ -293,8 +300,11 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: Delete a conversation with the given ID.
|
||||
description: Delete a conversation with the given ID.
|
||||
summary: Delete a conversation.
|
||||
description: >-
|
||||
Delete a conversation.
|
||||
|
||||
Delete a conversation with the given ID.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
in: path
|
||||
|
@ -324,8 +334,11 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: List items in the conversation.
|
||||
description: List items in the conversation.
|
||||
summary: List items.
|
||||
description: >-
|
||||
List items.
|
||||
|
||||
List items in the conversation.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
in: path
|
||||
|
@ -498,8 +511,11 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: Create items in the conversation.
|
||||
description: Create items in the conversation.
|
||||
summary: Create items.
|
||||
description: >-
|
||||
Create items.
|
||||
|
||||
Create items in the conversation.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
in: path
|
||||
|
@ -535,8 +551,11 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: Retrieve a conversation item.
|
||||
description: Retrieve a conversation item.
|
||||
summary: Retrieve an item.
|
||||
description: >-
|
||||
Retrieve an item.
|
||||
|
||||
Retrieve a conversation item.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
in: path
|
||||
|
@ -571,8 +590,11 @@ paths:
|
|||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- Conversations
|
||||
summary: Delete a conversation item.
|
||||
description: Delete a conversation item.
|
||||
summary: Delete an item.
|
||||
description: >-
|
||||
Delete an item.
|
||||
|
||||
Delete a conversation item.
|
||||
parameters:
|
||||
- name: conversation_id
|
||||
in: path
|
||||
|
@ -2017,69 +2039,6 @@ paths:
|
|||
schema:
|
||||
$ref: '#/components/schemas/URL'
|
||||
deprecated: false
|
||||
/v1/tool-runtime/rag-tool/insert:
|
||||
post:
|
||||
responses:
|
||||
'200':
|
||||
description: OK
|
||||
'400':
|
||||
$ref: '#/components/responses/BadRequest400'
|
||||
'429':
|
||||
$ref: >-
|
||||
#/components/responses/TooManyRequests429
|
||||
'500':
|
||||
$ref: >-
|
||||
#/components/responses/InternalServerError500
|
||||
default:
|
||||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- ToolRuntime
|
||||
summary: >-
|
||||
Index documents so they can be used by the RAG system.
|
||||
description: >-
|
||||
Index documents so they can be used by the RAG system.
|
||||
parameters: []
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/InsertRequest'
|
||||
required: true
|
||||
deprecated: false
|
||||
/v1/tool-runtime/rag-tool/query:
|
||||
post:
|
||||
responses:
|
||||
'200':
|
||||
description: >-
|
||||
RAGQueryResult containing the retrieved content and metadata
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/RAGQueryResult'
|
||||
'400':
|
||||
$ref: '#/components/responses/BadRequest400'
|
||||
'429':
|
||||
$ref: >-
|
||||
#/components/responses/TooManyRequests429
|
||||
'500':
|
||||
$ref: >-
|
||||
#/components/responses/InternalServerError500
|
||||
default:
|
||||
$ref: '#/components/responses/DefaultError'
|
||||
tags:
|
||||
- ToolRuntime
|
||||
summary: >-
|
||||
Query the RAG system for context; typically invoked by the agent.
|
||||
description: >-
|
||||
Query the RAG system for context; typically invoked by the agent.
|
||||
parameters: []
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: '#/components/schemas/QueryRequest'
|
||||
required: true
|
||||
deprecated: false
|
||||
/v1/toolgroups:
|
||||
get:
|
||||
responses:
|
||||
|
@ -6418,7 +6377,7 @@ components:
|
|||
enum:
|
||||
- model
|
||||
- shield
|
||||
- vector_db
|
||||
- vector_store
|
||||
- dataset
|
||||
- scoring_function
|
||||
- benchmark
|
||||
|
@ -7006,6 +6965,10 @@ components:
|
|||
$ref: '#/components/schemas/OpenAIResponseUsage'
|
||||
description: >-
|
||||
(Optional) Token usage information for the response
|
||||
instructions:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) System message inserted into the model's context
|
||||
input:
|
||||
type: array
|
||||
items:
|
||||
|
@ -7409,6 +7372,10 @@ components:
|
|||
$ref: '#/components/schemas/OpenAIResponseUsage'
|
||||
description: >-
|
||||
(Optional) Token usage information for the response
|
||||
instructions:
|
||||
type: string
|
||||
description: >-
|
||||
(Optional) System message inserted into the model's context
|
||||
additionalProperties: false
|
||||
required:
|
||||
- created_at
|
||||
|
@ -9102,7 +9069,7 @@ components:
|
|||
enum:
|
||||
- model
|
||||
- shield
|
||||
- vector_db
|
||||
- vector_store
|
||||
- dataset
|
||||
- scoring_function
|
||||
- benchmark
|
||||
|
@ -9410,7 +9377,7 @@ components:
|
|||
enum:
|
||||
- model
|
||||
- shield
|
||||
- vector_db
|
||||
- vector_store
|
||||
- dataset
|
||||
- scoring_function
|
||||
- benchmark
|
||||
|
@@ -9891,274 +9858,6 @@ components:
      title: ListToolDefsResponse
      description: >-
        Response containing a list of tool definitions.
    RAGDocument:
      type: object
      properties:
        document_id:
          type: string
          description: The unique identifier for the document.
        content:
          oneOf:
            - type: string
            - $ref: '#/components/schemas/InterleavedContentItem'
            - type: array
              items:
                $ref: '#/components/schemas/InterleavedContentItem'
            - $ref: '#/components/schemas/URL'
          description: The content of the document.
        mime_type:
          type: string
          description: The MIME type of the document.
        metadata:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: Additional metadata for the document.
      additionalProperties: false
      required:
        - document_id
        - content
        - metadata
      title: RAGDocument
      description: >-
        A document to be used for document ingestion in the RAG Tool.
    InsertRequest:
      type: object
      properties:
        documents:
          type: array
          items:
            $ref: '#/components/schemas/RAGDocument'
          description: >-
            List of documents to index in the RAG system
        vector_db_id:
          type: string
          description: >-
            ID of the vector database to store the document embeddings
        chunk_size_in_tokens:
          type: integer
          description: >-
            (Optional) Size in tokens for document chunking during indexing
      additionalProperties: false
      required:
        - documents
        - vector_db_id
        - chunk_size_in_tokens
      title: InsertRequest
    DefaultRAGQueryGeneratorConfig:
      type: object
      properties:
        type:
          type: string
          const: default
          default: default
          description: >-
            Type of query generator, always 'default'
        separator:
          type: string
          default: ' '
          description: >-
            String separator used to join query terms
      additionalProperties: false
      required:
        - type
        - separator
      title: DefaultRAGQueryGeneratorConfig
      description: >-
        Configuration for the default RAG query generator.
    LLMRAGQueryGeneratorConfig:
      type: object
      properties:
        type:
          type: string
          const: llm
          default: llm
          description: Type of query generator, always 'llm'
        model:
          type: string
          description: >-
            Name of the language model to use for query generation
        template:
          type: string
          description: >-
            Template string for formatting the query generation prompt
      additionalProperties: false
      required:
        - type
        - model
        - template
      title: LLMRAGQueryGeneratorConfig
      description: >-
        Configuration for the LLM-based RAG query generator.
    RAGQueryConfig:
      type: object
      properties:
        query_generator_config:
          oneOf:
            - $ref: '#/components/schemas/DefaultRAGQueryGeneratorConfig'
            - $ref: '#/components/schemas/LLMRAGQueryGeneratorConfig'
          discriminator:
            propertyName: type
            mapping:
              default: '#/components/schemas/DefaultRAGQueryGeneratorConfig'
              llm: '#/components/schemas/LLMRAGQueryGeneratorConfig'
          description: Configuration for the query generator.
        max_tokens_in_context:
          type: integer
          default: 4096
          description: Maximum number of tokens in the context.
        max_chunks:
          type: integer
          default: 5
          description: Maximum number of chunks to retrieve.
        chunk_template:
          type: string
          default: >
            Result {index}

            Content: {chunk.content}

            Metadata: {metadata}
          description: >-
            Template for formatting each retrieved chunk in the context. Available
            placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk
            content string), {metadata} (chunk metadata dict). Default: "Result {index}\nContent:
            {chunk.content}\nMetadata: {metadata}\n"
        mode:
          $ref: '#/components/schemas/RAGSearchMode'
          default: vector
          description: >-
            Search mode for retrieval—either "vector", "keyword", or "hybrid". Default
            "vector".
        ranker:
          $ref: '#/components/schemas/Ranker'
          description: >-
            Configuration for the ranker to use in hybrid search. Defaults to RRF
            ranker.
      additionalProperties: false
      required:
        - query_generator_config
        - max_tokens_in_context
        - max_chunks
        - chunk_template
      title: RAGQueryConfig
      description: >-
        Configuration for the RAG query generation.
    RAGSearchMode:
      type: string
      enum:
        - vector
        - keyword
        - hybrid
      title: RAGSearchMode
      description: >-
        Search modes for RAG query retrieval: - VECTOR: Uses vector similarity search
        for semantic matching - KEYWORD: Uses keyword-based search for exact matching
        - HYBRID: Combines both vector and keyword search for better results
    RRFRanker:
      type: object
      properties:
        type:
          type: string
          const: rrf
          default: rrf
          description: The type of ranker, always "rrf"
        impact_factor:
          type: number
          default: 60.0
          description: >-
            The impact factor for RRF scoring. Higher values give more weight to higher-ranked
            results. Must be greater than 0
      additionalProperties: false
      required:
        - type
        - impact_factor
      title: RRFRanker
      description: >-
        Reciprocal Rank Fusion (RRF) ranker configuration.
    Ranker:
      oneOf:
        - $ref: '#/components/schemas/RRFRanker'
        - $ref: '#/components/schemas/WeightedRanker'
      discriminator:
        propertyName: type
        mapping:
          rrf: '#/components/schemas/RRFRanker'
          weighted: '#/components/schemas/WeightedRanker'
    WeightedRanker:
      type: object
      properties:
        type:
          type: string
          const: weighted
          default: weighted
          description: The type of ranker, always "weighted"
        alpha:
          type: number
          default: 0.5
          description: >-
            Weight factor between 0 and 1. 0 means only use keyword scores, 1 means
            only use vector scores, values in between blend both scores.
      additionalProperties: false
      required:
        - type
        - alpha
      title: WeightedRanker
      description: >-
        Weighted ranker configuration that combines vector and keyword scores.
    QueryRequest:
      type: object
      properties:
        content:
          $ref: '#/components/schemas/InterleavedContent'
          description: >-
            The query content to search for in the indexed documents
        vector_db_ids:
          type: array
          items:
            type: string
          description: >-
            List of vector database IDs to search within
        query_config:
          $ref: '#/components/schemas/RAGQueryConfig'
          description: >-
            (Optional) Configuration parameters for the query operation
      additionalProperties: false
      required:
        - content
        - vector_db_ids
      title: QueryRequest
    RAGQueryResult:
      type: object
      properties:
        content:
          $ref: '#/components/schemas/InterleavedContent'
          description: >-
            (Optional) The retrieved content from the query
        metadata:
          type: object
          additionalProperties:
            oneOf:
              - type: 'null'
              - type: boolean
              - type: number
              - type: string
              - type: array
              - type: object
          description: >-
            Additional metadata about the query result
      additionalProperties: false
      required:
        - metadata
      title: RAGQueryResult
      description: >-
        Result of a RAG query containing retrieved content and metadata.
    ToolGroup:
      type: object
      properties:
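The `chunk_template` default above is easiest to see in action. A minimal sketch of how the placeholders expand; the `Chunk` dataclass here is a stand-in for illustration, not the actual llama-stack type:

```python
from dataclasses import dataclass


@dataclass
class Chunk:  # hypothetical stand-in for the retrieved-chunk type
    content: str
    metadata: dict


template = "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n"
chunks = [Chunk("Llama 3 supports long context.", {"source": "notes.md"})]

# {index} is 1-based; {chunk.content} and {metadata} come from each retrieved chunk.
context = "".join(
    template.format(index=i + 1, chunk=chunk, metadata=chunk.metadata)
    for i, chunk in enumerate(chunks)
)
print(context)
# Result 1
# Content: Llama 3 supports long context.
# Metadata: {'source': 'notes.md'}
```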
@@ -10173,7 +9872,7 @@ components:
          enum:
            - model
            - shield
            - vector_db
            - vector_store
            - dataset
            - scoring_function
            - benchmark
@@ -11295,7 +10994,7 @@ components:
          enum:
            - model
            - shield
            - vector_db
            - vector_store
            - dataset
            - scoring_function
            - benchmark
@@ -12622,7 +12321,7 @@ components:
          enum:
            - model
            - shield
            - vector_db
            - vector_store
            - dataset
            - scoring_function
            - benchmark
@@ -13533,17 +13232,17 @@ tags:
  - name: Benchmarks
    description: ''
  - name: Conversations
    description: ''
    x-displayName: >-
    description: >-
      Protocol for conversation management operations.
    x-displayName: Conversations
  - name: DatasetIO
    description: ''
  - name: Datasets
    description: ''
  - name: Eval
    description: ''
    x-displayName: >-
    description: >-
      Llama Stack Evaluation API for running evaluations on model and agent candidates.
    x-displayName: Evaluations
  - name: Files
    description: >-
      This API is used to upload documents that can be used with other Llama Stack
@@ -78,17 +78,14 @@ If you're looking for more specific topics, we have a [Zero to Hero Guide](#next

## Build, Configure, and Run Llama Stack

1. **Build the Llama Stack**:
   Build the Llama Stack using the `starter` template:
1. **Install dependencies**:
   ```bash
   uv run --with llama-stack llama stack build --distro starter --image-type venv
   llama stack list-deps starter | xargs -L1 uv pip install
   ```
   **Expected Output:**

2. **Start the distribution**:
   ```bash
   ...
   Build Successful!
   You can find the newly-built template here: ~/.llama/distributions/starter/starter-run.yaml
   You can run the new Llama Stack Distro via: uv run --with llama-stack llama stack run starter
   llama stack run starter
   ```

3. **Set the ENV variables by exporting them to the terminal**:
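If you script this setup rather than typing it, the same two steps can be driven from Python with `subprocess`. This is only a sketch of the shell pipeline above, and it assumes the `llama` and `uv` CLIs are already on your PATH:

```python
import subprocess

# Step 1: install the starter distribution's dependencies
# (equivalent to: llama stack list-deps starter | xargs -L1 uv pip install).
deps = subprocess.run(
    ["llama", "stack", "list-deps", "starter"],
    capture_output=True, text=True, check=True,
).stdout.splitlines()
for dep in deps:
    if dep.strip():
        # xargs -L1 runs one install per output line; mirror that here.
        subprocess.run(["uv", "pip", "install", *dep.split()], check=True)

# Step 2: start the distribution.
subprocess.run(["llama", "stack", "run", "starter"], check=True)
```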
@@ -545,6 +545,7 @@ class OpenAIResponseObject(BaseModel):
    :param tools: (Optional) An array of tools the model may call while generating a response.
    :param truncation: (Optional) Truncation strategy applied to the response
    :param usage: (Optional) Token usage information for the response
    :param instructions: (Optional) System message inserted into the model's context
    """

    created_at: int

@@ -564,6 +565,7 @@ class OpenAIResponseObject(BaseModel):
    tools: list[OpenAIResponseTool] | None = None
    truncation: str | None = None
    usage: OpenAIResponseUsage | None = None
    instructions: str | None = None


@json_schema_type
@@ -173,7 +173,9 @@ class ConversationItemDeletedResource(BaseModel):
@runtime_checkable
@trace_protocol
class Conversations(Protocol):
    """Protocol for conversation management operations."""
    """Conversations

    Protocol for conversation management operations."""

    @webmethod(route="/conversations", method="POST", level=LLAMA_STACK_API_V1)
    async def create_conversation(

@@ -181,6 +183,8 @@ class Conversations(Protocol):
    ) -> Conversation:
        """Create a conversation.

        Create a conversation.

        :param items: Initial items to include in the conversation context.
        :param metadata: Set of key-value pairs that can be attached to an object.
        :returns: The created conversation object.

@@ -189,7 +193,9 @@ class Conversations(Protocol):

    @webmethod(route="/conversations/{conversation_id}", method="GET", level=LLAMA_STACK_API_V1)
    async def get_conversation(self, conversation_id: str) -> Conversation:
        """Get a conversation with the given ID.
        """Retrieve a conversation.

        Get a conversation with the given ID.

        :param conversation_id: The conversation identifier.
        :returns: The conversation object.

@@ -198,7 +204,9 @@ class Conversations(Protocol):

    @webmethod(route="/conversations/{conversation_id}", method="POST", level=LLAMA_STACK_API_V1)
    async def update_conversation(self, conversation_id: str, metadata: Metadata) -> Conversation:
        """Update a conversation's metadata with the given ID.
        """Update a conversation.

        Update a conversation's metadata with the given ID.

        :param conversation_id: The conversation identifier.
        :param metadata: Set of key-value pairs that can be attached to an object.

@@ -208,7 +216,9 @@ class Conversations(Protocol):

    @webmethod(route="/conversations/{conversation_id}", method="DELETE", level=LLAMA_STACK_API_V1)
    async def openai_delete_conversation(self, conversation_id: str) -> ConversationDeletedResource:
        """Delete a conversation with the given ID.
        """Delete a conversation.

        Delete a conversation with the given ID.

        :param conversation_id: The conversation identifier.
        :returns: The deleted conversation resource.

@@ -217,7 +227,9 @@ class Conversations(Protocol):

    @webmethod(route="/conversations/{conversation_id}/items", method="POST", level=LLAMA_STACK_API_V1)
    async def add_items(self, conversation_id: str, items: list[ConversationItem]) -> ConversationItemList:
        """Create items in the conversation.
        """Create items.

        Create items in the conversation.

        :param conversation_id: The conversation identifier.
        :param items: Items to include in the conversation context.

@@ -227,7 +239,9 @@ class Conversations(Protocol):

    @webmethod(route="/conversations/{conversation_id}/items/{item_id}", method="GET", level=LLAMA_STACK_API_V1)
    async def retrieve(self, conversation_id: str, item_id: str) -> ConversationItem:
        """Retrieve a conversation item.
        """Retrieve an item.

        Retrieve a conversation item.

        :param conversation_id: The conversation identifier.
        :param item_id: The item identifier.

@@ -244,7 +258,9 @@ class Conversations(Protocol):
        limit: int | NotGiven = NOT_GIVEN,
        order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
    ) -> ConversationItemList:
        """List items in the conversation.
        """List items.

        List items in the conversation.

        :param conversation_id: The conversation identifier.
        :param after: An item ID to list items after, used in pagination.

@@ -259,7 +275,9 @@ class Conversations(Protocol):
    async def openai_delete_conversation_item(
        self, conversation_id: str, item_id: str
    ) -> ConversationItemDeletedResource:
        """Delete a conversation item.
        """Delete an item.

        Delete a conversation item.

        :param conversation_id: The conversation identifier.
        :param item_id: The item identifier.
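Taken together, the decorated methods above map onto a small REST surface under `/v1/conversations`. A hedged usage sketch over plain HTTP; the base URL and the item payload shape are assumptions, so check the generated OpenAPI spec for the authoritative request bodies:

```python
import requests

BASE = "http://localhost:8321/v1"  # assumed local server address

# Create a conversation with initial metadata.
conv = requests.post(f"{BASE}/conversations", json={"metadata": {"topic": "demo"}}).json()
conv_id = conv["id"]

# Add an item, then list items back in ascending order.
requests.post(
    f"{BASE}/conversations/{conv_id}/items",
    json={"items": [{"type": "message", "role": "user", "content": "hello"}]},
)
items = requests.get(
    f"{BASE}/conversations/{conv_id}/items", params={"order": "asc"}
).json()

# Update the metadata, then delete the conversation.
requests.post(f"{BASE}/conversations/{conv_id}", json={"metadata": {"topic": "demo", "done": "true"}})
requests.delete(f"{BASE}/conversations/{conv_id}")
```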
@@ -121,6 +121,7 @@ class Api(Enum, metaclass=DynamicApiMeta):

    models = "models"
    shields = "shields"
    vector_stores = "vector_stores"  # only used for routing table
    datasets = "datasets"
    scoring_functions = "scoring_functions"
    benchmarks = "benchmarks"
@@ -82,7 +82,9 @@ class EvaluateResponse(BaseModel):


class Eval(Protocol):
    """Llama Stack Evaluation API for running evaluations on model and agent candidates."""
    """Evaluations

    Llama Stack Evaluation API for running evaluations on model and agent candidates."""

    @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs", method="POST", level=LLAMA_STACK_API_V1, deprecated=True)
    @webmethod(route="/eval/benchmarks/{benchmark_id}/jobs", method="POST", level=LLAMA_STACK_API_V1ALPHA)
@@ -13,7 +13,7 @@ from pydantic import BaseModel, Field
class ResourceType(StrEnum):
    model = "model"
    shield = "shield"
    vector_db = "vector_db"
    vector_store = "vector_store"
    dataset = "dataset"
    scoring_function = "scoring_function"
    benchmark = "benchmark"

@@ -34,4 +34,4 @@ class Resource(BaseModel):

    provider_id: str = Field(description="ID of the provider that owns this resource")

    type: ResourceType = Field(description="Type of resource (e.g. 'model', 'shield', 'vector_db', etc.)")
    type: ResourceType = Field(description="Type of resource (e.g. 'model', 'shield', 'vector_store', etc.)")
@@ -4,5 +4,4 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .rag_tool import *
from .tools import *
@@ -1,218 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import Enum, StrEnum
from typing import Annotated, Any, Literal, Protocol

from pydantic import BaseModel, Field, field_validator
from typing_extensions import runtime_checkable

from llama_stack.apis.common.content_types import URL, InterleavedContent
from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from llama_stack.schema_utils import json_schema_type, register_schema, webmethod


@json_schema_type
class RRFRanker(BaseModel):
    """
    Reciprocal Rank Fusion (RRF) ranker configuration.

    :param type: The type of ranker, always "rrf"
    :param impact_factor: The impact factor for RRF scoring. Higher values give more weight to higher-ranked results.
        Must be greater than 0
    """

    type: Literal["rrf"] = "rrf"
    impact_factor: float = Field(default=60.0, gt=0.0)  # default of 60 for optimal performance


@json_schema_type
class WeightedRanker(BaseModel):
    """
    Weighted ranker configuration that combines vector and keyword scores.

    :param type: The type of ranker, always "weighted"
    :param alpha: Weight factor between 0 and 1.
        0 means only use keyword scores,
        1 means only use vector scores,
        values in between blend both scores.
    """

    type: Literal["weighted"] = "weighted"
    alpha: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Weight factor between 0 and 1. 0 means only keyword scores, 1 means only vector scores.",
    )


Ranker = Annotated[
    RRFRanker | WeightedRanker,
    Field(discriminator="type"),
]
register_schema(Ranker, name="Ranker")


@json_schema_type
class RAGDocument(BaseModel):
    """
    A document to be used for document ingestion in the RAG Tool.

    :param document_id: The unique identifier for the document.
    :param content: The content of the document.
    :param mime_type: The MIME type of the document.
    :param metadata: Additional metadata for the document.
    """

    document_id: str
    content: InterleavedContent | URL
    mime_type: str | None = None
    metadata: dict[str, Any] = Field(default_factory=dict)


@json_schema_type
class RAGQueryResult(BaseModel):
    """Result of a RAG query containing retrieved content and metadata.

    :param content: (Optional) The retrieved content from the query
    :param metadata: Additional metadata about the query result
    """

    content: InterleavedContent | None = None
    metadata: dict[str, Any] = Field(default_factory=dict)


@json_schema_type
class RAGQueryGenerator(Enum):
    """Types of query generators for RAG systems.

    :cvar default: Default query generator using simple text processing
    :cvar llm: LLM-based query generator for enhanced query understanding
    :cvar custom: Custom query generator implementation
    """

    default = "default"
    llm = "llm"
    custom = "custom"


@json_schema_type
class RAGSearchMode(StrEnum):
    """
    Search modes for RAG query retrieval:
    - VECTOR: Uses vector similarity search for semantic matching
    - KEYWORD: Uses keyword-based search for exact matching
    - HYBRID: Combines both vector and keyword search for better results
    """

    VECTOR = "vector"
    KEYWORD = "keyword"
    HYBRID = "hybrid"


@json_schema_type
class DefaultRAGQueryGeneratorConfig(BaseModel):
    """Configuration for the default RAG query generator.

    :param type: Type of query generator, always 'default'
    :param separator: String separator used to join query terms
    """

    type: Literal["default"] = "default"
    separator: str = " "


@json_schema_type
class LLMRAGQueryGeneratorConfig(BaseModel):
    """Configuration for the LLM-based RAG query generator.

    :param type: Type of query generator, always 'llm'
    :param model: Name of the language model to use for query generation
    :param template: Template string for formatting the query generation prompt
    """

    type: Literal["llm"] = "llm"
    model: str
    template: str


RAGQueryGeneratorConfig = Annotated[
    DefaultRAGQueryGeneratorConfig | LLMRAGQueryGeneratorConfig,
    Field(discriminator="type"),
]
register_schema(RAGQueryGeneratorConfig, name="RAGQueryGeneratorConfig")


@json_schema_type
class RAGQueryConfig(BaseModel):
    """
    Configuration for the RAG query generation.

    :param query_generator_config: Configuration for the query generator.
    :param max_tokens_in_context: Maximum number of tokens in the context.
    :param max_chunks: Maximum number of chunks to retrieve.
    :param chunk_template: Template for formatting each retrieved chunk in the context.
        Available placeholders: {index} (1-based chunk ordinal), {chunk.content} (chunk content string), {metadata} (chunk metadata dict).
        Default: "Result {index}\\nContent: {chunk.content}\\nMetadata: {metadata}\\n"
    :param mode: Search mode for retrieval—either "vector", "keyword", or "hybrid". Default "vector".
    :param ranker: Configuration for the ranker to use in hybrid search. Defaults to RRF ranker.
    """

    # This config defines how a query is generated using the messages
    # for memory bank retrieval.
    query_generator_config: RAGQueryGeneratorConfig = Field(default=DefaultRAGQueryGeneratorConfig())
    max_tokens_in_context: int = 4096
    max_chunks: int = 5
    chunk_template: str = "Result {index}\nContent: {chunk.content}\nMetadata: {metadata}\n"
    mode: RAGSearchMode | None = RAGSearchMode.VECTOR
    ranker: Ranker | None = Field(default=None)  # Only used for hybrid mode

    @field_validator("chunk_template")
    def validate_chunk_template(cls, v: str) -> str:
        if "{chunk.content}" not in v:
            raise ValueError("chunk_template must contain {chunk.content}")
        if "{index}" not in v:
            raise ValueError("chunk_template must contain {index}")
        if len(v) == 0:
            raise ValueError("chunk_template must not be empty")
        return v


@runtime_checkable
@trace_protocol
class RAGToolRuntime(Protocol):
    @webmethod(route="/tool-runtime/rag-tool/insert", method="POST", level=LLAMA_STACK_API_V1)
    async def insert(
        self,
        documents: list[RAGDocument],
        vector_db_id: str,
        chunk_size_in_tokens: int = 512,
    ) -> None:
        """Index documents so they can be used by the RAG system.

        :param documents: List of documents to index in the RAG system
        :param vector_db_id: ID of the vector database to store the document embeddings
        :param chunk_size_in_tokens: (Optional) Size in tokens for document chunking during indexing
        """
        ...

    @webmethod(route="/tool-runtime/rag-tool/query", method="POST", level=LLAMA_STACK_API_V1)
    async def query(
        self,
        content: InterleavedContent,
        vector_db_ids: list[str],
        query_config: RAGQueryConfig | None = None,
    ) -> RAGQueryResult:
        """Query the RAG system for context; typically invoked by the agent.

        :param content: The query content to search for in the indexed documents
        :param vector_db_ids: List of vector database IDs to search within
        :param query_config: (Optional) Configuration parameters for the query operation
        :returns: RAGQueryResult containing the retrieved content and metadata
        """
        ...
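The two ranker models in the deleted file encode standard fusion formulas: RRF scores a document as the sum of 1/(impact_factor + rank) over the result lists that contain it, and the weighted ranker blends keyword and vector scores linearly by `alpha`. A small illustration of that math (my own sketch of what the docstrings describe, not the provider implementation):

```python
def rrf_score(ranks: list[int], impact_factor: float = 60.0) -> float:
    # ranks are the 1-based positions of the same document in each result list
    return sum(1.0 / (impact_factor + rank) for rank in ranks)


def weighted_score(keyword: float, vector: float, alpha: float = 0.5) -> float:
    # alpha = 0 -> keyword only, alpha = 1 -> vector only, in between blends both
    return (1.0 - alpha) * keyword + alpha * vector


# A document ranked 1st by keyword search and 3rd by vector search:
print(rrf_score([1, 3]))         # ~0.0323 with the default impact factor of 60
print(weighted_score(0.9, 0.4))  # 0.65 with the default alpha of 0.5
```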
@@ -4,7 +4,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from enum import Enum
from typing import Any, Literal, Protocol

from pydantic import BaseModel

@@ -16,8 +15,6 @@ from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from llama_stack.schema_utils import json_schema_type, webmethod

from .rag_tool import RAGToolRuntime


@json_schema_type
class ToolDef(BaseModel):

@@ -181,22 +178,11 @@ class ToolGroups(Protocol):
    ...


class SpecialToolGroup(Enum):
    """Special tool groups with predefined functionality.

    :cvar rag_tool: Retrieval-Augmented Generation tool group for document search and retrieval
    """

    rag_tool = "rag_tool"


@runtime_checkable
@trace_protocol
class ToolRuntime(Protocol):
    tool_store: ToolStore | None = None

    rag_tool: RAGToolRuntime | None = None

    # TODO: This needs to be renamed once OPEN API generator name conflict issue is fixed.
    @webmethod(route="/tool-runtime/list-tools", method="GET", level=LLAMA_STACK_API_V1)
    async def list_runtime_tools(
@@ -15,7 +15,7 @@ from fastapi import Body
from pydantic import BaseModel, Field

from llama_stack.apis.inference import InterleavedContent
from llama_stack.apis.vector_dbs import VectorDB
from llama_stack.apis.vector_stores import VectorStore
from llama_stack.apis.version import LLAMA_STACK_API_V1
from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id

@@ -140,6 +140,7 @@ class VectorStoreFileCounts(BaseModel):
    total: int


# TODO: rename this as OpenAIVectorStore
@json_schema_type
class VectorStoreObject(BaseModel):
    """OpenAI Vector Store object.

@@ -517,17 +518,18 @@ class OpenAICreateVectorStoreFileBatchRequestWithExtraBody(BaseModel, extra="all
    chunking_strategy: VectorStoreChunkingStrategy | None = None


class VectorDBStore(Protocol):
    def get_vector_db(self, vector_db_id: str) -> VectorDB | None: ...
class VectorStoreTable(Protocol):
    def get_vector_store(self, vector_store_id: str) -> VectorStore | None: ...


@runtime_checkable
@trace_protocol
class VectorIO(Protocol):
    vector_db_store: VectorDBStore | None = None
    vector_store_table: VectorStoreTable | None = None

    # this will just block now until chunks are inserted, but it should
    # probably return a Job instance which can be polled for completion
    # TODO: rename vector_db_id to vector_store_id once Stainless is working
    @webmethod(route="/vector-io/insert", method="POST", level=LLAMA_STACK_API_V1)
    async def insert_chunks(
        self,

@@ -546,6 +548,7 @@ class VectorIO(Protocol):
        """
        ...

    # TODO: rename vector_db_id to vector_store_id once Stainless is working
    @webmethod(route="/vector-io/query", method="POST", level=LLAMA_STACK_API_V1)
    async def query_chunks(
        self,
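Since the renamed `VectorStoreTable` protocol requires only a single lookup method, any structurally matching class satisfies it. A minimal in-memory sketch, a stand-in for illustration rather than the actual routing-table implementation:

```python
from llama_stack.apis.vector_stores import VectorStore  # import path per the diff above


class InMemoryVectorStoreTable:
    """Minimal structural match for the VectorStoreTable protocol."""

    def __init__(self) -> None:
        self._stores: dict[str, VectorStore] = {}

    def register(self, store: VectorStore) -> None:
        # vector_store_id is the identifier property defined on the resource below
        self._stores[store.vector_store_id] = store

    def get_vector_store(self, vector_store_id: str) -> VectorStore | None:
        return self._stores.get(vector_store_id)
```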
@@ -4,4 +4,4 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from .vector_dbs import *
from .vector_stores import *
@@ -9,53 +9,43 @@ from typing import Literal
from pydantic import BaseModel

from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.schema_utils import json_schema_type


@json_schema_type
class VectorDB(Resource):
# Internal resource type for storing the vector store routing and other information
class VectorStore(Resource):
    """Vector database resource for storing and querying vector embeddings.

    :param type: Type of resource, always 'vector_db' for vector databases
    :param type: Type of resource, always 'vector_store' for vector stores
    :param embedding_model: Name of the embedding model to use for vector generation
    :param embedding_dimension: Dimension of the embedding vectors
    """

    type: Literal[ResourceType.vector_db] = ResourceType.vector_db
    type: Literal[ResourceType.vector_store] = ResourceType.vector_store

    embedding_model: str
    embedding_dimension: int
    vector_db_name: str | None = None
    vector_store_name: str | None = None

    @property
    def vector_db_id(self) -> str:
    def vector_store_id(self) -> str:
        return self.identifier

    @property
    def provider_vector_db_id(self) -> str | None:
    def provider_vector_store_id(self) -> str | None:
        return self.provider_resource_id


class VectorDBInput(BaseModel):
class VectorStoreInput(BaseModel):
    """Input parameters for creating or configuring a vector database.

    :param vector_db_id: Unique identifier for the vector database
    :param vector_store_id: Unique identifier for the vector store
    :param embedding_model: Name of the embedding model to use for vector generation
    :param embedding_dimension: Dimension of the embedding vectors
    :param provider_vector_db_id: (Optional) Provider-specific identifier for the vector database
    :param provider_vector_store_id: (Optional) Provider-specific identifier for the vector store
    """

    vector_db_id: str
    vector_store_id: str
    embedding_model: str
    embedding_dimension: int
    provider_id: str | None = None
    provider_vector_db_id: str | None = None


class ListVectorDBsResponse(BaseModel):
    """Response from listing vector databases.

    :param data: List of vector databases
    """

    data: list[VectorDB]
    provider_vector_store_id: str | None = None
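A quick sketch of the renamed resource in use. The `identifier` and `provider_resource_id` field names are inferred from the `Resource` base class via the properties above; the provider and model values are illustrative:

```python
from llama_stack.apis.vector_stores import VectorStore

store = VectorStore(
    identifier="vs_demo",                   # exposed as store.vector_store_id
    provider_id="faiss",                    # illustrative provider ID
    provider_resource_id="faiss_vs_demo",   # exposed as store.provider_vector_store_id
    embedding_model="all-MiniLM-L6-v2",     # illustrative embedding model
    embedding_dimension=384,
)
assert store.vector_store_id == "vs_demo"
assert store.provider_vector_store_id == "faiss_vs_demo"
```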
@@ -1,490 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import importlib.resources
import json
import os
import shutil
import sys
import textwrap
from functools import lru_cache
from importlib.abc import Traversable
from pathlib import Path

import yaml
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.validation import Validator
from termcolor import colored, cprint

from llama_stack.cli.stack.utils import ImageType
from llama_stack.cli.table import print_table
from llama_stack.core.build import (
    SERVER_DEPENDENCIES,
    build_image,
    get_provider_dependencies,
)
from llama_stack.core.configure import parse_and_maybe_upgrade_config
from llama_stack.core.datatypes import (
    BuildConfig,
    BuildProvider,
    DistributionSpec,
    Provider,
    StackRunConfig,
)
from llama_stack.core.distribution import get_provider_registry
from llama_stack.core.external import load_external_apis
from llama_stack.core.resolver import InvalidProviderError
from llama_stack.core.stack import replace_env_vars
from llama_stack.core.utils.config_dirs import DISTRIBS_BASE_DIR, EXTERNAL_PROVIDERS_DIR
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.exec import formulate_run_args, run_command
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.datatypes import Api
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig

DISTRIBS_PATH = Path(__file__).parent.parent.parent / "distributions"


@lru_cache
def available_distros_specs() -> dict[str, BuildConfig]:
    import yaml

    distro_specs = {}
    for p in DISTRIBS_PATH.rglob("*build.yaml"):
        distro_name = p.parent.name
        with open(p) as f:
            build_config = BuildConfig(**yaml.safe_load(f))
        distro_specs[distro_name] = build_config
    return distro_specs


def run_stack_build_command(args: argparse.Namespace) -> None:
    if args.list_distros:
        return _run_distro_list_cmd()

    if args.image_type == ImageType.VENV.value:
        current_venv = os.environ.get("VIRTUAL_ENV")
        image_name = args.image_name or current_venv
    else:
        image_name = args.image_name

    if args.template:
        cprint(
            "The --template argument is deprecated. Please use --distro instead.",
            color="red",
            file=sys.stderr,
        )
        distro_name = args.template
    else:
        distro_name = args.distribution

    if distro_name:
        available_distros = available_distros_specs()
        if distro_name not in available_distros:
            cprint(
                f"Could not find distribution {distro_name}. Please run `llama stack build --list-distros` to check out the available distributions",
                color="red",
                file=sys.stderr,
            )
            sys.exit(1)
        build_config = available_distros[distro_name]
        if args.image_type:
            build_config.image_type = args.image_type
        else:
            cprint(
                f"Please specify a image-type ({' | '.join(e.value for e in ImageType)}) for {distro_name}",
                color="red",
                file=sys.stderr,
            )
            sys.exit(1)
    elif args.providers:
        provider_list: dict[str, list[BuildProvider]] = dict()
        for api_provider in args.providers.split(","):
            if "=" not in api_provider:
                cprint(
                    "Could not parse `--providers`. Please ensure the list is in the format api1=provider1,api2=provider2",
                    color="red",
                    file=sys.stderr,
                )
                sys.exit(1)
            api, provider_type = api_provider.split("=")
            providers_for_api = get_provider_registry().get(Api(api), None)
            if providers_for_api is None:
                cprint(
                    f"{api} is not a valid API.",
                    color="red",
                    file=sys.stderr,
                )
                sys.exit(1)
            if provider_type in providers_for_api:
                provider = BuildProvider(
                    provider_type=provider_type,
                    module=None,
                )
                provider_list.setdefault(api, []).append(provider)
            else:
                cprint(
                    f"{provider} is not a valid provider for the {api} API.",
                    color="red",
                    file=sys.stderr,
                )
                sys.exit(1)
        distribution_spec = DistributionSpec(
            providers=provider_list,
            description=",".join(args.providers),
        )
        if not args.image_type:
            cprint(
                f"Please specify a image-type (container | venv) for {args.template}",
                color="red",
                file=sys.stderr,
            )
            sys.exit(1)

        build_config = BuildConfig(image_type=args.image_type, distribution_spec=distribution_spec)
    elif not args.config and not distro_name:
        name = prompt(
            "> Enter a name for your Llama Stack (e.g. my-local-stack): ",
            validator=Validator.from_callable(
                lambda x: len(x) > 0,
                error_message="Name cannot be empty, please enter a name",
            ),
        )

        image_type = prompt(
            "> Enter the image type you want your Llama Stack to be built as (use <TAB> to see options): ",
            completer=WordCompleter([e.value for e in ImageType]),
            complete_while_typing=True,
            validator=Validator.from_callable(
                lambda x: x in [e.value for e in ImageType],
                error_message="Invalid image type. Use <TAB> to see options",
            ),
        )

        image_name = f"llamastack-{name}"

        cprint(
            textwrap.dedent(
                """
                Llama Stack is composed of several APIs working together. Let's select
                the provider types (implementations) you want to use for these APIs.
                """,
            ),
            color="green",
            file=sys.stderr,
        )

        cprint("Tip: use <TAB> to see options for the providers.\n", color="green", file=sys.stderr)

        providers: dict[str, list[BuildProvider]] = dict()
        for api, providers_for_api in get_provider_registry().items():
            available_providers = [x for x in providers_for_api.keys() if x not in ("remote", "remote::sample")]
            if not available_providers:
                continue
            api_provider = prompt(
                f"> Enter provider for API {api.value}: ",
                completer=WordCompleter(available_providers),
                complete_while_typing=True,
                validator=Validator.from_callable(
                    lambda x: x in available_providers,  # noqa: B023 - see https://github.com/astral-sh/ruff/issues/7847
                    error_message="Invalid provider, use <TAB> to see options",
                ),
            )

            string_providers = api_provider.split(" ")

            for provider in string_providers:
                providers.setdefault(api.value, []).append(BuildProvider(provider_type=provider))

        description = prompt(
            "\n > (Optional) Enter a short description for your Llama Stack: ",
            default="",
        )

        distribution_spec = DistributionSpec(
            providers=providers,
            description=description,
        )

        build_config = BuildConfig(image_type=image_type, distribution_spec=distribution_spec)
    else:
        with open(args.config) as f:
            try:
                contents = yaml.safe_load(f)
                contents = replace_env_vars(contents)
                build_config = BuildConfig(**contents)
                if args.image_type:
                    build_config.image_type = args.image_type
            except Exception as e:
                cprint(
                    f"Could not parse config file {args.config}: {e}",
                    color="red",
                    file=sys.stderr,
                )
                sys.exit(1)

    if args.print_deps_only:
        print(f"# Dependencies for {distro_name or args.config or image_name}")
        normal_deps, special_deps, external_provider_dependencies = get_provider_dependencies(build_config)
        normal_deps += SERVER_DEPENDENCIES
        print(f"uv pip install {' '.join(normal_deps)}")
        for special_dep in special_deps:
            print(f"uv pip install {special_dep}")
        for external_dep in external_provider_dependencies:
            print(f"uv pip install {external_dep}")
        return

    try:
        run_config = _run_stack_build_command_from_build_config(
            build_config,
            image_name=image_name,
            config_path=args.config,
            distro_name=distro_name,
        )

    except (Exception, RuntimeError) as exc:
        import traceback

        cprint(
            f"Error building stack: {exc}",
            color="red",
            file=sys.stderr,
        )
        cprint("Stack trace:", color="red", file=sys.stderr)
        traceback.print_exc()
        sys.exit(1)

    if run_config is None:
        cprint(
            "Run config path is empty",
            color="red",
            file=sys.stderr,
        )
        sys.exit(1)

    if args.run:
        config_dict = yaml.safe_load(run_config.read_text())
        config = parse_and_maybe_upgrade_config(config_dict)
        if config.external_providers_dir and not config.external_providers_dir.exists():
            config.external_providers_dir.mkdir(exist_ok=True)
        run_args = formulate_run_args(args.image_type, image_name or config.image_name)
        run_args.extend([str(os.getenv("LLAMA_STACK_PORT", 8321)), "--config", str(run_config)])
        run_command(run_args)


def _generate_run_config(
    build_config: BuildConfig,
    build_dir: Path,
    image_name: str,
) -> Path:
    """
    Generate a run.yaml template file for user to edit from a build.yaml file
    """
    apis = list(build_config.distribution_spec.providers.keys())
    run_config = StackRunConfig(
        container_image=(image_name if build_config.image_type == LlamaStackImageType.CONTAINER.value else None),
        image_name=image_name,
        apis=apis,
        providers={},
        external_providers_dir=build_config.external_providers_dir
        if build_config.external_providers_dir
        else EXTERNAL_PROVIDERS_DIR,
    )
    if not run_config.inference_store:
        run_config.inference_store = SqliteSqlStoreConfig(
            **SqliteSqlStoreConfig.sample_run_config(
                __distro_dir__=(DISTRIBS_BASE_DIR / image_name).as_posix(), db_name="inference_store.db"
            )
        )
    # build providers dict
    provider_registry = get_provider_registry(build_config)
    for api in apis:
        run_config.providers[api] = []
        providers = build_config.distribution_spec.providers[api]

        for provider in providers:
            pid = provider.provider_type.split("::")[-1]

            p = provider_registry[Api(api)][provider.provider_type]
            if p.deprecation_error:
                raise InvalidProviderError(p.deprecation_error)

            try:
                config_type = instantiate_class_type(provider_registry[Api(api)][provider.provider_type].config_class)
            except (ModuleNotFoundError, ValueError) as exc:
                # HACK ALERT:
                # This code executes after building is done, the import cannot work since the
                # package is either available in the venv or container - not available on the host.
                # TODO: use a "is_external" flag in ProviderSpec to check if the provider is
                # external
                cprint(
                    f"Failed to import provider {provider.provider_type} for API {api} - assuming it's external, skipping: {exc}",
                    color="yellow",
                    file=sys.stderr,
                )
                # Set config_type to None to avoid UnboundLocalError
                config_type = None

            if config_type is not None and hasattr(config_type, "sample_run_config"):
                config = config_type.sample_run_config(__distro_dir__=f"~/.llama/distributions/{image_name}")
            else:
                config = {}

            p_spec = Provider(
                provider_id=pid,
                provider_type=provider.provider_type,
                config=config,
                module=provider.module,
            )
            run_config.providers[api].append(p_spec)

    run_config_file = build_dir / f"{image_name}-run.yaml"

    with open(run_config_file, "w") as f:
        to_write = json.loads(run_config.model_dump_json())
        f.write(yaml.dump(to_write, sort_keys=False))

    # Only print this message for non-container builds since it will be displayed before the
    # container is built
    # For non-container builds, the run.yaml is generated at the very end of the build process so it
    # makes sense to display this message
    if build_config.image_type != LlamaStackImageType.CONTAINER.value:
        cprint(f"You can now run your stack with `llama stack run {run_config_file}`", color="green", file=sys.stderr)
    return run_config_file


def _run_stack_build_command_from_build_config(
    build_config: BuildConfig,
    image_name: str | None = None,
    distro_name: str | None = None,
    config_path: str | None = None,
) -> Path | Traversable:
    image_name = image_name or build_config.image_name
    if build_config.image_type == LlamaStackImageType.CONTAINER.value:
        if distro_name:
            image_name = f"distribution-{distro_name}"
        else:
            if not image_name:
                raise ValueError("Please specify an image name when building a container image without a template")
    else:
        if not image_name and os.environ.get("UV_SYSTEM_PYTHON"):
            image_name = "__system__"
        if not image_name:
            raise ValueError("Please specify an image name when building a venv image")

    # At this point, image_name should be guaranteed to be a string
    if image_name is None:
        raise ValueError("image_name should not be None after validation")

    if distro_name:
        build_dir = DISTRIBS_BASE_DIR / distro_name
        build_file_path = build_dir / f"{distro_name}-build.yaml"
    else:
        if image_name is None:
            raise ValueError("image_name cannot be None")
        build_dir = DISTRIBS_BASE_DIR / image_name
        build_file_path = build_dir / f"{image_name}-build.yaml"

    os.makedirs(build_dir, exist_ok=True)
    run_config_file = None
    # Generate the run.yaml so it can be included in the container image with the proper entrypoint
    # Only do this if we're building a container image and we're not using a template
    if build_config.image_type == LlamaStackImageType.CONTAINER.value and not distro_name and config_path:
        cprint("Generating run.yaml file", color="yellow", file=sys.stderr)
        run_config_file = _generate_run_config(build_config, build_dir, image_name)

    with open(build_file_path, "w") as f:
        to_write = json.loads(build_config.model_dump_json(exclude_none=True))
        f.write(yaml.dump(to_write, sort_keys=False))

    # We first install the external APIs so that the build process can use them and discover the
    # providers dependencies
    if build_config.external_apis_dir:
        cprint("Installing external APIs", color="yellow", file=sys.stderr)
        external_apis = load_external_apis(build_config)
        if external_apis:
            # install the external APIs
            packages = []
            for _, api_spec in external_apis.items():
                if api_spec.pip_packages:
                    packages.extend(api_spec.pip_packages)
                    cprint(
                        f"Installing {api_spec.name} with pip packages {api_spec.pip_packages}",
                        color="yellow",
                        file=sys.stderr,
                    )
            return_code = run_command(["uv", "pip", "install", *packages])
            if return_code != 0:
                packages_str = ", ".join(packages)
                raise RuntimeError(
                    f"Failed to install external APIs packages: {packages_str} (return code: {return_code})"
                )

    return_code = build_image(
        build_config,
        image_name,
        distro_or_config=distro_name or config_path or str(build_file_path),
        run_config=run_config_file.as_posix() if run_config_file else None,
    )
    if return_code != 0:
        raise RuntimeError(f"Failed to build image {image_name}")

    if distro_name:
        # copy run.yaml from distribution to build_dir instead of generating it again
        distro_path = importlib.resources.files("llama_stack") / f"distributions/{distro_name}/run.yaml"
        run_config_file = build_dir / f"{distro_name}-run.yaml"

        with importlib.resources.as_file(distro_path) as path:
            shutil.copy(path, run_config_file)

        cprint("Build Successful!", color="green", file=sys.stderr)
        cprint(f"You can find the newly-built distribution here: {run_config_file}", color="blue", file=sys.stderr)
        if build_config.image_type == LlamaStackImageType.VENV:
            cprint(
                "You can run the new Llama Stack distro (after activating "
                + colored(image_name, "cyan")
                + ") via: "
                + colored(f"llama stack run {run_config_file}", "blue"),
                color="green",
                file=sys.stderr,
            )
        elif build_config.image_type == LlamaStackImageType.CONTAINER:
            cprint(
                "You can run the container with: "
                + colored(
                    f"docker run -p 8321:8321 -v ~/.llama:/root/.llama localhost/{image_name} --port 8321", "blue"
                ),
                color="green",
                file=sys.stderr,
            )
        return distro_path
    else:
        return _generate_run_config(build_config, build_dir, image_name)


def _run_distro_list_cmd() -> None:
    headers = [
        "Distribution Name",
        # "Providers",
        "Description",
    ]

    rows = []
    for distro_name, spec in available_distros_specs().items():
        rows.append(
            [
                distro_name,
                # json.dumps(spec.distribution_spec.providers, indent=2),
                spec.distribution_spec.description,
            ]
        )
    print_table(
        rows,
        headers,
        separate_rows=True,
    )
Some files were not shown because too many files have changed in this diff.