mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
Merge branch 'main' into langchain_llamastack
This commit is contained in:
commit
bd3cef8cd1
3026 changed files with 259663 additions and 736414 deletions
|
|
@ -5,7 +5,7 @@ omit =
|
|||
*/llama_stack/templates/*
|
||||
.venv/*
|
||||
*/llama_stack/cli/scripts/*
|
||||
*/llama_stack/ui/*
|
||||
*/llama_stack_ui/*
|
||||
*/llama_stack/distribution/ui/*
|
||||
*/llama_stack/strong_typing/*
|
||||
*/llama_stack/env.py
|
||||
|
|
|
|||
2
.github/CODEOWNERS
vendored
2
.github/CODEOWNERS
vendored
|
|
@ -2,4 +2,4 @@
|
|||
|
||||
# These owners will be the default owners for everything in
|
||||
# the repo. Unless a later match takes precedence,
|
||||
* @ashwinb @yanxi0830 @hardikjshah @raghotham @ehhuang @terrytangyuan @leseb @bbrowning @reluctantfuturist @mattf @slekkala1 @franciscojavierarceo
|
||||
* @ashwinb @raghotham @ehhuang @leseb @bbrowning @mattf @franciscojavierarceo @cdoern
|
||||
|
|
|
|||
60
.github/actions/install-llama-stack-client/action.yml
vendored
Normal file
60
.github/actions/install-llama-stack-client/action.yml
vendored
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
name: Install llama-stack-client
|
||||
description: Install llama-stack-client based on branch context and client-version input
|
||||
|
||||
inputs:
|
||||
client-version:
|
||||
description: 'Client version to install on non-release branches (latest or published). Ignored on release branches.'
|
||||
required: false
|
||||
default: ""
|
||||
|
||||
outputs:
|
||||
uv-extra-index-url:
|
||||
description: 'UV_EXTRA_INDEX_URL to use (set for release branches)'
|
||||
value: ${{ steps.configure.outputs.uv-extra-index-url }}
|
||||
install-after-sync:
|
||||
description: 'Whether to install client after uv sync'
|
||||
value: ${{ steps.configure.outputs.install-after-sync }}
|
||||
install-source:
|
||||
description: 'Where to install client from after sync'
|
||||
value: ${{ steps.configure.outputs.install-source }}
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Configure client installation
|
||||
id: configure
|
||||
shell: bash
|
||||
run: |
|
||||
# Determine the branch we're working with
|
||||
BRANCH="${{ github.base_ref || github.ref }}"
|
||||
BRANCH="${BRANCH#refs/heads/}"
|
||||
|
||||
echo "Working with branch: $BRANCH"
|
||||
|
||||
# On release branches: use test.pypi for uv sync, then install from git
|
||||
# On non-release branches: install based on client-version after sync
|
||||
if [[ "$BRANCH" =~ ^release-[0-9]+\.[0-9]+\.x$ ]]; then
|
||||
echo "Detected release branch: $BRANCH"
|
||||
|
||||
# Check if matching branch exists in client repo
|
||||
if ! git ls-remote --exit-code --heads https://github.com/llamastack/llama-stack-client-python.git "$BRANCH" > /dev/null 2>&1; then
|
||||
echo "::error::Branch $BRANCH not found in llama-stack-client-python repository"
|
||||
echo "::error::Please create the matching release branch in llama-stack-client-python before testing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Configure to use test.pypi as extra index (PyPI is primary)
|
||||
echo "uv-extra-index-url=https://test.pypi.org/simple/" >> $GITHUB_OUTPUT
|
||||
echo "install-after-sync=true" >> $GITHUB_OUTPUT
|
||||
echo "install-source=git+https://github.com/llamastack/llama-stack-client-python.git@$BRANCH" >> $GITHUB_OUTPUT
|
||||
elif [ "${{ inputs.client-version }}" = "latest" ]; then
|
||||
# Install from main git after sync
|
||||
echo "install-after-sync=true" >> $GITHUB_OUTPUT
|
||||
echo "install-source=git+https://github.com/llamastack/llama-stack-client-python.git@main" >> $GITHUB_OUTPUT
|
||||
elif [ "${{ inputs.client-version }}" = "published" ]; then
|
||||
# Use published version from PyPI (installed by sync)
|
||||
echo "install-after-sync=false" >> $GITHUB_OUTPUT
|
||||
elif [ -n "${{ inputs.client-version }}" ]; then
|
||||
echo "::error::Invalid client-version: ${{ inputs.client-version }}"
|
||||
exit 1
|
||||
fi
|
||||
|
|
@ -72,7 +72,8 @@ runs:
|
|||
echo "New recordings detected, committing and pushing"
|
||||
git add tests/integration/
|
||||
|
||||
git commit -m "Recordings update from CI (suite: ${{ inputs.suite }})"
|
||||
git commit -m "Recordings update from CI (setup: ${{ inputs.setup }}, suite: ${{ inputs.suite }})"
|
||||
|
||||
git fetch origin ${{ github.ref_name }}
|
||||
git rebase origin/${{ github.ref_name }}
|
||||
echo "Rebased successfully"
|
||||
|
|
@ -88,13 +89,15 @@ runs:
|
|||
run: |
|
||||
# Ollama logs (if ollama container exists)
|
||||
sudo docker logs ollama > ollama-${{ inputs.inference-mode }}.log 2>&1 || true
|
||||
# vllm logs (if vllm container exists)
|
||||
sudo docker logs vllm > vllm-${{ inputs.inference-mode }}.log 2>&1 || true
|
||||
# Note: distro container logs are now dumped in integration-tests.sh before container is removed
|
||||
|
||||
- name: Upload logs
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: logs-${{ github.run_id }}-${{ github.run_attempt || '' }}-${{ strategy.job-index }}
|
||||
name: logs-${{ github.run_id }}-${{ github.run_attempt || '1' }}-${{ strategy.job-index || github.job }}-${{ github.action }}
|
||||
path: |
|
||||
*.log
|
||||
retention-days: 1
|
||||
|
|
|
|||
30
.github/actions/setup-runner/action.yml
vendored
30
.github/actions/setup-runner/action.yml
vendored
|
|
@ -18,25 +18,35 @@ runs:
|
|||
python-version: ${{ inputs.python-version }}
|
||||
version: 0.7.6
|
||||
|
||||
- name: Configure client installation
|
||||
id: client-config
|
||||
uses: ./.github/actions/install-llama-stack-client
|
||||
with:
|
||||
client-version: ${{ inputs.client-version }}
|
||||
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
env:
|
||||
UV_EXTRA_INDEX_URL: ${{ steps.client-config.outputs.uv-extra-index-url }}
|
||||
run: |
|
||||
# Export UV env vars for current step and persist to GITHUB_ENV for subsequent steps
|
||||
if [ -n "$UV_EXTRA_INDEX_URL" ]; then
|
||||
export UV_INDEX_STRATEGY=unsafe-best-match
|
||||
echo "UV_EXTRA_INDEX_URL=$UV_EXTRA_INDEX_URL" >> $GITHUB_ENV
|
||||
echo "UV_INDEX_STRATEGY=$UV_INDEX_STRATEGY" >> $GITHUB_ENV
|
||||
echo "Exported UV environment variables for current and subsequent steps"
|
||||
fi
|
||||
|
||||
echo "Updating project dependencies via uv sync"
|
||||
uv sync --all-groups
|
||||
|
||||
echo "Installing ad-hoc dependencies"
|
||||
uv pip install faiss-cpu
|
||||
|
||||
# Install llama-stack-client-python based on the client-version input
|
||||
if [ "${{ inputs.client-version }}" = "latest" ]; then
|
||||
echo "Installing latest llama-stack-client-python from main branch"
|
||||
uv pip install git+https://github.com/llamastack/llama-stack-client-python.git@main
|
||||
elif [ "${{ inputs.client-version }}" = "published" ]; then
|
||||
echo "Installing published llama-stack-client-python from PyPI"
|
||||
uv pip install llama-stack-client
|
||||
else
|
||||
echo "Invalid client-version: ${{ inputs.client-version }}"
|
||||
exit 1
|
||||
# Install specific client version after sync if needed
|
||||
if [ "${{ steps.client-config.outputs.install-after-sync }}" = "true" ]; then
|
||||
echo "Installing llama-stack-client from: ${{ steps.client-config.outputs.install-source }}"
|
||||
uv pip install ${{ steps.client-config.outputs.install-source }}
|
||||
fi
|
||||
|
||||
echo "Installed llama packages"
|
||||
|
|
|
|||
|
|
@ -39,21 +39,36 @@ runs:
|
|||
if: ${{ inputs.setup == 'vllm' && inputs.inference-mode == 'record' }}
|
||||
uses: ./.github/actions/setup-vllm
|
||||
|
||||
- name: Start Postgres service
|
||||
if: ${{ contains(inputs.setup, 'postgres') }}
|
||||
shell: bash
|
||||
run: |
|
||||
sudo docker rm -f postgres-ci || true
|
||||
sudo docker run -d --name postgres-ci \
|
||||
-e POSTGRES_USER=llamastack \
|
||||
-e POSTGRES_PASSWORD=llamastack \
|
||||
-e POSTGRES_DB=llamastack \
|
||||
-p 5432:5432 \
|
||||
postgres:16
|
||||
|
||||
echo "Waiting for Postgres to become ready..."
|
||||
for i in {1..30}; do
|
||||
if sudo docker exec postgres-ci pg_isready -U llamastack -d llamastack >/dev/null 2>&1; then
|
||||
echo "Postgres is ready"
|
||||
break
|
||||
fi
|
||||
if [ "$i" -eq 30 ]; then
|
||||
echo "Postgres failed to start in time"
|
||||
sudo docker logs postgres-ci || true
|
||||
exit 1
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
|
||||
- name: Build Llama Stack
|
||||
shell: bash
|
||||
run: |
|
||||
# Install llama-stack-client-python based on the client-version input
|
||||
if [ "${{ inputs.client-version }}" = "latest" ]; then
|
||||
echo "Installing latest llama-stack-client-python from main branch"
|
||||
export LLAMA_STACK_CLIENT_DIR=git+https://github.com/llamastack/llama-stack-client-python.git@main
|
||||
elif [ "${{ inputs.client-version }}" = "published" ]; then
|
||||
echo "Installing published llama-stack-client-python from PyPI"
|
||||
unset LLAMA_STACK_CLIENT_DIR
|
||||
else
|
||||
echo "Invalid client-version: ${{ inputs.client-version }}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Client is already installed by setup-runner (handles both main and release branches)
|
||||
echo "Building Llama Stack"
|
||||
|
||||
LLAMA_STACK_DIR=. \
|
||||
|
|
|
|||
35
.github/actions/setup-typescript-client/action.yml
vendored
Normal file
35
.github/actions/setup-typescript-client/action.yml
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
name: Setup TypeScript client
|
||||
description: Conditionally checkout and link llama-stack-client-typescript based on client-version
|
||||
inputs:
|
||||
client-version:
|
||||
description: 'Client version (latest or published)'
|
||||
required: true
|
||||
|
||||
outputs:
|
||||
ts-client-path:
|
||||
description: 'Path or version to use for TypeScript client'
|
||||
value: ${{ steps.set-path.outputs.ts-client-path }}
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Checkout TypeScript client (latest)
|
||||
if: ${{ inputs.client-version == 'latest' }}
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
repository: llamastack/llama-stack-client-typescript
|
||||
ref: main
|
||||
path: .ts-client-checkout
|
||||
|
||||
- name: Set TS_CLIENT_PATH
|
||||
id: set-path
|
||||
shell: bash
|
||||
run: |
|
||||
if [ "${{ inputs.client-version }}" = "latest" ]; then
|
||||
echo "ts-client-path=${{ github.workspace }}/.ts-client-checkout" >> $GITHUB_OUTPUT
|
||||
elif [ "${{ inputs.client-version }}" = "published" ]; then
|
||||
echo "ts-client-path=^0.3.2" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "::error::Invalid client-version: ${{ inputs.client-version }}"
|
||||
exit 1
|
||||
fi
|
||||
9
.github/actions/setup-vllm/action.yml
vendored
9
.github/actions/setup-vllm/action.yml
vendored
|
|
@ -11,13 +11,14 @@ runs:
|
|||
--name vllm \
|
||||
-p 8000:8000 \
|
||||
--privileged=true \
|
||||
quay.io/higginsd/vllm-cpu:65393ee064 \
|
||||
quay.io/higginsd/vllm-cpu:65393ee064-qwen3 \
|
||||
--host 0.0.0.0 \
|
||||
--port 8000 \
|
||||
--enable-auto-tool-choice \
|
||||
--tool-call-parser llama3_json \
|
||||
--model /root/.cache/Llama-3.2-1B-Instruct \
|
||||
--served-model-name meta-llama/Llama-3.2-1B-Instruct
|
||||
--tool-call-parser hermes \
|
||||
--model /root/.cache/Qwen3-0.6B \
|
||||
--served-model-name Qwen/Qwen3-0.6B \
|
||||
--max-model-len 8192
|
||||
|
||||
# Wait for vllm to be ready
|
||||
echo "Waiting for vllm to be ready..."
|
||||
|
|
|
|||
2
.github/dependabot.yml
vendored
2
.github/dependabot.yml
vendored
|
|
@ -22,7 +22,7 @@ updates:
|
|||
prefix: chore(python-deps)
|
||||
|
||||
- package-ecosystem: npm
|
||||
directory: "/llama_stack/ui"
|
||||
directory: "/llama_stack_ui"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
day: "saturday"
|
||||
|
|
|
|||
23
.github/mergify.yml
vendored
Normal file
23
.github/mergify.yml
vendored
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
pull_request_rules:
|
||||
- name: ping author on conflicts and add 'needs-rebase' label
|
||||
conditions:
|
||||
- conflict
|
||||
- -closed
|
||||
actions:
|
||||
label:
|
||||
add:
|
||||
- needs-rebase
|
||||
comment:
|
||||
message: >
|
||||
This pull request has merge conflicts that must be resolved before it
|
||||
can be merged. @{{author}} please rebase it.
|
||||
https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork
|
||||
|
||||
- name: remove 'needs-rebase' label when conflict is resolved
|
||||
conditions:
|
||||
- -conflict
|
||||
- -closed
|
||||
actions:
|
||||
label:
|
||||
remove:
|
||||
- needs-rebase
|
||||
4
.github/workflows/README.md
vendored
4
.github/workflows/README.md
vendored
|
|
@ -4,7 +4,7 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a tabl
|
|||
|
||||
| Name | File | Purpose |
|
||||
| ---- | ---- | ------- |
|
||||
| Update Changelog | [changelog.yml](changelog.yml) | Creates PR for updating the CHANGELOG.md |
|
||||
| Backward Compatibility Check | [backward-compat.yml](backward-compat.yml) | Check backward compatibility for run.yaml configs |
|
||||
| API Conformance Tests | [conformance.yml](conformance.yml) | Run the API Conformance test suite on the changes. |
|
||||
| Installer CI | [install-script-ci.yml](install-script-ci.yml) | Test the installation script |
|
||||
| Integration Auth Tests | [integration-auth-tests.yml](integration-auth-tests.yml) | Run the integration test suite with Kubernetes authentication |
|
||||
|
|
@ -12,12 +12,12 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a tabl
|
|||
| Integration Tests (Replay) | [integration-tests.yml](integration-tests.yml) | Run the integration test suites from tests/integration in replay mode |
|
||||
| Vector IO Integration Tests | [integration-vector-io-tests.yml](integration-vector-io-tests.yml) | Run the integration test suite with various VectorIO providers |
|
||||
| Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks |
|
||||
| Pre-commit Bot | [precommit-trigger.yml](precommit-trigger.yml) | Pre-commit bot for PR |
|
||||
| Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build |
|
||||
| Test llama stack list-deps | [providers-list-deps.yml](providers-list-deps.yml) | Test llama stack list-deps |
|
||||
| Python Package Build Test | [python-build-test.yml](python-build-test.yml) | Test building the llama-stack PyPI project |
|
||||
| Integration Tests (Record) | [record-integration-tests.yml](record-integration-tests.yml) | Run the integration test suite from tests/integration |
|
||||
| Check semantic PR titles | [semantic-pr.yml](semantic-pr.yml) | Ensure that PR titles follow the conventional commit spec |
|
||||
| Stainless SDK Builds | [stainless-builds.yml](stainless-builds.yml) | Build Stainless SDK from OpenAPI spec changes |
|
||||
| Close stale issues and PRs | [stale_bot.yml](stale_bot.yml) | Run the Stale Bot action |
|
||||
| Test External Providers Installed via Module | [test-external-provider-module.yml](test-external-provider-module.yml) | Test External Provider installation via Python module |
|
||||
| Test External API and Providers | [test-external.yml](test-external.yml) | Test the External API and Provider mechanisms |
|
||||
|
|
|
|||
578
.github/workflows/backward-compat.yml
vendored
Normal file
578
.github/workflows/backward-compat.yml
vendored
Normal file
|
|
@ -0,0 +1,578 @@
|
|||
name: Backward Compatibility Check
|
||||
|
||||
run-name: Check backward compatibility for run.yaml configs
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.[0-9]+.[0-9]+'
|
||||
- 'release-[0-9]+.[0-9]+.[0-9]+'
|
||||
- 'release-[0-9]+.[0-9]+'
|
||||
paths:
|
||||
- 'src/llama_stack/core/datatypes.py'
|
||||
- 'src/llama_stack/providers/datatypes.py'
|
||||
- 'src/llama_stack/distributions/**/run.yaml'
|
||||
- 'tests/backward_compat/**'
|
||||
- '.github/workflows/backward-compat.yml'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
check-main-compatibility:
|
||||
name: Check Compatibility with main
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout PR branch
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
fetch-depth: 0 # Need full history to access main branch
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
with:
|
||||
python-version: '3.12'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv sync --group dev
|
||||
|
||||
- name: Extract run.yaml files from main branch
|
||||
id: extract_configs
|
||||
run: |
|
||||
# Get list of run.yaml paths from main
|
||||
git fetch origin main
|
||||
CONFIG_PATHS=$(git ls-tree -r --name-only origin/main | grep "src/llama_stack/distributions/.*/run.yaml$" || true)
|
||||
|
||||
if [ -z "$CONFIG_PATHS" ]; then
|
||||
echo "No run.yaml files found in main branch"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract all configs to a temp directory
|
||||
mkdir -p /tmp/main_configs
|
||||
echo "Extracting configs from main branch:"
|
||||
|
||||
while IFS= read -r config_path; do
|
||||
if [ -z "$config_path" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Extract filename for storage
|
||||
filename=$(basename $(dirname "$config_path"))
|
||||
echo " - $filename (from $config_path)"
|
||||
|
||||
git show origin/main:"$config_path" > "/tmp/main_configs/${filename}.yaml"
|
||||
done <<< "$CONFIG_PATHS"
|
||||
|
||||
echo ""
|
||||
echo "Extracted $(ls /tmp/main_configs/*.yaml | wc -l) config files"
|
||||
|
||||
- name: Test all configs from main
|
||||
id: test_configs
|
||||
continue-on-error: true
|
||||
run: |
|
||||
# Run pytest once with all configs parameterized
|
||||
if COMPAT_TEST_CONFIGS_DIR=/tmp/main_configs uv run pytest tests/backward_compat/test_run_config.py -v; then
|
||||
echo "failed=false" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "failed=true" >> $GITHUB_OUTPUT
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Check for breaking change acknowledgment
|
||||
id: check_ack
|
||||
if: steps.test_configs.outputs.failed == 'true'
|
||||
run: |
|
||||
echo "Breaking changes detected. Checking for acknowledgment..."
|
||||
|
||||
# Check PR title for '!:' marker (conventional commits)
|
||||
PR_TITLE="${{ github.event.pull_request.title }}"
|
||||
if [[ "$PR_TITLE" =~ ^[a-z]+\!: ]]; then
|
||||
echo "✓ Breaking change acknowledged in PR title"
|
||||
echo "acknowledged=true" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check commit messages for BREAKING CHANGE:
|
||||
if git log origin/main..HEAD --format=%B | grep -q "BREAKING CHANGE:"; then
|
||||
echo "✓ Breaking change acknowledged in commit message"
|
||||
echo "acknowledged=true" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "✗ Breaking change NOT acknowledged"
|
||||
echo "acknowledged=false" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Evaluate results
|
||||
if: always()
|
||||
run: |
|
||||
FAILED="${{ steps.test_configs.outputs.failed }}"
|
||||
ACKNOWLEDGED="${{ steps.check_ack.outputs.acknowledged }}"
|
||||
|
||||
if [[ "$FAILED" == "true" ]]; then
|
||||
if [[ "$ACKNOWLEDGED" == "true" ]]; then
|
||||
echo ""
|
||||
echo "⚠️ WARNING: Breaking changes detected but acknowledged"
|
||||
echo ""
|
||||
echo "This PR introduces backward-incompatible changes to run.yaml."
|
||||
echo "The changes have been properly acknowledged."
|
||||
echo ""
|
||||
exit 0 # Pass the check
|
||||
else
|
||||
echo ""
|
||||
echo "❌ ERROR: Breaking changes detected without acknowledgment"
|
||||
echo ""
|
||||
echo "This PR introduces backward-incompatible changes to run.yaml"
|
||||
echo "that will break existing user configurations."
|
||||
echo ""
|
||||
echo "To acknowledge this breaking change, do ONE of:"
|
||||
echo " 1. Add '!:' to your PR title (e.g., 'feat!: change xyz')"
|
||||
echo " 2. Add the 'breaking-change' label to this PR"
|
||||
echo " 3. Include 'BREAKING CHANGE:' in a commit message"
|
||||
echo ""
|
||||
exit 1 # Fail the check
|
||||
fi
|
||||
fi
|
||||
|
||||
test-integration-main:
|
||||
name: Run Integration Tests with main Config
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout PR branch
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Extract ci-tests run.yaml from main
|
||||
run: |
|
||||
git fetch origin main
|
||||
git show origin/main:src/llama_stack/distributions/ci-tests/run.yaml > /tmp/main-ci-tests-run.yaml
|
||||
echo "Extracted ci-tests run.yaml from main branch"
|
||||
|
||||
- name: Setup test environment
|
||||
uses: ./.github/actions/setup-test-environment
|
||||
with:
|
||||
python-version: '3.12'
|
||||
client-version: 'latest'
|
||||
setup: 'ollama'
|
||||
suite: 'base'
|
||||
inference-mode: 'replay'
|
||||
|
||||
- name: Run integration tests with main config
|
||||
id: test_integration
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/run-and-record-tests
|
||||
with:
|
||||
stack-config: /tmp/main-ci-tests-run.yaml
|
||||
setup: 'ollama'
|
||||
inference-mode: 'replay'
|
||||
suite: 'base'
|
||||
|
||||
- name: Check for breaking change acknowledgment
|
||||
id: check_ack
|
||||
if: steps.test_integration.outcome == 'failure'
|
||||
run: |
|
||||
echo "Integration tests failed. Checking for acknowledgment..."
|
||||
|
||||
# Check PR title for '!:' marker (conventional commits)
|
||||
PR_TITLE="${{ github.event.pull_request.title }}"
|
||||
if [[ "$PR_TITLE" =~ ^[a-z]+\!: ]]; then
|
||||
echo "✓ Breaking change acknowledged in PR title"
|
||||
echo "acknowledged=true" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check commit messages for BREAKING CHANGE:
|
||||
if git log origin/main..HEAD --format=%B | grep -q "BREAKING CHANGE:"; then
|
||||
echo "✓ Breaking change acknowledged in commit message"
|
||||
echo "acknowledged=true" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "✗ Breaking change NOT acknowledged"
|
||||
echo "acknowledged=false" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Evaluate integration test results
|
||||
if: always()
|
||||
run: |
|
||||
TEST_FAILED="${{ steps.test_integration.outcome == 'failure' }}"
|
||||
ACKNOWLEDGED="${{ steps.check_ack.outputs.acknowledged }}"
|
||||
|
||||
if [[ "$TEST_FAILED" == "true" ]]; then
|
||||
if [[ "$ACKNOWLEDGED" == "true" ]]; then
|
||||
echo ""
|
||||
echo "⚠️ WARNING: Integration tests failed with main config but acknowledged"
|
||||
echo ""
|
||||
exit 0 # Pass the check
|
||||
else
|
||||
echo ""
|
||||
echo "❌ ERROR: Integration tests failed with main config without acknowledgment"
|
||||
echo ""
|
||||
echo "To acknowledge this breaking change, do ONE of:"
|
||||
echo " 1. Add '!:' to your PR title (e.g., 'feat!: change xyz')"
|
||||
echo " 2. Include 'BREAKING CHANGE:' in a commit message"
|
||||
echo ""
|
||||
exit 1 # Fail the check
|
||||
fi
|
||||
fi
|
||||
|
||||
test-integration-release:
|
||||
name: Run Integration Tests with Latest Release (Informational)
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout PR branch
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get latest release
|
||||
id: get_release
|
||||
run: |
|
||||
# Get the latest release from GitHub
|
||||
LATEST_TAG=$(gh release list --limit 1 --json tagName --jq '.[0].tagName' 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$LATEST_TAG" ]; then
|
||||
echo "No releases found, skipping release compatibility check"
|
||||
echo "has_release=false" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Latest release: $LATEST_TAG"
|
||||
echo "has_release=true" >> $GITHUB_OUTPUT
|
||||
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Extract ci-tests run.yaml from release
|
||||
if: steps.get_release.outputs.has_release == 'true'
|
||||
id: extract_config
|
||||
run: |
|
||||
RELEASE_TAG="${{ steps.get_release.outputs.tag }}"
|
||||
|
||||
# Try with src/ prefix first (newer releases), then without (older releases)
|
||||
if git show "$RELEASE_TAG:src/llama_stack/distributions/ci-tests/run.yaml" > /tmp/release-ci-tests-run.yaml 2>/dev/null; then
|
||||
echo "Extracted ci-tests run.yaml from release $RELEASE_TAG (src/ path)"
|
||||
echo "has_config=true" >> $GITHUB_OUTPUT
|
||||
elif git show "$RELEASE_TAG:llama_stack/distributions/ci-tests/run.yaml" > /tmp/release-ci-tests-run.yaml 2>/dev/null; then
|
||||
echo "Extracted ci-tests run.yaml from release $RELEASE_TAG (old path)"
|
||||
echo "has_config=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "::warning::ci-tests/run.yaml not found in release $RELEASE_TAG"
|
||||
echo "has_config=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Setup test environment
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_config.outputs.has_config == 'true'
|
||||
uses: ./.github/actions/setup-test-environment
|
||||
with:
|
||||
python-version: '3.12'
|
||||
client-version: 'latest'
|
||||
setup: 'ollama'
|
||||
suite: 'base'
|
||||
inference-mode: 'replay'
|
||||
|
||||
- name: Run integration tests with release config (PR branch)
|
||||
id: test_release_pr
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_config.outputs.has_config == 'true'
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/run-and-record-tests
|
||||
with:
|
||||
stack-config: /tmp/release-ci-tests-run.yaml
|
||||
setup: 'ollama'
|
||||
inference-mode: 'replay'
|
||||
suite: 'base'
|
||||
|
||||
- name: Checkout main branch to test baseline
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_config.outputs.has_config == 'true'
|
||||
run: |
|
||||
git checkout origin/main
|
||||
|
||||
- name: Setup test environment for main
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_config.outputs.has_config == 'true'
|
||||
uses: ./.github/actions/setup-test-environment
|
||||
with:
|
||||
python-version: '3.12'
|
||||
client-version: 'latest'
|
||||
setup: 'ollama'
|
||||
suite: 'base'
|
||||
inference-mode: 'replay'
|
||||
|
||||
- name: Run integration tests with release config (main branch)
|
||||
id: test_release_main
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_config.outputs.has_config == 'true'
|
||||
continue-on-error: true
|
||||
uses: ./.github/actions/run-and-record-tests
|
||||
with:
|
||||
stack-config: /tmp/release-ci-tests-run.yaml
|
||||
setup: 'ollama'
|
||||
inference-mode: 'replay'
|
||||
suite: 'base'
|
||||
|
||||
- name: Report results and post PR comment
|
||||
if: always() && steps.get_release.outputs.has_release == 'true' && steps.extract_config.outputs.has_config == 'true'
|
||||
run: |
|
||||
RELEASE_TAG="${{ steps.get_release.outputs.tag }}"
|
||||
PR_OUTCOME="${{ steps.test_release_pr.outcome }}"
|
||||
MAIN_OUTCOME="${{ steps.test_release_main.outcome }}"
|
||||
|
||||
if [[ "$PR_OUTCOME" == "failure" && "$MAIN_OUTCOME" == "success" ]]; then
|
||||
# NEW breaking change - PR fails but main passes
|
||||
echo "::error::🚨 This PR introduces a NEW breaking change!"
|
||||
|
||||
# Check if we already posted a comment (to avoid spam on every push)
|
||||
EXISTING_COMMENT=$(gh pr view ${{ github.event.pull_request.number }} --json comments --jq '.comments[] | select(.body | contains("🚨 New Breaking Change Detected") and contains("Integration tests")) | .id' | head -1)
|
||||
|
||||
if [[ -z "$EXISTING_COMMENT" ]]; then
|
||||
gh pr comment ${{ github.event.pull_request.number }} --body "## 🚨 New Breaking Change Detected
|
||||
|
||||
**Integration tests against release \`$RELEASE_TAG\` are now failing**
|
||||
|
||||
⚠️ This PR introduces a breaking change that affects compatibility with the latest release.
|
||||
|
||||
- Users on release \`$RELEASE_TAG\` may not be able to upgrade
|
||||
- Existing configurations may break
|
||||
|
||||
The tests pass on \`main\` but fail with this PR's changes.
|
||||
|
||||
> **Note:** This is informational only and does not block merge.
|
||||
> Consider whether this breaking change is acceptable for users."
|
||||
else
|
||||
echo "Comment already exists, skipping to avoid spam"
|
||||
fi
|
||||
|
||||
cat >> $GITHUB_STEP_SUMMARY <<EOF
|
||||
## 🚨 NEW Breaking Change Detected
|
||||
|
||||
**Integration tests against release \`$RELEASE_TAG\` FAILED**
|
||||
|
||||
⚠️ **This PR introduces a NEW breaking change**
|
||||
|
||||
- Tests **PASS** on main branch ✅
|
||||
- Tests **FAIL** on PR branch ❌
|
||||
- Users on release \`$RELEASE_TAG\` may not be able to upgrade
|
||||
- Existing configurations may break
|
||||
|
||||
> **Note:** This is informational only and does not block merge.
|
||||
> Consider whether this breaking change is acceptable for users.
|
||||
EOF
|
||||
|
||||
elif [[ "$PR_OUTCOME" == "failure" ]]; then
|
||||
# Existing breaking change - both PR and main fail
|
||||
echo "::warning::Breaking change already exists in main branch"
|
||||
|
||||
cat >> $GITHUB_STEP_SUMMARY <<EOF
|
||||
## ⚠️ Release Compatibility Test Failed (Existing Issue)
|
||||
|
||||
**Integration tests against release \`$RELEASE_TAG\` FAILED**
|
||||
|
||||
- Tests **FAIL** on main branch ❌
|
||||
- Tests **FAIL** on PR branch ❌
|
||||
- This breaking change already exists in main (not introduced by this PR)
|
||||
|
||||
> **Note:** This is informational only.
|
||||
EOF
|
||||
|
||||
else
|
||||
# Success - tests pass
|
||||
cat >> $GITHUB_STEP_SUMMARY <<EOF
|
||||
## ✅ Release Compatibility Test Passed
|
||||
|
||||
Integration tests against release \`$RELEASE_TAG\` passed successfully.
|
||||
This PR maintains compatibility with the latest release.
|
||||
EOF
|
||||
fi
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
check-schema-release-compatibility:
|
||||
name: Check Schema Compatibility with Latest Release (Informational)
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout PR branch
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
with:
|
||||
python-version: '3.12'
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
|
||||
with:
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
uv sync --group dev
|
||||
|
||||
- name: Get latest release
|
||||
id: get_release
|
||||
run: |
|
||||
# Get the latest release from GitHub
|
||||
LATEST_TAG=$(gh release list --limit 1 --json tagName --jq '.[0].tagName' 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$LATEST_TAG" ]; then
|
||||
echo "No releases found, skipping release compatibility check"
|
||||
echo "has_release=false" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Latest release: $LATEST_TAG"
|
||||
echo "has_release=true" >> $GITHUB_OUTPUT
|
||||
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Extract configs from release
|
||||
if: steps.get_release.outputs.has_release == 'true'
|
||||
id: extract_release_configs
|
||||
run: |
|
||||
RELEASE_TAG="${{ steps.get_release.outputs.tag }}"
|
||||
|
||||
# Get run.yaml files from the release (try both src/ and old path)
|
||||
CONFIG_PATHS=$(git ls-tree -r --name-only "$RELEASE_TAG" | grep "llama_stack/distributions/.*/run.yaml$" || true)
|
||||
|
||||
if [ -z "$CONFIG_PATHS" ]; then
|
||||
echo "::warning::No run.yaml files found in release $RELEASE_TAG"
|
||||
echo "has_configs=false" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Extract all configs to a temp directory
|
||||
mkdir -p /tmp/release_configs
|
||||
echo "Extracting configs from release $RELEASE_TAG:"
|
||||
|
||||
while IFS= read -r config_path; do
|
||||
if [ -z "$config_path" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
filename=$(basename $(dirname "$config_path"))
|
||||
echo " - $filename (from $config_path)"
|
||||
|
||||
git show "$RELEASE_TAG:$config_path" > "/tmp/release_configs/${filename}.yaml" 2>/dev/null || true
|
||||
done <<< "$CONFIG_PATHS"
|
||||
|
||||
echo ""
|
||||
echo "Extracted $(ls /tmp/release_configs/*.yaml 2>/dev/null | wc -l) config files"
|
||||
echo "has_configs=true" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Test against release configs (PR branch)
|
||||
id: test_schema_pr
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_release_configs.outputs.has_configs == 'true'
|
||||
continue-on-error: true
|
||||
run: |
|
||||
RELEASE_TAG="${{ steps.get_release.outputs.tag }}"
|
||||
COMPAT_TEST_CONFIGS_DIR=/tmp/release_configs uv run pytest tests/backward_compat/test_run_config.py -v --tb=short
|
||||
|
||||
- name: Checkout main branch to test baseline
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_release_configs.outputs.has_configs == 'true'
|
||||
run: |
|
||||
git checkout origin/main
|
||||
|
||||
- name: Install dependencies for main
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_release_configs.outputs.has_configs == 'true'
|
||||
run: |
|
||||
uv sync --group dev
|
||||
|
||||
- name: Test against release configs (main branch)
|
||||
id: test_schema_main
|
||||
if: steps.get_release.outputs.has_release == 'true' && steps.extract_release_configs.outputs.has_configs == 'true'
|
||||
continue-on-error: true
|
||||
run: |
|
||||
RELEASE_TAG="${{ steps.get_release.outputs.tag }}"
|
||||
COMPAT_TEST_CONFIGS_DIR=/tmp/release_configs uv run pytest tests/backward_compat/test_run_config.py -v --tb=short
|
||||
|
||||
- name: Report results and post PR comment
|
||||
if: always() && steps.get_release.outputs.has_release == 'true' && steps.extract_release_configs.outputs.has_configs == 'true'
|
||||
run: |
|
||||
RELEASE_TAG="${{ steps.get_release.outputs.tag }}"
|
||||
PR_OUTCOME="${{ steps.test_schema_pr.outcome }}"
|
||||
MAIN_OUTCOME="${{ steps.test_schema_main.outcome }}"
|
||||
|
||||
if [[ "$PR_OUTCOME" == "failure" && "$MAIN_OUTCOME" == "success" ]]; then
|
||||
# NEW breaking change - PR fails but main passes
|
||||
echo "::error::🚨 This PR introduces a NEW schema breaking change!"
|
||||
|
||||
# Check if we already posted a comment (to avoid spam on every push)
|
||||
EXISTING_COMMENT=$(gh pr view ${{ github.event.pull_request.number }} --json comments --jq '.comments[] | select(.body | contains("🚨 New Schema Breaking Change Detected")) | .id' | head -1)
|
||||
|
||||
if [[ -z "$EXISTING_COMMENT" ]]; then
|
||||
gh pr comment ${{ github.event.pull_request.number }} --body "## 🚨 New Schema Breaking Change Detected
|
||||
|
||||
**Schema validation against release \`$RELEASE_TAG\` is now failing**
|
||||
|
||||
⚠️ This PR introduces a schema breaking change that affects compatibility with the latest release.
|
||||
|
||||
- Users on release \`$RELEASE_TAG\` will not be able to upgrade
|
||||
- Existing run.yaml configurations will fail validation
|
||||
|
||||
The tests pass on \`main\` but fail with this PR's changes.
|
||||
|
||||
> **Note:** This is informational only and does not block merge.
|
||||
> Consider whether this breaking change is acceptable for users."
|
||||
else
|
||||
echo "Comment already exists, skipping to avoid spam"
|
||||
fi
|
||||
|
||||
cat >> $GITHUB_STEP_SUMMARY <<EOF
|
||||
## 🚨 NEW Schema Breaking Change Detected
|
||||
|
||||
**Schema validation against release \`$RELEASE_TAG\` FAILED**
|
||||
|
||||
⚠️ **This PR introduces a NEW schema breaking change**
|
||||
|
||||
- Tests **PASS** on main branch ✅
|
||||
- Tests **FAIL** on PR branch ❌
|
||||
- Users on release \`$RELEASE_TAG\` will not be able to upgrade
|
||||
- Existing run.yaml configurations will fail validation
|
||||
|
||||
> **Note:** This is informational only and does not block merge.
|
||||
> Consider whether this breaking change is acceptable for users.
|
||||
EOF
|
||||
|
||||
elif [[ "$PR_OUTCOME" == "failure" ]]; then
|
||||
# Existing breaking change - both PR and main fail
|
||||
echo "::warning::Schema breaking change already exists in main branch"
|
||||
|
||||
cat >> $GITHUB_STEP_SUMMARY <<EOF
|
||||
## ⚠️ Release Schema Compatibility Failed (Existing Issue)
|
||||
|
||||
**Schema validation against release \`$RELEASE_TAG\` FAILED**
|
||||
|
||||
- Tests **FAIL** on main branch ❌
|
||||
- Tests **FAIL** on PR branch ❌
|
||||
- This schema breaking change already exists in main (not introduced by this PR)
|
||||
|
||||
> **Note:** This is informational only.
|
||||
EOF
|
||||
|
||||
else
|
||||
# Success - tests pass
|
||||
cat >> $GITHUB_STEP_SUMMARY <<EOF
|
||||
## ✅ Release Schema Compatibility Passed
|
||||
|
||||
All run.yaml configs from release \`$RELEASE_TAG\` are compatible.
|
||||
This PR maintains backward compatibility with the latest release.
|
||||
EOF
|
||||
fi
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
31
.github/workflows/changelog.yml
vendored
31
.github/workflows/changelog.yml
vendored
|
|
@ -1,31 +0,0 @@
|
|||
name: Update Changelog
|
||||
|
||||
run-name: Creates PR for updating the CHANGELOG.md
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published, unpublished, created, edited, deleted, released]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
generate_changelog:
|
||||
name: Generate changelog
|
||||
permissions:
|
||||
contents: write # for peter-evans/create-pull-request to create branch
|
||||
pull-requests: write # for peter-evans/create-pull-request to create a PR
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
ref: main
|
||||
fetch-depth: 0
|
||||
- run: |
|
||||
python ./scripts/gen-changelog.py
|
||||
- uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
|
||||
with:
|
||||
title: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
|
||||
commit-message: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
|
||||
branch: create-pull-request/changelog
|
||||
signoff: true
|
||||
5
.github/workflows/conformance.yml
vendored
5
.github/workflows/conformance.yml
vendored
|
|
@ -22,7 +22,6 @@ on:
|
|||
- 'docs/static/stable-llama-stack-spec.yaml' # Stable APIs spec
|
||||
- 'docs/static/experimental-llama-stack-spec.yaml' # Experimental APIs spec
|
||||
- 'docs/static/deprecated-llama-stack-spec.yaml' # Deprecated APIs spec
|
||||
- 'docs/static/llama-stack-spec.html' # Legacy HTML spec
|
||||
- '.github/workflows/conformance.yml' # This workflow itself
|
||||
|
||||
concurrency:
|
||||
|
|
@ -36,7 +35,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout PR Code
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
|
@ -60,7 +59,7 @@ jobs:
|
|||
# This allows us to diff the current changes against the previous state
|
||||
- name: Checkout Base Branch
|
||||
if: steps.skip-check.outputs.skip != 'true'
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.base.ref }}
|
||||
path: 'base'
|
||||
|
|
|
|||
14
.github/workflows/install-script-ci.yml
vendored
14
.github/workflows/install-script-ci.yml
vendored
|
|
@ -16,24 +16,30 @@ jobs:
|
|||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 5.0.0
|
||||
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # 6.0.0
|
||||
- name: Run ShellCheck on install.sh
|
||||
run: shellcheck scripts/install.sh
|
||||
smoke-test-on-dev:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
||||
- name: Build a single provider
|
||||
run: |
|
||||
BUILD_ARGS="--build-arg INSTALL_MODE=editable --build-arg DISTRO_NAME=starter"
|
||||
if [ -n "${UV_EXTRA_INDEX_URL:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_EXTRA_INDEX_URL=$UV_EXTRA_INDEX_URL"
|
||||
fi
|
||||
if [ -n "${UV_INDEX_STRATEGY:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_INDEX_STRATEGY=$UV_INDEX_STRATEGY"
|
||||
fi
|
||||
docker build . \
|
||||
-f containers/Containerfile \
|
||||
--build-arg INSTALL_MODE=editable \
|
||||
--build-arg DISTRO_NAME=starter \
|
||||
$BUILD_ARGS \
|
||||
--tag llama-stack:starter-ci
|
||||
|
||||
- name: Run installer end-to-end
|
||||
|
|
|
|||
17
.github/workflows/integration-auth-tests.yml
vendored
17
.github/workflows/integration-auth-tests.yml
vendored
|
|
@ -4,13 +4,17 @@ run-name: Run the integration test suite with Kubernetes authentication
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
paths:
|
||||
- 'distributions/**'
|
||||
- 'llama_stack/**'
|
||||
- '!llama_stack/ui/**'
|
||||
- 'src/llama_stack/**'
|
||||
- '!src/llama_stack_ui/**'
|
||||
- 'tests/integration/**'
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
|
|
@ -31,7 +35,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -91,6 +95,9 @@ jobs:
|
|||
conversations:
|
||||
table_name: openai_conversations
|
||||
backend: sql_default
|
||||
prompts:
|
||||
namespace: prompts
|
||||
backend: kv_default
|
||||
server:
|
||||
port: 8321
|
||||
EOF
|
||||
|
|
|
|||
|
|
@ -4,11 +4,15 @@ run-name: Run the integration test suite with SqlStore
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
paths:
|
||||
- 'llama_stack/providers/utils/sqlstore/**'
|
||||
- 'src/llama_stack/providers/utils/sqlstore/**'
|
||||
- 'tests/integration/sqlstore/**'
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
|
|
@ -44,7 +48,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -64,7 +68,7 @@ jobs:
|
|||
|
||||
- name: Upload test logs
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: postgres-test-logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.python-version }}
|
||||
path: |
|
||||
|
|
|
|||
79
.github/workflows/integration-tests.yml
vendored
79
.github/workflows/integration-tests.yml
vendored
|
|
@ -4,13 +4,17 @@ run-name: Run the integration test suites from tests/integration in replay mode
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
types: [opened, synchronize, reopened]
|
||||
paths:
|
||||
- 'llama_stack/**'
|
||||
- '!llama_stack/ui/**'
|
||||
- 'src/llama_stack/**'
|
||||
- '!src/llama_stack_ui/**'
|
||||
- 'tests/**'
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
|
|
@ -18,10 +22,11 @@ on:
|
|||
- '.github/actions/setup-ollama/action.yml'
|
||||
- '.github/actions/setup-test-environment/action.yml'
|
||||
- '.github/actions/run-and-record-tests/action.yml'
|
||||
- 'scripts/integration-tests.sh'
|
||||
- 'scripts/generate_ci_matrix.py'
|
||||
schedule:
|
||||
# If changing the cron schedule, update the provider in the test-matrix job
|
||||
- cron: '0 0 * * *' # (test latest client) Daily at 12 AM UTC
|
||||
- cron: '1 0 * * 0' # (test vllm) Weekly on Sunday at 1 AM UTC
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test-all-client-versions:
|
||||
|
|
@ -39,36 +44,47 @@ concurrency:
|
|||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
generate-matrix:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Generate test matrix
|
||||
id: set-matrix
|
||||
run: |
|
||||
# Generate matrix from CI_MATRIX in tests/integration/suites.py
|
||||
# Supports schedule-based and manual input overrides
|
||||
MATRIX=$(PYTHONPATH=. python3 scripts/generate_ci_matrix.py \
|
||||
--schedule "${{ github.event.schedule }}" \
|
||||
--test-setup "${{ github.event.inputs.test-setup }}")
|
||||
echo "matrix=$MATRIX" >> $GITHUB_OUTPUT
|
||||
echo "Generated matrix: $MATRIX"
|
||||
|
||||
run-replay-mode-tests:
|
||||
needs: generate-matrix
|
||||
runs-on: ubuntu-latest
|
||||
name: ${{ format('Integration Tests ({0}, {1}, {2}, client={3}, {4})', matrix.client-type, matrix.config.setup, matrix.python-version, matrix.client-version, matrix.config.suite) }}
|
||||
name: ${{ format('Integration Tests ({0}, {1}, {2}, client={3}, {4})', matrix.client, matrix.config.setup, matrix.python-version, matrix.client-version, matrix.config.suite) }}
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
client-type: [library, docker]
|
||||
client: [library, docker, server]
|
||||
# Use Python 3.13 only on nightly schedule (daily latest client test), otherwise use 3.12
|
||||
python-version: ${{ github.event.schedule == '0 0 * * *' && fromJSON('["3.12", "3.13"]') || fromJSON('["3.12"]') }}
|
||||
client-version: ${{ (github.event.schedule == '0 0 * * *' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}
|
||||
# Define (setup, suite) pairs - they are always matched and cannot be independent
|
||||
# Weekly schedule (Sun 1 AM): vllm+base
|
||||
# Input test-setup=ollama-vision: ollama-vision+vision
|
||||
# Default (including test-setup=ollama): ollama+base, ollama-vision+vision, gpt+responses
|
||||
config: >-
|
||||
${{
|
||||
github.event.schedule == '1 0 * * 0'
|
||||
&& fromJSON('[{"setup": "vllm", "suite": "base"}]')
|
||||
|| github.event.inputs.test-setup == 'ollama-vision'
|
||||
&& fromJSON('[{"setup": "ollama-vision", "suite": "vision"}]')
|
||||
|| fromJSON('[{"setup": "ollama", "suite": "base"}, {"setup": "ollama-vision", "suite": "vision"}, {"setup": "gpt", "suite": "responses"}]')
|
||||
}}
|
||||
# Test configurations: Generated from CI_MATRIX in tests/integration/suites.py
|
||||
# See scripts/generate_ci_matrix.py for generation logic
|
||||
config: ${{ fromJSON(needs.generate-matrix.outputs.matrix).include }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Setup test environment
|
||||
if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
|
||||
uses: ./.github/actions/setup-test-environment
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
|
@ -77,12 +93,33 @@ jobs:
|
|||
suite: ${{ matrix.config.suite }}
|
||||
inference-mode: 'replay'
|
||||
|
||||
- name: Setup Node.js for TypeScript client tests
|
||||
if: ${{ matrix.client == 'server' }}
|
||||
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: tests/integration/client-typescript/package-lock.json
|
||||
|
||||
- name: Setup TypeScript client
|
||||
if: ${{ matrix.client == 'server' }}
|
||||
id: setup-ts-client
|
||||
uses: ./.github/actions/setup-typescript-client
|
||||
with:
|
||||
client-version: ${{ matrix.client-version }}
|
||||
|
||||
- name: Run tests
|
||||
if: ${{ matrix.config.allowed_clients == null || contains(matrix.config.allowed_clients, matrix.client) }}
|
||||
uses: ./.github/actions/run-and-record-tests
|
||||
env:
|
||||
OPENAI_API_KEY: dummy
|
||||
TS_CLIENT_PATH: ${{ steps.setup-ts-client.outputs.ts-client-path || '' }}
|
||||
with:
|
||||
stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || matrix.client-type == 'server' && 'server:ci-tests' || 'docker:ci-tests' }}
|
||||
stack-config: >-
|
||||
${{ matrix.config.stack_config
|
||||
|| (matrix.client == 'library' && 'ci-tests')
|
||||
|| (matrix.client == 'server' && 'server:ci-tests')
|
||||
|| 'docker:ci-tests' }}
|
||||
setup: ${{ matrix.config.setup }}
|
||||
inference-mode: 'replay'
|
||||
suite: ${{ matrix.config.suite }}
|
||||
|
|
|
|||
|
|
@ -4,12 +4,16 @@ run-name: Run the integration test suite with various VectorIO providers
|
|||
|
||||
on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
pull_request:
|
||||
branches: [ main ]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
paths:
|
||||
- 'llama_stack/**'
|
||||
- '!llama_stack/ui/**'
|
||||
- 'src/llama_stack/**'
|
||||
- '!src/llama_stack_ui/**'
|
||||
- 'tests/integration/vector_io/**'
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
|
|
@ -33,7 +37,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -194,7 +198,7 @@ jobs:
|
|||
|
||||
- name: Upload all logs to artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: vector-io-logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ env.SANITIZED_PROVIDER }}-${{ matrix.python-version }}
|
||||
path: |
|
||||
|
|
|
|||
85
.github/workflows/pre-commit.yml
vendored
85
.github/workflows/pre-commit.yml
vendored
|
|
@ -5,7 +5,9 @@ run-name: Run pre-commit checks
|
|||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
branches:
|
||||
- main
|
||||
- 'release-[0-9]+.[0-9]+.x'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
|
||||
|
|
@ -20,7 +22,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
# For dependabot PRs, we need to checkout with a token that can push changes
|
||||
token: ${{ github.actor == 'dependabot[bot]' && secrets.GITHUB_TOKEN || github.token }}
|
||||
|
|
@ -41,25 +43,43 @@ jobs:
|
|||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'llama_stack/ui/'
|
||||
cache-dependency-path: 'src/llama_stack_ui/'
|
||||
|
||||
- name: Set up uv
|
||||
uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
|
||||
|
||||
- name: Install npm dependencies
|
||||
run: npm ci
|
||||
working-directory: llama_stack/ui
|
||||
working-directory: src/llama_stack_ui
|
||||
|
||||
- name: Install pre-commit
|
||||
run: python -m pip install 'pre-commit>=4.4.0'
|
||||
|
||||
- name: Cache pre-commit
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ~/.cache/pre-commit
|
||||
key: pre-commit-3|${{ env.pythonLocation }}|${{ hashFiles('.pre-commit-config.yaml') }}
|
||||
|
||||
- name: Run pre-commit
|
||||
id: precommit
|
||||
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
|
||||
continue-on-error: true
|
||||
run: |
|
||||
set +e
|
||||
pre-commit run --show-diff-on-failure --color=always --all-files 2>&1 | tee /tmp/precommit.log
|
||||
status=${PIPESTATUS[0]}
|
||||
echo "status=$status" >> $GITHUB_OUTPUT
|
||||
exit 0
|
||||
env:
|
||||
SKIP: no-commit-to-branch
|
||||
SKIP: no-commit-to-branch,mypy
|
||||
RUFF_OUTPUT_FORMAT: github
|
||||
|
||||
- name: Check pre-commit results
|
||||
if: steps.precommit.outcome == 'failure'
|
||||
if: steps.precommit.outputs.status != '0'
|
||||
run: |
|
||||
echo "::error::Pre-commit hooks failed. Please run 'pre-commit run --all-files' locally and commit the fixes."
|
||||
echo "::warning::Some pre-commit hooks failed. Check the output above for details."
|
||||
echo ""
|
||||
echo "Failed hooks output:"
|
||||
cat /tmp/precommit.log
|
||||
exit 1
|
||||
|
||||
- name: Debug
|
||||
|
|
@ -109,3 +129,50 @@ jobs:
|
|||
echo "$unstaged_files"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Configure client installation
|
||||
id: client-config
|
||||
uses: ./.github/actions/install-llama-stack-client
|
||||
|
||||
- name: Sync dev + type_checking dependencies
|
||||
env:
|
||||
UV_EXTRA_INDEX_URL: ${{ steps.client-config.outputs.uv-extra-index-url }}
|
||||
run: |
|
||||
if [ -n "$UV_EXTRA_INDEX_URL" ]; then
|
||||
export UV_INDEX_STRATEGY="unsafe-best-match"
|
||||
fi
|
||||
|
||||
uv sync --group dev --group type_checking
|
||||
|
||||
# Install specific client version after sync if needed
|
||||
if [ "${{ steps.client-config.outputs.install-after-sync }}" = "true" ]; then
|
||||
echo "Installing llama-stack-client from: ${{ steps.client-config.outputs.install-source }}"
|
||||
uv pip install ${{ steps.client-config.outputs.install-source }}
|
||||
fi
|
||||
|
||||
- name: Run mypy (full type_checking)
|
||||
env:
|
||||
UV_EXTRA_INDEX_URL: ${{ steps.client-config.outputs.uv-extra-index-url }}
|
||||
run: |
|
||||
if [ -n "$UV_EXTRA_INDEX_URL" ]; then
|
||||
export UV_INDEX_STRATEGY="unsafe-best-match"
|
||||
fi
|
||||
|
||||
set +e
|
||||
uv run --group dev --group type_checking mypy
|
||||
status=$?
|
||||
if [ $status -ne 0 ]; then
|
||||
echo "::error::Full mypy failed. Reproduce locally with 'uv run pre-commit run mypy-full --hook-stage manual --all-files'."
|
||||
fi
|
||||
exit $status
|
||||
|
||||
- name: Check if any unused recordings
|
||||
run: |
|
||||
set -e
|
||||
PYTHONPATH=$PWD uv run ./scripts/cleanup_recordings.py --delete
|
||||
changes=$(git status --short tests/integration | grep 'recordings' || true)
|
||||
if [ -n "$changes" ]; then
|
||||
echo "::error::Unused integration recordings detected. Run 'PYTHONPATH=$(pwd) uv run ./scripts/cleanup_recordings.py --delete' locally and commit the deletions."
|
||||
echo "$changes"
|
||||
exit 1
|
||||
fi
|
||||
|
|
|
|||
227
.github/workflows/precommit-trigger.yml
vendored
227
.github/workflows/precommit-trigger.yml
vendored
|
|
@ -1,227 +0,0 @@
|
|||
name: Pre-commit Bot
|
||||
|
||||
run-name: Pre-commit bot for PR #${{ github.event.issue.number }}
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
pre-commit:
|
||||
# Only run on pull request comments
|
||||
if: github.event.issue.pull_request && contains(github.event.comment.body, '@github-actions run precommit')
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Check comment author and get PR details
|
||||
id: check_author
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
// Get PR details
|
||||
const pr = await github.rest.pulls.get({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: context.issue.number
|
||||
});
|
||||
|
||||
// Check if commenter has write access or is the PR author
|
||||
const commenter = context.payload.comment.user.login;
|
||||
const prAuthor = pr.data.user.login;
|
||||
|
||||
let hasPermission = false;
|
||||
|
||||
// Check if commenter is PR author
|
||||
if (commenter === prAuthor) {
|
||||
hasPermission = true;
|
||||
console.log(`Comment author ${commenter} is the PR author`);
|
||||
} else {
|
||||
// Check if commenter has write/admin access
|
||||
try {
|
||||
const permission = await github.rest.repos.getCollaboratorPermissionLevel({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
username: commenter
|
||||
});
|
||||
|
||||
const level = permission.data.permission;
|
||||
hasPermission = ['write', 'admin', 'maintain'].includes(level);
|
||||
console.log(`Comment author ${commenter} has permission: ${level}`);
|
||||
} catch (error) {
|
||||
console.log(`Could not check permissions for ${commenter}: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
if (!hasPermission) {
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.issue.number,
|
||||
body: `❌ @${commenter} You don't have permission to trigger pre-commit. Only PR authors or repository collaborators can run this command.`
|
||||
});
|
||||
core.setFailed(`User ${commenter} does not have permission`);
|
||||
return;
|
||||
}
|
||||
|
||||
// Save PR info for later steps
|
||||
core.setOutput('pr_number', context.issue.number);
|
||||
core.setOutput('pr_head_ref', pr.data.head.ref);
|
||||
core.setOutput('pr_head_sha', pr.data.head.sha);
|
||||
core.setOutput('pr_head_repo', pr.data.head.repo.full_name);
|
||||
core.setOutput('pr_base_ref', pr.data.base.ref);
|
||||
core.setOutput('is_fork', pr.data.head.repo.full_name !== context.payload.repository.full_name);
|
||||
core.setOutput('authorized', 'true');
|
||||
|
||||
- name: React to comment
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
await github.rest.reactions.createForIssueComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
comment_id: context.payload.comment.id,
|
||||
content: 'rocket'
|
||||
});
|
||||
|
||||
- name: Comment starting
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: ${{ steps.check_author.outputs.pr_number }},
|
||||
body: `⏳ Running [pre-commit hooks](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}) on PR #${{ steps.check_author.outputs.pr_number }}...`
|
||||
});
|
||||
|
||||
- name: Checkout PR branch (same-repo)
|
||||
if: steps.check_author.outputs.authorized == 'true' && steps.check_author.outputs.is_fork == 'false'
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
ref: ${{ steps.check_author.outputs.pr_head_ref }}
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Checkout PR branch (fork)
|
||||
if: steps.check_author.outputs.authorized == 'true' && steps.check_author.outputs.is_fork == 'true'
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
repository: ${{ steps.check_author.outputs.pr_head_repo }}
|
||||
ref: ${{ steps.check_author.outputs.pr_head_ref }}
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Verify checkout
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
run: |
|
||||
echo "Current SHA: $(git rev-parse HEAD)"
|
||||
echo "Expected SHA: ${{ steps.check_author.outputs.pr_head_sha }}"
|
||||
if [[ "$(git rev-parse HEAD)" != "${{ steps.check_author.outputs.pr_head_sha }}" ]]; then
|
||||
echo "::error::Checked out SHA does not match expected SHA"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Set up Python
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
|
||||
with:
|
||||
python-version: '3.12'
|
||||
cache: pip
|
||||
cache-dependency-path: |
|
||||
**/requirements*.txt
|
||||
.pre-commit-config.yaml
|
||||
|
||||
- name: Set up Node.js
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'llama_stack/ui/'
|
||||
|
||||
- name: Install npm dependencies
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
run: npm ci
|
||||
working-directory: llama_stack/ui
|
||||
|
||||
- name: Run pre-commit
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
id: precommit
|
||||
uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
|
||||
continue-on-error: true
|
||||
env:
|
||||
SKIP: no-commit-to-branch
|
||||
RUFF_OUTPUT_FORMAT: github
|
||||
|
||||
- name: Check for changes
|
||||
if: steps.check_author.outputs.authorized == 'true'
|
||||
id: changes
|
||||
run: |
|
||||
if ! git diff --exit-code || [ -n "$(git ls-files --others --exclude-standard)" ]; then
|
||||
echo "has_changes=true" >> $GITHUB_OUTPUT
|
||||
echo "Changes detected after pre-commit"
|
||||
else
|
||||
echo "has_changes=false" >> $GITHUB_OUTPUT
|
||||
echo "No changes after pre-commit"
|
||||
fi
|
||||
|
||||
- name: Commit and push changes
|
||||
if: steps.check_author.outputs.authorized == 'true' && steps.changes.outputs.has_changes == 'true'
|
||||
run: |
|
||||
git config --local user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git config --local user.name "github-actions[bot]"
|
||||
|
||||
git add -A
|
||||
git commit -m "style: apply pre-commit fixes
|
||||
|
||||
🤖 Applied by @github-actions bot via pre-commit workflow"
|
||||
|
||||
# Push changes
|
||||
git push origin HEAD:${{ steps.check_author.outputs.pr_head_ref }}
|
||||
|
||||
- name: Comment success with changes
|
||||
if: steps.check_author.outputs.authorized == 'true' && steps.changes.outputs.has_changes == 'true'
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: ${{ steps.check_author.outputs.pr_number }},
|
||||
body: `✅ Pre-commit hooks completed successfully!\n\n🔧 Changes have been committed and pushed to the PR branch.`
|
||||
});
|
||||
|
||||
- name: Comment success without changes
|
||||
if: steps.check_author.outputs.authorized == 'true' && steps.changes.outputs.has_changes == 'false' && steps.precommit.outcome == 'success'
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: ${{ steps.check_author.outputs.pr_number }},
|
||||
body: `✅ Pre-commit hooks passed!\n\n✨ No changes needed - your code is already formatted correctly.`
|
||||
});
|
||||
|
||||
- name: Comment failure
|
||||
if: failure()
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: ${{ steps.check_author.outputs.pr_number }},
|
||||
body: `❌ Pre-commit workflow failed!\n\nPlease check the [workflow logs](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}) for details.`
|
||||
});
|
||||
76 .github/workflows/providers-build.yml (vendored)
|
|
@@ -7,24 +7,24 @@ on:
|
|||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'llama_stack/cli/stack/build.py'
|
||||
- 'llama_stack/cli/stack/_build.py'
|
||||
- 'llama_stack/core/build.*'
|
||||
- 'llama_stack/core/*.sh'
|
||||
- 'src/llama_stack/cli/stack/build.py'
|
||||
- 'src/llama_stack/cli/stack/_build.py'
|
||||
- 'src/llama_stack/core/build.*'
|
||||
- 'src/llama_stack/core/*.sh'
|
||||
- '.github/workflows/providers-build.yml'
|
||||
- 'llama_stack/distributions/**'
|
||||
- 'src/llama_stack/distributions/**'
|
||||
- 'pyproject.toml'
|
||||
- 'containers/Containerfile'
|
||||
- '.dockerignore'
|
||||
|
||||
pull_request:
|
||||
paths:
|
||||
- 'llama_stack/cli/stack/build.py'
|
||||
- 'llama_stack/cli/stack/_build.py'
|
||||
- 'llama_stack/core/build.*'
|
||||
- 'llama_stack/core/*.sh'
|
||||
- 'src/llama_stack/cli/stack/build.py'
|
||||
- 'src/llama_stack/cli/stack/_build.py'
|
||||
- 'src/llama_stack/core/build.*'
|
||||
- 'src/llama_stack/core/*.sh'
|
||||
- '.github/workflows/providers-build.yml'
|
||||
- 'llama_stack/distributions/**'
|
||||
- 'src/llama_stack/distributions/**'
|
||||
- 'pyproject.toml'
|
||||
- 'containers/Containerfile'
|
||||
- '.dockerignore'
|
||||
|
|
@ -40,12 +40,12 @@ jobs:
|
|||
distros: ${{ steps.set-matrix.outputs.distros }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Generate Distribution List
|
||||
id: set-matrix
|
||||
run: |
|
||||
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
|
||||
distros=$(ls src/llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
|
||||
echo "distros=$distros" >> "$GITHUB_OUTPUT"
|
||||
|
||||
build:
|
||||
|
|
@ -59,7 +59,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -72,10 +72,16 @@ jobs:
|
|||
- name: Build container image
|
||||
if: matrix.image-type == 'container'
|
||||
run: |
|
||||
BUILD_ARGS="--build-arg INSTALL_MODE=editable --build-arg DISTRO_NAME=${{ matrix.distro }}"
|
||||
if [ -n "${UV_EXTRA_INDEX_URL:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_EXTRA_INDEX_URL=$UV_EXTRA_INDEX_URL"
|
||||
fi
|
||||
if [ -n "${UV_INDEX_STRATEGY:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_INDEX_STRATEGY=$UV_INDEX_STRATEGY"
|
||||
fi
|
||||
docker build . \
|
||||
-f containers/Containerfile \
|
||||
--build-arg INSTALL_MODE=editable \
|
||||
--build-arg DISTRO_NAME=${{ matrix.distro }} \
|
||||
$BUILD_ARGS \
|
||||
--tag llama-stack:${{ matrix.distro }}-ci
|
||||
|
||||
- name: Print dependencies in the image
|
||||
|
|
@ -87,7 +93,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -100,20 +106,26 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
||||
- name: Build container image
|
||||
run: |
|
||||
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' llama_stack/distributions/ci-tests/build.yaml)
|
||||
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' src/llama_stack/distributions/ci-tests/build.yaml)
|
||||
BUILD_ARGS="--build-arg INSTALL_MODE=editable --build-arg DISTRO_NAME=ci-tests"
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg BASE_IMAGE=$BASE_IMAGE"
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg RUN_CONFIG_PATH=/workspace/src/llama_stack/distributions/ci-tests/run.yaml"
|
||||
if [ -n "${UV_EXTRA_INDEX_URL:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_EXTRA_INDEX_URL=$UV_EXTRA_INDEX_URL"
|
||||
fi
|
||||
if [ -n "${UV_INDEX_STRATEGY:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_INDEX_STRATEGY=$UV_INDEX_STRATEGY"
|
||||
fi
|
||||
docker build . \
|
||||
-f containers/Containerfile \
|
||||
--build-arg INSTALL_MODE=editable \
|
||||
--build-arg DISTRO_NAME=ci-tests \
|
||||
--build-arg BASE_IMAGE="$BASE_IMAGE" \
|
||||
--build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
|
||||
$BUILD_ARGS \
|
||||
-t llama-stack:ci-tests
|
||||
|
||||
- name: Inspect the container image entrypoint
|
||||
|
|
@ -134,7 +146,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -143,17 +155,23 @@ jobs:
|
|||
run: |
|
||||
yq -i '
|
||||
.distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest"
|
||||
' llama_stack/distributions/ci-tests/build.yaml
|
||||
' src/llama_stack/distributions/ci-tests/build.yaml
|
||||
|
||||
- name: Build UBI9 container image
|
||||
run: |
|
||||
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' llama_stack/distributions/ci-tests/build.yaml)
|
||||
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' src/llama_stack/distributions/ci-tests/build.yaml)
|
||||
BUILD_ARGS="--build-arg INSTALL_MODE=editable --build-arg DISTRO_NAME=ci-tests"
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg BASE_IMAGE=$BASE_IMAGE"
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg RUN_CONFIG_PATH=/workspace/src/llama_stack/distributions/ci-tests/run.yaml"
|
||||
if [ -n "${UV_EXTRA_INDEX_URL:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_EXTRA_INDEX_URL=$UV_EXTRA_INDEX_URL"
|
||||
fi
|
||||
if [ -n "${UV_INDEX_STRATEGY:-}" ]; then
|
||||
BUILD_ARGS="$BUILD_ARGS --build-arg UV_INDEX_STRATEGY=$UV_INDEX_STRATEGY"
|
||||
fi
|
||||
docker build . \
|
||||
-f containers/Containerfile \
|
||||
--build-arg INSTALL_MODE=editable \
|
||||
--build-arg DISTRO_NAME=ci-tests \
|
||||
--build-arg BASE_IMAGE="$BASE_IMAGE" \
|
||||
--build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
|
||||
$BUILD_ARGS \
|
||||
-t llama-stack:ci-tests-ubi9
|
||||
|
||||
- name: Inspect UBI9 image
|
||||
|
|
|
|||
32 .github/workflows/providers-list-deps.yml (vendored)
|
|
@ -7,22 +7,22 @@ on:
|
|||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'llama_stack/cli/stack/list_deps.py'
|
||||
- 'llama_stack/cli/stack/_list_deps.py'
|
||||
- 'llama_stack/core/build.*'
|
||||
- 'llama_stack/core/*.sh'
|
||||
- 'src/llama_stack/cli/stack/list_deps.py'
|
||||
- 'src/llama_stack/cli/stack/_list_deps.py'
|
||||
- 'src/llama_stack/core/build.*'
|
||||
- 'src/llama_stack/core/*.sh'
|
||||
- '.github/workflows/providers-list-deps.yml'
|
||||
- 'llama_stack/templates/**'
|
||||
- 'src/llama_stack/templates/**'
|
||||
- 'pyproject.toml'
|
||||
|
||||
pull_request:
|
||||
paths:
|
||||
- 'llama_stack/cli/stack/list_deps.py'
|
||||
- 'llama_stack/cli/stack/_list_deps.py'
|
||||
- 'llama_stack/core/build.*'
|
||||
- 'llama_stack/core/*.sh'
|
||||
- 'src/llama_stack/cli/stack/list_deps.py'
|
||||
- 'src/llama_stack/cli/stack/_list_deps.py'
|
||||
- 'src/llama_stack/core/build.*'
|
||||
- 'src/llama_stack/core/*.sh'
|
||||
- '.github/workflows/providers-list-deps.yml'
|
||||
- 'llama_stack/templates/**'
|
||||
- 'src/llama_stack/templates/**'
|
||||
- 'pyproject.toml'
|
||||
|
||||
concurrency:
|
||||
|
|
@ -36,12 +36,12 @@ jobs:
|
|||
distros: ${{ steps.set-matrix.outputs.distros }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Generate Distribution List
|
||||
id: set-matrix
|
||||
run: |
|
||||
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
|
||||
distros=$(ls src/llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
|
||||
echo "distros=$distros" >> "$GITHUB_OUTPUT"
|
||||
|
||||
list-deps:
|
||||
|
|
@ -55,7 +55,7 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -79,7 +79,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -92,7 +92,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -102,4 +102,4 @@ jobs:
|
|||
USE_COPY_NOT_MOUNT: "true"
|
||||
LLAMA_STACK_DIR: "."
|
||||
run: |
|
||||
uv run llama stack list-deps llama_stack/distributions/ci-tests/build.yaml
|
||||
uv run llama stack list-deps src/llama_stack/distributions/ci-tests/build.yaml
|
||||
|
|
|
|||
20 .github/workflows/python-build-test.yml (vendored)
@@ -10,7 +10,7 @@ on:
    branches:
      - main
    paths-ignore:
      - 'llama_stack/ui/**'
      - 'src/llama_stack_ui/**'

jobs:
  build:

@@ -21,22 +21,25 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

      - name: Install uv
        uses: astral-sh/setup-uv@3259c6206f993105e3a61b142c2d97bf4b9ef83d # v7.1.0
        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
        with:
          python-version: ${{ matrix.python-version }}
          activate-environment: true
          version: 0.7.6

      - name: Build Llama Stack package
        run: |
          uv build
      - name: Build Llama Stack API package
        working-directory: src/llama_stack_api
        run: uv build

      - name: Install Llama Stack package
      - name: Build Llama Stack package
        run: uv build

      - name: Install Llama Stack package (with api stubs from local build)
        run: |
          uv pip install dist/*.whl
          uv pip install --find-links src/llama_stack_api/dist dist/*.whl

      - name: Verify Llama Stack package
        run: |

@@ -45,3 +48,4 @@ jobs:
          command -v llama
          llama stack list-apis
          llama stack list-providers inference
          llama stack list-deps starter
|
||||
|
|
|
|||
|
|
@ -46,7 +46,7 @@ jobs:
|
|||
echo "::endgroup::"
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
|
|
|||
146 .github/workflows/stainless-builds.yml (vendored, new file)
|
|
@ -0,0 +1,146 @@
|
|||
name: Stainless SDK Builds
|
||||
run-name: Build Stainless SDK from OpenAPI spec changes
|
||||
|
||||
# This workflow uses pull_request_target, which allows it to run on pull requests
|
||||
# from forks with access to secrets. This is safe because the workflow definition
|
||||
# comes from the base branch (trusted), and the action only reads OpenAPI spec
|
||||
# files without executing any code from the PR.
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- synchronize
|
||||
- reopened
|
||||
- closed
|
||||
paths:
|
||||
- "client-sdks/stainless/**"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
# Stainless organization name.
|
||||
STAINLESS_ORG: llamastack
|
||||
|
||||
# Stainless project name.
|
||||
STAINLESS_PROJECT: llama-stack-client
|
||||
|
||||
# Path to your OpenAPI spec.
|
||||
OAS_PATH: ./client-sdks/stainless/openapi.yml
|
||||
|
||||
# Path to your Stainless config. Optional; only provide this if you prefer
|
||||
# to maintain the ground truth Stainless config in your own repo.
|
||||
CONFIG_PATH: ./client-sdks/stainless/config.yml
|
||||
|
||||
# When to fail the job based on build conclusion.
|
||||
# Options: "never" | "note" | "warning" | "error" | "fatal".
|
||||
FAIL_ON: error
|
||||
|
||||
# In your repo secrets, configure:
|
||||
# - STAINLESS_API_KEY: a Stainless API key, which you can generate on the
|
||||
# Stainless organization dashboard
|
||||
|
||||
jobs:
|
||||
compute-branch:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
preview_branch: ${{ steps.compute.outputs.preview_branch }}
|
||||
base_branch: ${{ steps.compute.outputs.base_branch }}
|
||||
merge_branch: ${{ steps.compute.outputs.merge_branch }}
|
||||
steps:
|
||||
- name: Compute branch names
|
||||
id: compute
|
||||
run: |
|
||||
HEAD_REPO="${{ github.event.pull_request.head.repo.full_name }}"
|
||||
BASE_REPO="${{ github.repository }}"
|
||||
BRANCH_NAME="${{ github.event.pull_request.head.ref }}"
|
||||
FORK_OWNER="${{ github.event.pull_request.head.repo.owner.login }}"
|
||||
|
||||
if [ "$HEAD_REPO" != "$BASE_REPO" ]; then
|
||||
# Fork PR: prefix with fork owner for isolation
|
||||
if [ -z "$FORK_OWNER" ]; then
|
||||
echo "Error: Fork PR detected but fork owner is empty" >&2
|
||||
exit 1
|
||||
fi
|
||||
PREVIEW_BRANCH="preview/${FORK_OWNER}/${BRANCH_NAME}"
|
||||
BASE_BRANCH="preview/base/${FORK_OWNER}/${BRANCH_NAME}"
|
||||
else
|
||||
# Same-repo PR
|
||||
PREVIEW_BRANCH="preview/${BRANCH_NAME}"
|
||||
BASE_BRANCH="preview/base/${BRANCH_NAME}"
|
||||
fi
|
||||
|
||||
echo "preview_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT
|
||||
echo "base_branch=${BASE_BRANCH}" >> $GITHUB_OUTPUT
|
||||
echo "merge_branch=${PREVIEW_BRANCH}" >> $GITHUB_OUTPUT
|
||||
|
||||
preview:
|
||||
needs: compute-branch
|
||||
if: github.event.action != 'closed'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
# Checkout the PR's code to access the OpenAPI spec and config files.
|
||||
# This is necessary to read the spec/config from the PR (including from forks).
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Run preview builds
|
||||
uses: stainless-api/upload-openapi-spec-action/preview@9133735bca5ce0a1df7d3b26e75364e26137a016 # 1.7.0
|
||||
with:
|
||||
stainless_api_key: ${{ secrets.STAINLESS_API_KEY }}
|
||||
org: ${{ env.STAINLESS_ORG }}
|
||||
project: ${{ env.STAINLESS_PROJECT }}
|
||||
oas_path: ${{ env.OAS_PATH }}
|
||||
config_path: ${{ env.CONFIG_PATH }}
|
||||
fail_on: ${{ env.FAIL_ON }}
|
||||
base_sha: ${{ github.event.pull_request.base.sha }}
|
||||
base_ref: ${{ github.event.pull_request.base.ref }}
|
||||
head_sha: ${{ github.event.pull_request.head.sha }}
|
||||
branch: ${{ needs.compute-branch.outputs.preview_branch }}
|
||||
base_branch: ${{ needs.compute-branch.outputs.base_branch }}
|
||||
|
||||
merge:
|
||||
needs: compute-branch
|
||||
if: github.event.action == 'closed' && github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
steps:
|
||||
# Checkout the PR's code to access the OpenAPI spec and config files.
|
||||
# This is necessary to read the spec/config from the PR (including from forks).
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
with:
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 2
|
||||
|
||||
# Note that this only merges in changes that happened on the last build on
|
||||
# the computed preview branch. It's possible that there are OAS/config
|
||||
# changes that haven't been built, if the preview job didn't finish
|
||||
# before this step starts. In theory we want to wait for all builds
|
||||
# against the preview branch to complete, but assuming that
|
||||
# the preview job happens before the PR merge, it should be fine.
|
||||
- name: Run merge build
|
||||
uses: stainless-api/upload-openapi-spec-action/merge@9133735bca5ce0a1df7d3b26e75364e26137a016 # 1.7.0
|
||||
with:
|
||||
stainless_api_key: ${{ secrets.STAINLESS_API_KEY }}
|
||||
org: ${{ env.STAINLESS_ORG }}
|
||||
project: ${{ env.STAINLESS_PROJECT }}
|
||||
oas_path: ${{ env.OAS_PATH }}
|
||||
config_path: ${{ env.CONFIG_PATH }}
|
||||
fail_on: ${{ env.FAIL_ON }}
|
||||
base_sha: ${{ github.event.pull_request.base.sha }}
|
||||
base_ref: ${{ github.event.pull_request.base.ref }}
|
||||
head_sha: ${{ github.event.pull_request.head.sha }}
|
||||
merge_branch: ${{ needs.compute-branch.outputs.merge_branch }}
|
||||
|
|
@ -8,7 +8,7 @@ on:
|
|||
pull_request:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'llama_stack/**'
|
||||
- 'src/llama_stack/**'
|
||||
- 'tests/integration/**'
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
|
|
@ -27,7 +27,7 @@ jobs:
|
|||
# container and point 'uv pip install' to the correct path...
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -78,7 +78,7 @@ jobs:
|
|||
|
||||
- name: Upload all logs to artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: logs-${{ github.run_id }}-${{ github.run_attempt }}-external-provider-module-test
|
||||
path: |
|
||||
|
|
|
|||
8 .github/workflows/test-external.yml (vendored)
|
|
@ -8,8 +8,8 @@ on:
|
|||
pull_request:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'llama_stack/**'
|
||||
- '!llama_stack/ui/**'
|
||||
- 'src/llama_stack/**'
|
||||
- '!src/llama_stack_ui/**'
|
||||
- 'tests/integration/**'
|
||||
- 'uv.lock'
|
||||
- 'pyproject.toml'
|
||||
|
|
@ -27,7 +27,7 @@ jobs:
|
|||
# container and point 'uv pip install' to the correct path...
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Install dependencies
|
||||
uses: ./.github/actions/setup-runner
|
||||
|
|
@ -84,7 +84,7 @@ jobs:
|
|||
|
||||
- name: Upload all logs to artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
|
||||
with:
|
||||
name: logs-${{ github.run_id }}-${{ github.run_attempt }}-external-test
|
||||
path: |
|
||||
|
|
|
|||
14 .github/workflows/ui-unit-tests.yml (vendored)
|
|
@ -8,7 +8,7 @@ on:
|
|||
pull_request:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'llama_stack/ui/**'
|
||||
- 'src/llama_stack_ui/**'
|
||||
- '.github/workflows/ui-unit-tests.yml' # This workflow
|
||||
workflow_dispatch:
|
||||
|
||||
|
|
@ -26,29 +26,29 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
|
||||
with:
|
||||
node-version: ${{ matrix.node-version }}
|
||||
cache: 'npm'
|
||||
cache-dependency-path: 'llama_stack/ui/package-lock.json'
|
||||
cache-dependency-path: 'src/llama_stack_ui/package-lock.json'
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: llama_stack/ui
|
||||
working-directory: src/llama_stack_ui
|
||||
run: npm ci
|
||||
|
||||
- name: Run linting
|
||||
working-directory: llama_stack/ui
|
||||
working-directory: src/llama_stack_ui
|
||||
run: npm run lint
|
||||
|
||||
- name: Run format check
|
||||
working-directory: llama_stack/ui
|
||||
working-directory: src/llama_stack_ui
|
||||
run: npm run format:check
|
||||
|
||||
- name: Run unit tests
|
||||
working-directory: llama_stack/ui
|
||||
working-directory: src/llama_stack_ui
|
||||
env:
|
||||
CI: true
|
||||
|
||||
|
|
|
|||
16 .github/workflows/unit-tests.yml (vendored)
@@ -4,12 +4,16 @@ run-name: Run the unit test suite

on:
  push:
    branches: [ main ]
    branches:
      - main
      - 'release-[0-9]+.[0-9]+.x'
  pull_request:
    branches: [ main ]
    branches:
      - main
      - 'release-[0-9]+.[0-9]+.x'
    paths:
      - 'llama_stack/**'
      - '!llama_stack/ui/**'
      - 'src/llama_stack/**'
      - '!src/llama_stack_ui/**'
      - 'tests/unit/**'
      - 'uv.lock'
      - 'pyproject.toml'

@@ -32,7 +36,7 @@ jobs:
          - "3.13"
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

@@ -45,7 +49,7 @@ jobs:

      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: test-results-${{ matrix.python }}
          path: |
|
||||
|
|
|
|||
5 .gitignore (vendored)
@@ -32,3 +32,8 @@ CLAUDE.md
docs/.docusaurus/
docs/node_modules/
docs/static/imported-files/
docs/docs/api-deprecated/
docs/docs/api-experimental/
docs/docs/api/
tests/integration/client-typescript/node_modules/
.ts-client-checkout/
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
exclude: 'build/'
|
||||
|
||||
minimum_pre_commit_version: 4.4.0
|
||||
default_language_version:
|
||||
python: python3.12
|
||||
node: "22"
|
||||
|
|
@ -42,7 +42,6 @@ repos:
|
|||
hooks:
|
||||
- id: ruff
|
||||
args: [ --fix ]
|
||||
exclude: ^llama_stack/strong_typing/.*$
|
||||
- id: ruff-format
|
||||
|
||||
- repo: https://github.com/adamchainz/blacken-docs
|
||||
|
|
@ -52,13 +51,9 @@ repos:
|
|||
additional_dependencies:
|
||||
- black==24.3.0
|
||||
|
||||
- repo: https://github.com/astral-sh/uv-pre-commit
|
||||
rev: 0.7.20
|
||||
hooks:
|
||||
- id: uv-lock
|
||||
|
||||
- repo: https://github.com/pre-commit/mirrors-mypy
|
||||
rev: v1.16.1
|
||||
rev: v1.18.2
|
||||
hooks:
|
||||
- id: mypy
|
||||
additional_dependencies:
|
||||
|
|
@ -78,33 +73,48 @@ repos:
|
|||
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: uv-lock
|
||||
name: uv-lock
|
||||
additional_dependencies:
|
||||
- uv==0.7.20
|
||||
entry: ./scripts/uv-run-with-index.sh lock
|
||||
language: python
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
files: ^(pyproject\.toml|uv\.lock)$
|
||||
- id: mypy-full
|
||||
name: mypy (full type_checking)
|
||||
entry: ./scripts/uv-run-with-index.sh run --group dev --group type_checking mypy
|
||||
language: system
|
||||
pass_filenames: false
|
||||
stages: [manual]
|
||||
- id: distro-codegen
|
||||
name: Distribution Template Codegen
|
||||
additional_dependencies:
|
||||
- uv==0.7.8
|
||||
entry: uv run --group codegen ./scripts/distro_codegen.py
|
||||
entry: ./scripts/uv-run-with-index.sh run --group codegen ./scripts/distro_codegen.py
|
||||
language: python
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
files: ^llama_stack/distributions/.*$|^llama_stack/providers/.*/inference/.*/models\.py$
|
||||
files: ^src/llama_stack/distributions/.*$|^src/llama_stack/providers/.*/inference/.*/models\.py$
|
||||
- id: provider-codegen
|
||||
name: Provider Codegen
|
||||
additional_dependencies:
|
||||
- uv==0.7.8
|
||||
entry: uv run --group codegen ./scripts/provider_codegen.py
|
||||
entry: ./scripts/uv-run-with-index.sh run --group codegen ./scripts/provider_codegen.py
|
||||
language: python
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
files: ^llama_stack/providers/.*$
|
||||
files: ^src/llama_stack/providers/.*$|^scripts/run_openapi_generator.sh$
|
||||
- id: openapi-codegen
|
||||
name: API Spec Codegen
|
||||
additional_dependencies:
|
||||
- uv==0.7.8
|
||||
entry: sh -c 'uv run ./docs/openapi_generator/run_openapi_generator.sh > /dev/null'
|
||||
entry: sh -c './scripts/uv-run-with-index.sh run scripts/run_openapi_generator.sh'
|
||||
language: python
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
files: ^llama_stack/apis/|^docs/openapi_generator/
|
||||
files: ^src/llama_stack_api/.*$
|
||||
- id: check-workflows-use-hashes
|
||||
name: Check GitHub Actions use SHA-pinned actions
|
||||
entry: ./scripts/check-workflows-use-hashes.sh
|
||||
|
|
@ -120,7 +130,7 @@ repos:
|
|||
pass_filenames: false
|
||||
require_serial: true
|
||||
always_run: true
|
||||
files: ^llama_stack/.*$
|
||||
files: ^src/llama_stack/.*$
|
||||
- id: forbid-pytest-asyncio
|
||||
name: Block @pytest.mark.asyncio and @pytest_asyncio.fixture
|
||||
entry: bash
|
||||
|
|
@ -141,7 +151,7 @@ repos:
|
|||
name: Generate CI documentation
|
||||
additional_dependencies:
|
||||
- uv==0.7.8
|
||||
entry: uv run ./scripts/gen-ci-docs.py
|
||||
entry: ./scripts/uv-run-with-index.sh run ./scripts/gen-ci-docs.py
|
||||
language: python
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
|
|
@ -150,7 +160,7 @@ repos:
|
|||
name: Format & Lint UI
|
||||
entry: bash ./scripts/run-ui-linter.sh
|
||||
language: system
|
||||
files: ^llama_stack/ui/.*\.(ts|tsx)$
|
||||
files: ^src/llama_stack_ui/.*\.(ts|tsx)$
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
|
||||
|
|
@ -172,6 +182,44 @@ repos:
|
|||
exit 1
|
||||
fi
|
||||
exit 0
|
||||
- id: fips-compliance
|
||||
name: Ensure llama-stack remains FIPS compliant
|
||||
entry: bash
|
||||
language: system
|
||||
types: [python]
|
||||
pass_filenames: true
|
||||
exclude: '^tests/.*$' # Exclude test dir as some safety tests used MD5
|
||||
args:
|
||||
- -c
|
||||
- |
|
||||
grep -EnH '^[^#]*\b(md5|sha1|uuid3|uuid5)\b' "$@" && {
|
||||
echo;
|
||||
echo "❌ Do not use any of the following functions: hashlib.md5, hashlib.sha1, uuid.uuid3, uuid.uuid5"
|
||||
echo " These functions are not FIPS-compliant"
|
||||
echo;
|
||||
exit 1;
|
||||
} || true
|
||||
- id: check-api-independence
|
||||
name: Ensure llama_stack_api does not import llama_stack
|
||||
entry: bash
|
||||
language: system
|
||||
pass_filenames: false
|
||||
require_serial: true
|
||||
always_run: true
|
||||
files: ^src/llama_stack_api/.*$
|
||||
args:
|
||||
- -c
|
||||
- |
|
||||
API_DIR="src/llama_stack_api"
|
||||
grep -rn --include="*.py" -E '^[^#]*(import llama_stack\b|from llama_stack\b)' "$API_DIR" 2>/dev/null && {
|
||||
echo "llama_stack_api must not import llama_stack";
|
||||
exit 1;
|
||||
}
|
||||
[ -f "$API_DIR/pyproject.toml" ] && grep -n 'llama_stack[^_]' "$API_DIR/pyproject.toml" && {
|
||||
echo "llama_stack_api must not depend on llama_stack in pyproject.toml";
|
||||
exit 1;
|
||||
}
|
||||
exit 0
|
||||
|
||||
ci:
|
||||
autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
|
||||
|
|
|
|||
614 CHANGELOG.md
|
|
@ -1,614 +0,0 @@
|
|||
# Changelog
|
||||
|
||||
# v0.2.20
|
||||
Published on: 2025-08-29T22:25:32Z
|
||||
|
||||
Here are some key changes that are coming as part of this release.
|
||||
|
||||
### Build and Environment
|
||||
|
||||
- Environment improvements: fixed env var replacement to preserve types.
|
||||
- Docker stability: fixed container startup failures for Fireworks AI provider.
|
||||
- Removed absolute paths in build for better portability.
|
||||
|
||||
### Features
|
||||
|
||||
- UI Enhancements: Implemented file upload and VectorDB creation/configuration directly in UI.
|
||||
- Vector Store Improvements: Added keyword, vector, and hybrid search inside vector store.
|
||||
- Added S3 authorization support for file providers.
|
||||
- SQL Store: Added inequality support to where clause.
|
||||
|
||||
### Documentation
|
||||
|
||||
- Fixed post-training docs.
|
||||
- Added Contributor Guidelines for creating Internal vs. External providers.
|
||||
|
||||
### Fixes
|
||||
|
||||
- Removed unsupported bfcl scoring function.
|
||||
- Multiple reliability and configuration fixes for providers and environment handling.
|
||||
|
||||
### Engineering / Chores
|
||||
|
||||
- Cleaner internal development setup with consistent paths.
|
||||
- Incremental improvements to provider integration and vector store behavior.
|
||||
|
||||
|
||||
### New Contributors
|
||||
- @omertuc made their first contribution in #3270
|
||||
- @r3v5 made their first contribution in vector store hybrid search
|
||||
|
||||
---
|
||||
|
||||
# v0.2.19
|
||||
Published on: 2025-08-26T22:06:55Z
|
||||
|
||||
## Highlights
|
||||
* feat: Add CORS configuration support for server by @skamenan7 in https://github.com/llamastack/llama-stack/pull/3201
|
||||
* feat(api): introduce /rerank by @ehhuang in https://github.com/llamastack/llama-stack/pull/2940
|
||||
* feat: Add S3 Files Provider by @mattf in https://github.com/llamastack/llama-stack/pull/3202
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.18
|
||||
Published on: 2025-08-20T01:09:27Z
|
||||
|
||||
## Highlights
|
||||
* Add moderations create API
|
||||
* Hybrid search in Milvus
|
||||
* Numerous Responses API improvements
|
||||
* Documentation updates
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.17
|
||||
Published on: 2025-08-05T01:51:14Z
|
||||
|
||||
## Highlights
|
||||
|
||||
* feat(tests): introduce inference record/replay to increase test reliability by @ashwinb in https://github.com/meta-llama/llama-stack/pull/2941
|
||||
* fix(library_client): improve initialization error handling and prevent AttributeError by @mattf in https://github.com/meta-llama/llama-stack/pull/2944
|
||||
* fix: use OLLAMA_URL to activate Ollama provider in starter by @ashwinb in https://github.com/meta-llama/llama-stack/pull/2963
|
||||
* feat(UI): adding MVP playground UI by @franciscojavierarceo in https://github.com/meta-llama/llama-stack/pull/2828
|
||||
* Standardization of errors (@nathan-weinberg)
|
||||
* feat: Enable DPO training with HuggingFace inline provider by @Nehanth in https://github.com/meta-llama/llama-stack/pull/2825
|
||||
* chore: rename templates to distributions by @ashwinb in https://github.com/meta-llama/llama-stack/pull/3035
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.16
|
||||
Published on: 2025-07-28T23:35:23Z
|
||||
|
||||
## Highlights
|
||||
|
||||
* Automatic model registration for self-hosted providers (ollama and vllm currently). No need for `INFERENCE_MODEL` environment variables which need to be updated, etc.
|
||||
* Much simplified starter distribution. Most `ENABLE_` env variables are now gone. When you set `VLLM_URL`, the `vllm` provider is auto-enabled. Similar for `MILVUS_URL`, `PGVECTOR_DB`, etc. Check the [run.yaml](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/templates/starter/run.yaml) for more details.
|
||||
* All tests migrated to pytest now (thanks @Elbehery)
|
||||
* DPO implementation in the post-training provider (thanks @Nehanth)
|
||||
* (Huge!) Support for external APIs and providers thereof (thanks @leseb, @cdoern and others). This is a really big deal -- you can now add more APIs completely out of tree and experiment with them before (optionally) wanting to contribute back.
|
||||
* `inline::vllm` provider is gone thank you very much
|
||||
* several improvements to OpenAI inference implementations and LiteLLM backend (thanks @mattf)
|
||||
* Chroma now supports Vector Store API (thanks @franciscojavierarceo).
|
||||
* Authorization improvements: Vector Store/File APIs now supports access control (thanks @franciscojavierarceo); Telemetry read APIs are gated according to logged-in user's roles.
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.15
|
||||
Published on: 2025-07-16T03:30:01Z
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.14
|
||||
Published on: 2025-07-04T16:06:48Z
|
||||
|
||||
## Highlights
|
||||
|
||||
* Support for Llama Guard 4
|
||||
* Added Milvus support to vector-stores API
|
||||
* Documentation and zero-to-hero updates for latest APIs
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.13
|
||||
Published on: 2025-06-28T04:28:11Z
|
||||
|
||||
## Highlights
|
||||
* search_mode support in OpenAI vector store API
|
||||
* Security fixes
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.12
|
||||
Published on: 2025-06-20T22:52:12Z
|
||||
|
||||
## Highlights
|
||||
* Filter support in file search
|
||||
* Support auth attributes in inference and response stores
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.11
|
||||
Published on: 2025-06-17T20:26:26Z
|
||||
|
||||
## Highlights
|
||||
* OpenAI-compatible vector store APIs
|
||||
* Hybrid Search in Sqlite-vec
|
||||
* File search tool in Responses API
|
||||
* Pagination in inference and response stores
|
||||
* Added `suffix` to completions API for fill-in-the-middle tasks
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.10.1
|
||||
Published on: 2025-06-06T20:11:02Z
|
||||
|
||||
## Highlights
|
||||
* ChromaDB provider fix
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.10
|
||||
Published on: 2025-06-05T23:21:45Z
|
||||
|
||||
## Highlights
|
||||
|
||||
* OpenAI-compatible embeddings API
|
||||
* OpenAI-compatible Files API
|
||||
* Postgres support in starter distro
|
||||
* Enable ingestion of precomputed embeddings
|
||||
* Full multi-turn support in Responses API
|
||||
* Fine-grained access control policy
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.9
|
||||
Published on: 2025-05-30T20:01:56Z
|
||||
|
||||
## Highlights
|
||||
* Added initial streaming support in Responses API
|
||||
* UI view for Responses
|
||||
* Postgres inference store support
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.8
|
||||
Published on: 2025-05-27T21:03:47Z
|
||||
|
||||
# Release v0.2.8
|
||||
|
||||
## Highlights
|
||||
|
||||
* Server-side MCP with auth firewalls now works in the Stack - both for Agents and Responses
|
||||
* Get chat completions APIs and UI to show chat completions
|
||||
* Enable keyword search for sqlite-vec
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.7
|
||||
Published on: 2025-05-16T20:38:10Z
|
||||
|
||||
## Highlights
|
||||
|
||||
This is a small update. But a couple highlights:
|
||||
|
||||
* feat: function tools in OpenAI Responses by @bbrowning in https://github.com/meta-llama/llama-stack/pull/2094, getting closer to ready. Streaming is the next missing piece.
|
||||
* feat: Adding support for customizing chunk context in RAG insertion and querying by @franciscojavierarceo in https://github.com/meta-llama/llama-stack/pull/2134
|
||||
* feat: scaffolding for Llama Stack UI by @ehhuang in https://github.com/meta-llama/llama-stack/pull/2149, more to come in the coming releases.
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.6
|
||||
Published on: 2025-05-12T18:06:52Z
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.5
|
||||
Published on: 2025-05-04T20:16:49Z
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.4
|
||||
Published on: 2025-04-29T17:26:01Z
|
||||
|
||||
## Highlights
|
||||
|
||||
* One-liner to install and run Llama Stack yay! by @reluctantfuturist in https://github.com/meta-llama/llama-stack/pull/1383
|
||||
* support for NVIDIA NeMo datastore by @raspawar in https://github.com/meta-llama/llama-stack/pull/1852
|
||||
* (yuge!) Kubernetes authentication by @leseb in https://github.com/meta-llama/llama-stack/pull/1778
|
||||
* (yuge!) OpenAI Responses API by @bbrowning in https://github.com/meta-llama/llama-stack/pull/1989
|
||||
* add api.llama provider, llama-guard-4 model by @ashwinb in https://github.com/meta-llama/llama-stack/pull/2058
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.3
|
||||
Published on: 2025-04-25T22:46:21Z
|
||||
|
||||
## Highlights
|
||||
|
||||
* OpenAI compatible inference endpoints and client-SDK support. `client.chat.completions.create()` now works.
|
||||
* significant improvements and functionality added to the nVIDIA distribution
|
||||
* many improvements to the test verification suite.
|
||||
* new inference providers: Ramalama, IBM WatsonX
|
||||
* many improvements to the Playground UI
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.2
|
||||
Published on: 2025-04-13T01:19:49Z
|
||||
|
||||
## Main changes
|
||||
|
||||
- Bring Your Own Provider (@leseb) - use out-of-tree provider code to execute the distribution server
|
||||
- OpenAI compatible inference API in progress (@bbrowning)
|
||||
- Provider verifications (@ehhuang)
|
||||
- Many updates and fixes to playground
|
||||
- Several llama4 related fixes
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.1
|
||||
Published on: 2025-04-05T23:13:00Z
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.2.0
|
||||
Published on: 2025-04-05T19:04:29Z
|
||||
|
||||
## Llama 4 Support
|
||||
|
||||
Checkout more at https://www.llama.com
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.9
|
||||
Published on: 2025-03-29T00:52:23Z
|
||||
|
||||
### Build and Test Agents
|
||||
* Agents: Entire document context with attachments
|
||||
* RAG: Documentation with sqlite-vec faiss comparison
|
||||
* Getting started: Fixes to getting started notebook.
|
||||
|
||||
### Agent Evals and Model Customization
|
||||
* (**New**) Post-training: Add nemo customizer
|
||||
|
||||
### Better Engineering
|
||||
* Moved sqlite-vec to non-blocking calls
|
||||
* Don't return a payload on file delete
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.8
|
||||
Published on: 2025-03-24T01:28:50Z
|
||||
|
||||
# v0.1.8 Release Notes
|
||||
|
||||
### Build and Test Agents
|
||||
* Safety: Integrated NVIDIA as a safety provider.
|
||||
* VectorDB: Added Qdrant as an inline provider.
|
||||
* Agents: Added support for multiple tool groups in agents.
|
||||
* Agents: Simplified imports for Agents in client package
|
||||
|
||||
|
||||
### Agent Evals and Model Customization
|
||||
* Introduced DocVQA and IfEval benchmarks.
|
||||
|
||||
### Deploying and Monitoring Agents
|
||||
* Introduced a Containerfile and image workflow for the Playground.
|
||||
* Implemented support for Bearer (API Key) authentication.
|
||||
* Added attribute-based access control for resources.
|
||||
* Fixes on docker deployments: use --pull always and standardized the default port to 8321
|
||||
* Deprecated: /v1/inspect/providers use /v1/providers/ instead
|
||||
|
||||
### Better Engineering
|
||||
* Consolidated scripts under the ./scripts directory.
|
||||
* Addressed mypy violations in various modules.
|
||||
* Added Dependabot scans for Python dependencies.
|
||||
* Implemented a scheduled workflow to update the changelog automatically.
|
||||
* Enforced concurrency to reduce CI loads.
|
||||
|
||||
|
||||
### New Contributors
|
||||
* @cmodi-meta made their first contribution in https://github.com/meta-llama/llama-stack/pull/1650
|
||||
* @jeffmaury made their first contribution in https://github.com/meta-llama/llama-stack/pull/1671
|
||||
* @derekhiggins made their first contribution in https://github.com/meta-llama/llama-stack/pull/1698
|
||||
* @Bobbins228 made their first contribution in https://github.com/meta-llama/llama-stack/pull/1745
|
||||
|
||||
**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.7...v0.1.8
|
||||
|
||||
---
|
||||
|
||||
# v0.1.7
|
||||
Published on: 2025-03-14T22:30:51Z
|
||||
|
||||
## 0.1.7 Release Notes
|
||||
|
||||
### Build and Test Agents
|
||||
* Inference: ImageType is now refactored to LlamaStackImageType
|
||||
* Inference: Added tests to measure TTFT
|
||||
* Inference: Bring back usage metrics
|
||||
* Agents: Added endpoint for get agent, list agents and list sessions
|
||||
* Agents: Automated conversion of type hints in client tool for lite llm format
|
||||
* Agents: Deprecated ToolResponseMessage in agent.resume API
|
||||
* Added Provider API for listing and inspecting provider info
|
||||
|
||||
### Agent Evals and Model Customization
|
||||
* Eval: Added new eval benchmarks Math 500 and BFCL v3
|
||||
* Deploy and Monitoring of Agents
|
||||
* Telemetry: Fix tracing to work across coroutines
|
||||
|
||||
### Better Engineering
|
||||
* Display code coverage for unit tests
|
||||
* Updated call sites (inference, tool calls, agents) to move to async non blocking calls
|
||||
* Unit tests also run on Python 3.11, 3.12, and 3.13
|
||||
* Added ollama inference to Integration tests CI
|
||||
* Improved documentation across examples, testing, CLI, updated providers table )
|
||||
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.6
|
||||
Published on: 2025-03-08T04:35:08Z
|
||||
|
||||
## 0.1.6 Release Notes
|
||||
|
||||
### Build and Test Agents
|
||||
* Inference: Fixed support for inline vllm provider
|
||||
* (**New**) Agent: Build & Monitor Agent Workflows with Llama Stack + Anthropic's Best Practice [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb)
|
||||
* (**New**) Agent: Revamped agent [documentation](https://llama-stack.readthedocs.io/en/latest/building_applications/agent.html) with more details and examples
|
||||
* Agent: Unify tools and Python SDK Agents API
|
||||
* Agent: AsyncAgent Python SDK wrapper supporting async client tool calls
|
||||
* Agent: Support python functions without @client_tool decorator as client tools
|
||||
* Agent: deprecation for allow_resume_turn flag, and remove need to specify tool_prompt_format
|
||||
* VectorIO: MilvusDB support added
|
||||
|
||||
### Agent Evals and Model Customization
|
||||
* (**New**) Agent: Llama Stack RAG Lifecycle [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb)
|
||||
* Eval: Documentation for eval, scoring, adding new benchmarks
|
||||
* Eval: Distribution template to run benchmarks on llama & non-llama models
|
||||
* Eval: Ability to register new custom LLM-as-judge scoring functions
|
||||
* (**New**) Looking for contributors for open benchmarks. See [documentation](https://llama-stack.readthedocs.io/en/latest/references/evals_reference/index.html#open-benchmark-contributing-guide) for details.
|
||||
|
||||
### Deploy and Monitoring of Agents
|
||||
* Better support for different log levels across all components for better monitoring
|
||||
|
||||
### Better Engineering
|
||||
* Enhance OpenAPI spec to include Error types across all APIs
|
||||
* Moved all tests to /tests and created unit tests to run on each PR
|
||||
* Removed all dependencies on llama-models repo
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.5.1
|
||||
Published on: 2025-02-28T22:37:44Z
|
||||
|
||||
## 0.1.5.1 Release Notes
|
||||
* Fixes for security risk in https://github.com/meta-llama/llama-stack/pull/1327 and https://github.com/meta-llama/llama-stack/pull/1328
|
||||
|
||||
**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.5...v0.1.5.1
|
||||
|
||||
---
|
||||
|
||||
# v0.1.5
|
||||
Published on: 2025-02-28T18:14:01Z
|
||||
|
||||
## 0.1.5 Release Notes
|
||||
### Build Agents
|
||||
* Inference: Support more non-llama models (openai, anthropic, gemini)
|
||||
* Inference: Can use the provider's model name in addition to the HF alias
|
||||
* Inference: Fixed issues with calling tools that weren't specified in the prompt
|
||||
* RAG: Improved system prompt for RAG and no more need for hard-coded rag-tool calling
|
||||
* Embeddings: Added support for Nemo retriever embedding models
|
||||
* Tools: Added support for MCP tools in Ollama Distribution
|
||||
* Distributions: Added new Groq distribution
|
||||
|
||||
### Customize Models
|
||||
* Save post-trained checkpoint in SafeTensor format to allow Ollama inference provider to use the post-trained model
|
||||
|
||||
### Monitor agents
|
||||
* More comprehensive logging of agent steps including client tools
|
||||
* Telemetry inputs/outputs are now structured and queryable
|
||||
* Ability to retrieve agents session, turn, step by ids
|
||||
|
||||
### Better Engineering
|
||||
* Moved executorch Swift code out of this repo into the llama-stack-client-swift repo, similar to kotlin
|
||||
* Move most logging to use logger instead of prints
|
||||
* Completed text /chat-completion and /completion tests
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.4
|
||||
Published on: 2025-02-25T00:02:43Z
|
||||
|
||||
## v0.1.4 Release Notes
|
||||
Here are the key changes coming as part of this release:
|
||||
|
||||
### Build and Test Agents
|
||||
* Inference: Added support for non-llama models
|
||||
* Inference: Added option to list all downloaded models and remove models
|
||||
* Agent: Introduce new api agents.resume_turn to include client side tool execution in the same turn
|
||||
* Agent: AgentConfig introduces new variable “tool_config” that allows for better tool configuration and system prompt overrides
|
||||
* Agent: Added logging for agent step start and completion times
|
||||
* Agent: Added support for logging for tool execution metadata
|
||||
* Embedding: Updated /inference/embeddings to support asymmetric models, truncation and variable sized outputs
|
||||
* Embedding: Updated embedding models for Ollama, Together, and Fireworks with available defaults
|
||||
* VectorIO: Improved performance of sqlite-vec using chunked writes
|
||||
### Agent Evals and Model Customization
|
||||
* Deprecated api /eval-tasks. Use /eval/benchmark instead
|
||||
* Added CPU training support for TorchTune
|
||||
### Deploy and Monitoring of Agents
|
||||
* Consistent view of client and server tool calls in telemetry
|
||||
### Better Engineering
|
||||
* Made tests more data-driven for consistent evaluation
|
||||
* Fixed documentation links and improved API reference generation
|
||||
* Various small fixes for build scripts and system reliability
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.3
|
||||
Published on: 2025-02-14T20:24:32Z
|
||||
|
||||
## v0.1.3 Release
|
||||
|
||||
Here are some key changes that are coming as part of this release.
|
||||
|
||||
### Build and Test Agents
|
||||
Streamlined the initial development experience
|
||||
- Added support for `llama stack run --image-type venv`
|
||||
- Enhanced vector store options with new sqlite-vec provider and improved Qdrant integration
|
||||
- vLLM improvements for tool calling and logprobs
|
||||
- Better handling of sporadic code_interpreter tool calls
|
||||
|
||||
### Agent Evals
|
||||
Better benchmarking and Agent performance assessment
|
||||
- Renamed eval API /eval-task to /benchmarks
|
||||
- Improved documentation and notebooks for RAG and evals
|
||||
|
||||
### Deploy and Monitoring of Agents
|
||||
Improved production readiness
|
||||
- Added usage metrics collection for chat completions
|
||||
- CLI improvements for provider information
|
||||
- Improved error handling and system reliability
|
||||
- Better model endpoint handling and accessibility
|
||||
- Improved signal handling on distro server
|
||||
|
||||
### Better Engineering
|
||||
Infrastructure and code quality improvements
|
||||
- Faster text-based chat completion tests
|
||||
- Improved testing for non-streaming agent APIs
|
||||
- Standardized import formatting with ruff linter
|
||||
- Added conventional commits standard
|
||||
- Fixed documentation parsing issues
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.2
|
||||
Published on: 2025-02-07T22:06:49Z
|
||||
|
||||
# TL;DR
|
||||
- Several stabilizations to development flows after the switch to `uv`
|
||||
- Migrated CI workflows to new OSS repo - [llama-stack-ops](https://github.com/meta-llama/llama-stack-ops)
|
||||
- Added automated rebuilds for ReadTheDocs
|
||||
- Llama Stack server supports HTTPS
|
||||
- Added system prompt overrides support
|
||||
- Several bug fixes and improvements to documentation (check out the Kubernetes deployment guide by @terrytangyuan)
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.1
|
||||
Published on: 2025-02-02T02:29:24Z
|
||||
|
||||
A bunch of small and big improvements everywhere, including support for Windows, the switch to `uv`, and many provider improvements.
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.0
|
||||
Published on: 2025-01-24T17:47:47Z
|
||||
|
||||
We are excited to announce a stable API release of Llama Stack, which enables developers to build RAG applications and agents using tools and safety shields, monitor those agents with telemetry, and evaluate them with scoring functions.
|
||||
|
||||
## Context
|
||||
GenAI application developers need more than just an LLM - they need to integrate tools, connect with their data sources, establish guardrails, and ground the LLM responses effectively. Currently, developers must piece together various tools and APIs, complicating the development lifecycle and increasing costs. The result is that developers are spending more time on these integrations rather than focusing on the application logic itself. The bespoke coupling of components also makes it challenging to adopt state-of-the-art solutions in the rapidly evolving GenAI space. This is particularly difficult for open models like Llama, as best practices are not widely established in the open.
|
||||
|
||||
Llama Stack was created to provide developers with a comprehensive and coherent interface that simplifies AI application development and codifies best practices across the Llama ecosystem. Since our launch in September 2024, we have seen a huge uptick in interest in Llama Stack APIs by both AI developers and from partners building AI services with Llama models. Partners like Nvidia, Fireworks, and Ollama have collaborated with us to develop implementations across various APIs, including inference, memory, and safety.
|
||||
|
||||
With Llama Stack, you can easily build a RAG agent which can also search the web, do complex math, and call custom tools. You can use telemetry to inspect those traces and convert telemetry into evals datasets. And with Llama Stack’s plugin architecture and prepackaged distributions, you can choose to run your agent anywhere: in the cloud with our partners, in your own environment using virtualenv or Docker, locally with Ollama, or even on mobile devices with our SDKs. Llama Stack offers unprecedented flexibility while also simplifying the developer experience.
|
||||
|
||||
## Release
|
||||
After iterating on the APIs for the last 3 months, today we’re launching a stable release (V1) of the Llama Stack APIs and the corresponding llama-stack server and client packages (v0.1.0). We now have automated tests that verify all provider implementations. Developers can now easily and reliably select distributions or providers based on their specific requirements.
|
||||
|
||||
There are example standalone apps in llama-stack-apps.
|
||||
|
||||
|
||||
## Key Features of this release
|
||||
|
||||
- **Unified API Layer**
|
||||
- Inference: Run LLM models
|
||||
- RAG: Store and retrieve knowledge for RAG
|
||||
- Agents: Build multi-step agentic workflows
|
||||
- Tools: Register tools that can be called by the agent
|
||||
- Safety: Apply content filtering and safety policies
|
||||
- Evaluation: Test model and agent quality
|
||||
- Telemetry: Collect and analyze usage data and complex agentic traces
|
||||
- Post Training ( Coming Soon ): Fine tune models for specific use cases
|
||||
|
||||
- **Rich Provider Ecosystem**
|
||||
- Local Development: Meta's Reference, Ollama
|
||||
- Cloud: Fireworks, Together, Nvidia, AWS Bedrock, Groq, Cerebras
|
||||
- On-premises: Nvidia NIM, vLLM, TGI, Dell-TGI
|
||||
- On-device: iOS and Android support
|
||||
|
||||
- **Built for Production**
|
||||
- Pre-packaged distributions for common deployment scenarios
|
||||
- Backwards compatibility across model versions
|
||||
- Comprehensive evaluation capabilities
|
||||
- Full observability and monitoring
|
||||
|
||||
- **Multiple developer interfaces**
|
||||
- CLI: Command line interface
|
||||
- Python SDK
|
||||
- Swift iOS SDK
|
||||
- Kotlin Android SDK
|
||||
|
||||
- **Sample llama stack applications**
|
||||
- Python
|
||||
- iOS
|
||||
- Android
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.1.0rc12
|
||||
Published on: 2025-01-22T22:24:01Z
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
# v0.0.63
|
||||
Published on: 2024-12-18T07:17:43Z
|
||||
|
||||
A small but important bug-fix release to update the URL datatype for the client-SDKs. The issue affected multimodal agentic turns especially.
|
||||
|
||||
**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.0.62...v0.0.63
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -61,6 +61,18 @@ uv run pre-commit run --all-files -v
|
|||
|
||||
The `-v` (verbose) parameter is optional but often helpful for getting more information about any issues that the pre-commit checks identify.
|
||||
|
||||
To run the expanded mypy configuration that CI enforces, use:
|
||||
|
||||
```bash
|
||||
uv run pre-commit run mypy-full --hook-stage manual --all-files
|
||||
```
|
||||
|
||||
or invoke mypy directly with all optional dependencies:
|
||||
|
||||
```bash
|
||||
uv run --group dev --group type_checking mypy
|
||||
```
|
||||
|
||||
```{caution}
|
||||
Before pushing your changes, make sure that the pre-commit hooks have passed successfully.
|
||||
```
|
||||
|
|
@ -219,7 +231,7 @@ npm run serve
|
|||
If you modify or add new API endpoints, update the API documentation accordingly. You can do this by running the following command:
|
||||
|
||||
```bash
|
||||
uv run ./docs/openapi_generator/run_openapi_generator.sh
|
||||
uv run ./scripts/run_openapi_generator.sh
|
||||
```
|
||||
|
||||
The generated API schema will be available in `docs/static/`. Make sure to review the changes before committing.
|
||||
|
|
|
|||
MANIFEST.in (18 changed lines)
|
|
@ -1,11 +1,11 @@
|
|||
include pyproject.toml
|
||||
include llama_stack/models/llama/llama3/tokenizer.model
|
||||
include llama_stack/models/llama/llama4/tokenizer.model
|
||||
include llama_stack/core/*.sh
|
||||
include llama_stack/cli/scripts/*.sh
|
||||
include llama_stack/distributions/*/*.yaml
|
||||
exclude llama_stack/distributions/ci-tests
|
||||
include src/llama_stack/models/llama/llama3/tokenizer.model
|
||||
include src/llama_stack/models/llama/llama4/tokenizer.model
|
||||
include src/llama_stack/core/*.sh
|
||||
include src/llama_stack/cli/scripts/*.sh
|
||||
include src/llama_stack/distributions/*/*.yaml
|
||||
exclude src/llama_stack/distributions/ci-tests
|
||||
include tests/integration/test_cases/inference/*.json
|
||||
include llama_stack/models/llama/*/*.md
|
||||
include llama_stack/tests/integration/*.jpg
|
||||
prune llama_stack/distributions/ci-tests
|
||||
include src/llama_stack/models/llama/*/*.md
|
||||
include src/llama_stack/tests/integration/*.jpg
|
||||
prune src/llama_stack/distributions/ci-tests
|
||||
|
|
|
|||
README.md (77 changed lines)
|
|
@ -10,83 +10,6 @@
|
|||
[**Quick Start**](https://llamastack.github.io/docs/getting_started/quickstart) | [**Documentation**](https://llamastack.github.io/docs) | [**Colab Notebook**](./docs/getting_started.ipynb) | [**Discord**](https://discord.gg/llama-stack)
|
||||
|
||||
|
||||
### ✨🎉 Llama 4 Support 🎉✨
|
||||
We released [Version 0.2.0](https://github.com/meta-llama/llama-stack/releases/tag/v0.2.0) with support for the Llama 4 herd of models released by Meta.
|
||||
|
||||
<details>
|
||||
|
||||
<summary>👋 Click here to see how to run Llama 4 models on Llama Stack </summary>
|
||||
|
||||
\
|
||||
*Note: you need an 8xH100 GPU host to run these models*
|
||||
|
||||
```bash
|
||||
pip install -U llama_stack
|
||||
|
||||
MODEL="Llama-4-Scout-17B-16E-Instruct"
|
||||
# get meta url from llama.com
|
||||
huggingface-cli download meta-llama/$MODEL --local-dir ~/.llama/$MODEL
|
||||
|
||||
# install dependencies for the distribution
|
||||
llama stack list-deps meta-reference-gpu | xargs -L1 uv pip install
|
||||
|
||||
# start a llama stack server
|
||||
INFERENCE_MODEL=meta-llama/$MODEL llama stack run meta-reference-gpu
|
||||
|
||||
# install client to interact with the server
|
||||
pip install llama-stack-client
|
||||
```
|
||||
### CLI
|
||||
```bash
|
||||
# Run a chat completion
|
||||
MODEL="Llama-4-Scout-17B-16E-Instruct"
|
||||
|
||||
llama-stack-client --endpoint http://localhost:8321 \
|
||||
inference chat-completion \
|
||||
--model-id meta-llama/$MODEL \
|
||||
--message "write a haiku for meta's llama 4 models"
|
||||
|
||||
OpenAIChatCompletion(
|
||||
...
|
||||
choices=[
|
||||
OpenAIChatCompletionChoice(
|
||||
finish_reason='stop',
|
||||
index=0,
|
||||
message=OpenAIChatCompletionChoiceMessageOpenAIAssistantMessageParam(
|
||||
role='assistant',
|
||||
content='...**Silent minds awaken,** \n**Whispers of billions of words,** \n**Reasoning breaks the night.** \n\n— \n*This haiku blends the essence of LLaMA 4\'s capabilities with nature-inspired metaphor, evoking its vast training data and transformative potential.*',
|
||||
...
|
||||
),
|
||||
...
|
||||
)
|
||||
],
|
||||
...
|
||||
)
|
||||
```
|
||||
### Python SDK
|
||||
```python
|
||||
from llama_stack_client import LlamaStackClient
|
||||
|
||||
client = LlamaStackClient(base_url="http://localhost:8321")
|
||||
|
||||
model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
|
||||
prompt = "Write a haiku about coding"
|
||||
|
||||
print(f"User> {prompt}")
|
||||
response = client.chat.completions.create(
|
||||
model=model_id,
|
||||
messages=[
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "user", "content": prompt},
|
||||
],
|
||||
)
|
||||
print(f"Assistant> {response.choices[0].message.content}")
|
||||
```
|
||||
As more providers start supporting Llama 4, you can use them in Llama Stack as well. We are adding to the list. Stay tuned!
|
||||
|
||||
|
||||
</details>
|
||||
|
||||
### 🚀 One-Line Installer 🚀
|
||||
|
||||
To try Llama Stack locally, run:
|
||||
|
|
|
|||
|
|
@ -44,14 +44,6 @@ data:
|
|||
db: ${env.POSTGRES_DB:=llamastack}
|
||||
user: ${env.POSTGRES_USER:=llamastack}
|
||||
password: ${env.POSTGRES_PASSWORD:=llamastack}
|
||||
files:
|
||||
- provider_id: meta-reference-files
|
||||
provider_type: inline::localfs
|
||||
config:
|
||||
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
|
||||
metadata_store:
|
||||
type: sqlite
|
||||
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db
|
||||
safety:
|
||||
- provider_id: llama-guard
|
||||
provider_type: inline::llama-guard
|
||||
|
|
@ -115,13 +107,21 @@ data:
|
|||
db: ${env.POSTGRES_DB:=llamastack}
|
||||
user: ${env.POSTGRES_USER:=llamastack}
|
||||
password: ${env.POSTGRES_PASSWORD:=llamastack}
|
||||
references:
|
||||
stores:
|
||||
metadata:
|
||||
backend: kv_default
|
||||
namespace: registry
|
||||
inference:
|
||||
backend: sql_default
|
||||
table_name: inference_store
|
||||
max_write_queue_size: 10000
|
||||
num_writers: 4
|
||||
conversations:
|
||||
backend: sql_default
|
||||
table_name: openai_conversations
|
||||
prompts:
|
||||
backend: kv_default
|
||||
namespace: prompts
|
||||
models:
|
||||
- metadata:
|
||||
embedding_dimension: 768
|
||||
|
|
|
|||
|
|
@ -36,14 +36,6 @@ providers:
|
|||
persistence:
|
||||
namespace: vector_io::chroma_remote
|
||||
backend: kv_default
|
||||
files:
|
||||
- provider_id: meta-reference-files
|
||||
provider_type: inline::localfs
|
||||
config:
|
||||
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
|
||||
metadata_store:
|
||||
table_name: files_metadata
|
||||
backend: sql_default
|
||||
safety:
|
||||
- provider_id: llama-guard
|
||||
provider_type: inline::llama-guard
|
||||
|
|
@ -108,6 +100,9 @@ storage:
|
|||
conversations:
|
||||
table_name: openai_conversations
|
||||
backend: sql_default
|
||||
prompts:
|
||||
namespace: prompts
|
||||
backend: kv_default
|
||||
registered_resources:
|
||||
models:
|
||||
- metadata:
|
||||
|
|
|
|||
|
|
@ -1,8 +1,11 @@
|
|||
These are the source-of-truth configuration files used to generate the client SDKs via Stainless.
|
||||
|
||||
- `openapi.yml`: this is the OpenAPI specification for the Llama Stack API.
|
||||
- `openapi.stainless.yml`: this is the Stainless _configuration_ which instructs Stainless how to generate the client SDKs.
|
||||
- `config.yml`: this is the Stainless _configuration_ which instructs Stainless how to generate the client SDKs.
|
||||
|
||||
A small side note: note the `.yml` suffix, which Stainless typically uses for its configuration files.
|
||||
|
||||
These files go hand-in-hand. As of now, only the `openapi.yml` file is automatically generated using the `run_openapi_generator.sh` script.
|
||||
These files go hand-in-hand. Both `openapi.yml` and `config.yml` are generated by `scripts/run_openapi_generator.sh`:
|
||||
|
||||
- `openapi.yml` comes from the FastAPI-based generator.
|
||||
- `config.yml` is rendered from `scripts/openapi_generator/stainless_config/config_data.py` so the Stainless config stays in lock-step with the spec.
|
||||
|
|
|
|||
client-sdks/stainless/config.yml (new file, 490 lines)
|
|
@ -0,0 +1,490 @@
|
|||
# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json
|
||||
|
||||
organization:
|
||||
name: llama-stack-client
|
||||
docs: https://llama-stack.readthedocs.io/en/latest/
|
||||
contact: llamastack@meta.com
|
||||
security:
|
||||
- {}
|
||||
- BearerAuth: []
|
||||
security_schemes:
|
||||
BearerAuth:
|
||||
type: http
|
||||
scheme: bearer
|
||||
targets:
|
||||
node:
|
||||
package_name: llama-stack-client
|
||||
production_repo: llamastack/llama-stack-client-typescript
|
||||
publish:
|
||||
npm: false
|
||||
python:
|
||||
package_name: llama_stack_client
|
||||
production_repo: llamastack/llama-stack-client-python
|
||||
options:
|
||||
use_uv: true
|
||||
publish:
|
||||
pypi: true
|
||||
project_name: llama_stack_client
|
||||
kotlin:
|
||||
reverse_domain: com.llama_stack_client.api
|
||||
production_repo: null
|
||||
publish:
|
||||
maven: false
|
||||
go:
|
||||
package_name: llama-stack-client
|
||||
production_repo: llamastack/llama-stack-client-go
|
||||
options:
|
||||
enable_v2: true
|
||||
back_compat_use_shared_package: false
|
||||
client_settings:
|
||||
default_env_prefix: LLAMA_STACK_CLIENT
|
||||
opts:
|
||||
api_key:
|
||||
type: string
|
||||
read_env: LLAMA_STACK_CLIENT_API_KEY
|
||||
auth:
|
||||
security_scheme: BearerAuth
|
||||
nullable: true
|
||||
environments:
|
||||
production: http://any-hosted-llama-stack.com
|
||||
pagination:
|
||||
- name: datasets_iterrows
|
||||
type: offset
|
||||
request:
|
||||
dataset_id:
|
||||
type: string
|
||||
start_index:
|
||||
type: integer
|
||||
x-stainless-pagination-property:
|
||||
purpose: offset_count_param
|
||||
limit:
|
||||
type: integer
|
||||
response:
|
||||
data:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
next_index:
|
||||
type: integer
|
||||
x-stainless-pagination-property:
|
||||
purpose: offset_count_start_field
|
||||
- name: openai_cursor_page
|
||||
type: cursor
|
||||
request:
|
||||
limit:
|
||||
type: integer
|
||||
after:
|
||||
type: string
|
||||
x-stainless-pagination-property:
|
||||
purpose: next_cursor_param
|
||||
response:
|
||||
data:
|
||||
type: array
|
||||
items: {}
|
||||
has_more:
|
||||
type: boolean
|
||||
last_id:
|
||||
type: string
|
||||
x-stainless-pagination-property:
|
||||
purpose: next_cursor_field
|
||||
settings:
|
||||
license: MIT
|
||||
unwrap_response_fields:
|
||||
- data
|
||||
file_header: 'Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
|
||||
This source code is licensed under the terms described in the LICENSE file in
|
||||
|
||||
the root directory of this source tree.
|
||||
|
||||
'
|
||||
openapi:
|
||||
transformations:
|
||||
- command: mergeObject
|
||||
reason: Better return_type using enum
|
||||
args:
|
||||
target:
|
||||
- $.components.schemas
|
||||
object:
|
||||
ReturnType:
|
||||
additionalProperties: false
|
||||
properties:
|
||||
type:
|
||||
enum:
|
||||
- string
|
||||
- number
|
||||
- boolean
|
||||
- array
|
||||
- object
|
||||
- json
|
||||
- union
|
||||
- chat_completion_input
|
||||
- completion_input
|
||||
- agent_turn_input
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
- command: replaceProperties
|
||||
reason: Replace return type properties with better model (see above)
|
||||
args:
|
||||
filter:
|
||||
only:
|
||||
- $.components.schemas.ScoringFn.properties.return_type
|
||||
- $.components.schemas.RegisterScoringFunctionRequest.properties.return_type
|
||||
value:
|
||||
$ref: '#/components/schemas/ReturnType'
|
||||
- command: oneOfToAnyOf
|
||||
reason: Prism (mock server) doesn't like one of our requests as it technically
|
||||
matches multiple variants
|
||||
readme:
|
||||
example_requests:
|
||||
default:
|
||||
type: request
|
||||
endpoint: post /v1/chat/completions
|
||||
params: {}
|
||||
headline:
|
||||
type: request
|
||||
endpoint: get /v1/models
|
||||
params: {}
|
||||
pagination:
|
||||
type: request
|
||||
endpoint: post /v1/chat/completions
|
||||
params: {}
|
||||
resources:
|
||||
$shared:
|
||||
models:
|
||||
interleaved_content_item: InterleavedContentItem
|
||||
interleaved_content: InterleavedContent
|
||||
param_type: ParamType
|
||||
safety_violation: SafetyViolation
|
||||
sampling_params: SamplingParams
|
||||
scoring_result: ScoringResult
|
||||
system_message: SystemMessage
|
||||
toolgroups:
|
||||
models:
|
||||
tool_group: ToolGroup
|
||||
list_tool_groups_response: ListToolGroupsResponse
|
||||
methods:
|
||||
register: post /v1/toolgroups
|
||||
get: get /v1/toolgroups/{toolgroup_id}
|
||||
list: get /v1/toolgroups
|
||||
unregister: delete /v1/toolgroups/{toolgroup_id}
|
||||
tools:
|
||||
methods:
|
||||
get: get /v1/tools/{tool_name}
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/tools
|
||||
tool_runtime:
|
||||
models:
|
||||
tool_def: ToolDef
|
||||
tool_invocation_result: ToolInvocationResult
|
||||
methods:
|
||||
list_tools:
|
||||
paginated: false
|
||||
endpoint: get /v1/tool-runtime/list-tools
|
||||
invoke_tool: post /v1/tool-runtime/invoke
|
||||
responses:
|
||||
models:
|
||||
response_object_stream: OpenAIResponseObjectStream
|
||||
response_object: OpenAIResponseObject
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
streaming:
|
||||
stream_event_model: responses.response_object_stream
|
||||
param_discriminator: stream
|
||||
endpoint: post /v1/responses
|
||||
retrieve: get /v1/responses/{response_id}
|
||||
list:
|
||||
type: http
|
||||
endpoint: get /v1/responses
|
||||
delete:
|
||||
type: http
|
||||
endpoint: delete /v1/responses/{response_id}
|
||||
subresources:
|
||||
input_items:
|
||||
methods:
|
||||
list:
|
||||
type: http
|
||||
paginated: false
|
||||
endpoint: get /v1/responses/{response_id}/input_items
|
||||
prompts:
|
||||
models:
|
||||
prompt: Prompt
|
||||
list_prompts_response: ListPromptsResponse
|
||||
methods:
|
||||
create: post /v1/prompts
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/prompts
|
||||
retrieve: get /v1/prompts/{prompt_id}
|
||||
update: post /v1/prompts/{prompt_id}
|
||||
delete: delete /v1/prompts/{prompt_id}
|
||||
set_default_version: post /v1/prompts/{prompt_id}/set-default-version
|
||||
subresources:
|
||||
versions:
|
||||
methods:
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/prompts/{prompt_id}/versions
|
||||
conversations:
|
||||
models:
|
||||
conversation_object: Conversation
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/conversations
|
||||
retrieve: get /v1/conversations/{conversation_id}
|
||||
update:
|
||||
type: http
|
||||
endpoint: post /v1/conversations/{conversation_id}
|
||||
delete:
|
||||
type: http
|
||||
endpoint: delete /v1/conversations/{conversation_id}
|
||||
subresources:
|
||||
items:
|
||||
methods:
|
||||
get:
|
||||
type: http
|
||||
endpoint: get /v1/conversations/{conversation_id}/items/{item_id}
|
||||
list:
|
||||
type: http
|
||||
endpoint: get /v1/conversations/{conversation_id}/items
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/conversations/{conversation_id}/items
|
||||
delete:
|
||||
type: http
|
||||
endpoint: delete /v1/conversations/{conversation_id}/items/{item_id}
|
||||
inspect:
|
||||
models:
|
||||
healthInfo: HealthInfo
|
||||
providerInfo: ProviderInfo
|
||||
routeInfo: RouteInfo
|
||||
versionInfo: VersionInfo
|
||||
methods:
|
||||
health: get /v1/health
|
||||
version: get /v1/version
|
||||
embeddings:
|
||||
models:
|
||||
create_embeddings_response: OpenAIEmbeddingsResponse
|
||||
methods:
|
||||
create: post /v1/embeddings
|
||||
chat:
|
||||
models:
|
||||
chat_completion_chunk: OpenAIChatCompletionChunk
|
||||
subresources:
|
||||
completions:
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
streaming:
|
||||
stream_event_model: chat.chat_completion_chunk
|
||||
param_discriminator: stream
|
||||
endpoint: post /v1/chat/completions
|
||||
list:
|
||||
type: http
|
||||
paginated: false
|
||||
endpoint: get /v1/chat/completions
|
||||
retrieve:
|
||||
type: http
|
||||
endpoint: get /v1/chat/completions/{completion_id}
|
||||
completions:
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
streaming:
|
||||
param_discriminator: stream
|
||||
endpoint: post /v1/completions
|
||||
vector_io:
|
||||
models:
|
||||
queryChunksResponse: QueryChunksResponse
|
||||
methods:
|
||||
insert: post /v1/vector-io/insert
|
||||
query: post /v1/vector-io/query
|
||||
vector_stores:
|
||||
models:
|
||||
vector_store: VectorStoreObject
|
||||
list_vector_stores_response: VectorStoreListResponse
|
||||
vector_store_delete_response: VectorStoreDeleteResponse
|
||||
vector_store_search_response: VectorStoreSearchResponsePage
|
||||
methods:
|
||||
create: post /v1/vector_stores
|
||||
list: get /v1/vector_stores
|
||||
retrieve: get /v1/vector_stores/{vector_store_id}
|
||||
update: post /v1/vector_stores/{vector_store_id}
|
||||
delete: delete /v1/vector_stores/{vector_store_id}
|
||||
search: post /v1/vector_stores/{vector_store_id}/search
|
||||
subresources:
|
||||
files:
|
||||
models:
|
||||
vector_store_file: VectorStoreFileObject
|
||||
methods:
|
||||
list: get /v1/vector_stores/{vector_store_id}/files
|
||||
retrieve: get /v1/vector_stores/{vector_store_id}/files/{file_id}
|
||||
update: post /v1/vector_stores/{vector_store_id}/files/{file_id}
|
||||
delete: delete /v1/vector_stores/{vector_store_id}/files/{file_id}
|
||||
create: post /v1/vector_stores/{vector_store_id}/files
|
||||
content: get /v1/vector_stores/{vector_store_id}/files/{file_id}/content
|
||||
file_batches:
|
||||
models:
|
||||
vector_store_file_batches: VectorStoreFileBatchObject
|
||||
list_vector_store_files_in_batch_response: VectorStoreFilesListInBatchResponse
|
||||
methods:
|
||||
create: post /v1/vector_stores/{vector_store_id}/file_batches
|
||||
retrieve: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}
|
||||
list_files: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files
|
||||
cancel: post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel
|
||||
models:
|
||||
models:
|
||||
model: OpenAIModel
|
||||
list_models_response: OpenAIListModelsResponse
|
||||
methods:
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/models
|
||||
retrieve: get /v1/models/{model_id}
|
||||
register: post /v1/models
|
||||
unregister: delete /v1/models/{model_id}
|
||||
subresources:
|
||||
openai:
|
||||
methods:
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/models
|
||||
providers:
|
||||
models:
|
||||
list_providers_response: ListProvidersResponse
|
||||
methods:
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/providers
|
||||
retrieve: get /v1/providers/{provider_id}
|
||||
routes:
|
||||
models:
|
||||
list_routes_response: ListRoutesResponse
|
||||
methods:
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/inspect/routes
|
||||
moderations:
|
||||
models:
|
||||
create_response: ModerationObject
|
||||
methods:
|
||||
create: post /v1/moderations
|
||||
safety:
|
||||
models:
|
||||
run_shield_response: RunShieldResponse
|
||||
methods:
|
||||
run_shield: post /v1/safety/run-shield
|
||||
shields:
|
||||
models:
|
||||
shield: Shield
|
||||
list_shields_response: ListShieldsResponse
|
||||
methods:
|
||||
retrieve: get /v1/shields/{identifier}
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/shields
|
||||
register: post /v1/shields
|
||||
delete: delete /v1/shields/{identifier}
|
||||
scoring:
|
||||
methods:
|
||||
score: post /v1/scoring/score
|
||||
score_batch: post /v1/scoring/score-batch
|
||||
scoring_functions:
|
||||
models:
|
||||
scoring_fn: ScoringFn
|
||||
scoring_fn_params: ScoringFnParams
|
||||
list_scoring_functions_response: ListScoringFunctionsResponse
|
||||
methods:
|
||||
retrieve: get /v1/scoring-functions/{scoring_fn_id}
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1/scoring-functions
|
||||
register: post /v1/scoring-functions
|
||||
unregister: delete /v1/scoring-functions/{scoring_fn_id}
|
||||
files:
|
||||
models:
|
||||
file: OpenAIFileObject
|
||||
list_files_response: ListOpenAIFileResponse
|
||||
delete_file_response: OpenAIFileDeleteResponse
|
||||
methods:
|
||||
create: post /v1/files
|
||||
list: get /v1/files
|
||||
retrieve: get /v1/files/{file_id}
|
||||
delete: delete /v1/files/{file_id}
|
||||
content: get /v1/files/{file_id}/content
|
||||
batches:
|
||||
methods:
|
||||
create: post /v1/batches
|
||||
list: get /v1/batches
|
||||
retrieve: get /v1/batches/{batch_id}
|
||||
cancel: post /v1/batches/{batch_id}/cancel
|
||||
alpha:
|
||||
subresources:
|
||||
inference:
|
||||
methods:
|
||||
rerank: post /v1alpha/inference/rerank
|
||||
post_training:
|
||||
models:
|
||||
algorithm_config: AlgorithmConfig
|
||||
post_training_job: PostTrainingJob
|
||||
list_post_training_jobs_response: ListPostTrainingJobsResponse
|
||||
methods:
|
||||
preference_optimize: post /v1alpha/post-training/preference-optimize
|
||||
supervised_fine_tune: post /v1alpha/post-training/supervised-fine-tune
|
||||
subresources:
|
||||
job:
|
||||
methods:
|
||||
artifacts: get /v1alpha/post-training/job/artifacts
|
||||
cancel: post /v1alpha/post-training/job/cancel
|
||||
status: get /v1alpha/post-training/job/status
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1alpha/post-training/jobs
|
||||
benchmarks:
|
||||
models:
|
||||
benchmark: Benchmark
|
||||
list_benchmarks_response: ListBenchmarksResponse
|
||||
methods:
|
||||
retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1alpha/eval/benchmarks
|
||||
register: post /v1alpha/eval/benchmarks
|
||||
unregister: delete /v1alpha/eval/benchmarks/{benchmark_id}
|
||||
eval:
|
||||
models:
|
||||
evaluate_response: EvaluateResponse
|
||||
benchmark_config: BenchmarkConfig
|
||||
job: Job
|
||||
methods:
|
||||
evaluate_rows: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
|
||||
run_eval: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
|
||||
evaluate_rows_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
|
||||
run_eval_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
|
||||
subresources:
|
||||
jobs:
|
||||
methods:
|
||||
cancel: delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
|
||||
status: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
|
||||
retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result
|
||||
beta:
|
||||
subresources:
|
||||
datasets:
|
||||
models:
|
||||
list_datasets_response: ListDatasetsResponse
|
||||
methods:
|
||||
register: post /v1beta/datasets
|
||||
retrieve: get /v1beta/datasets/{dataset_id}
|
||||
list:
|
||||
paginated: false
|
||||
endpoint: get /v1beta/datasets
|
||||
unregister: delete /v1beta/datasets/{dataset_id}
|
||||
iterrows: get /v1beta/datasetio/iterrows/{dataset_id}
|
||||
appendrows: post /v1beta/datasetio/append-rows/{dataset_id}
|
||||
|
|
@ -1,610 +0,0 @@
|
|||
# yaml-language-server: $schema=https://app.stainlessapi.com/config-internal.schema.json
|
||||
|
||||
organization:
|
||||
# Name of your organization or company, used to determine the name of the client
|
||||
# and headings.
|
||||
name: llama-stack-client
|
||||
docs: https://llama-stack.readthedocs.io/en/latest/
|
||||
contact: llamastack@meta.com
|
||||
security:
|
||||
- {}
|
||||
- BearerAuth: []
|
||||
security_schemes:
|
||||
BearerAuth:
|
||||
type: http
|
||||
scheme: bearer
|
||||
# `targets` define the output targets and their customization options, such as
|
||||
# whether to emit the Node SDK and what it's package name should be.
|
||||
targets:
|
||||
node:
|
||||
package_name: llama-stack-client
|
||||
production_repo: llamastack/llama-stack-client-typescript
|
||||
publish:
|
||||
npm: false
|
||||
python:
|
||||
package_name: llama_stack_client
|
||||
production_repo: llamastack/llama-stack-client-python
|
||||
options:
|
||||
use_uv: true
|
||||
publish:
|
||||
pypi: true
|
||||
project_name: llama_stack_client
|
||||
kotlin:
|
||||
reverse_domain: com.llama_stack_client.api
|
||||
production_repo: null
|
||||
publish:
|
||||
maven: false
|
||||
go:
|
||||
package_name: llama-stack-client
|
||||
production_repo: llamastack/llama-stack-client-go
|
||||
options:
|
||||
enable_v2: true
|
||||
back_compat_use_shared_package: false
|
||||
|
||||
# `client_settings` define settings for the API client, such as extra constructor
|
||||
# arguments (used for authentication), retry behavior, idempotency, etc.
|
||||
client_settings:
|
||||
default_env_prefix: LLAMA_STACK_CLIENT
|
||||
opts:
|
||||
api_key:
|
||||
type: string
|
||||
read_env: LLAMA_STACK_CLIENT_API_KEY
|
||||
auth: { security_scheme: BearerAuth }
|
||||
nullable: true
|
||||
|
||||
# `environments` are a map of the name of the environment (e.g. "sandbox",
|
||||
# "production") to the corresponding url to use.
|
||||
environments:
|
||||
production: http://any-hosted-llama-stack.com
|
||||
|
||||
# `pagination` defines [pagination schemes] which provides a template to match
|
||||
# endpoints and generate next-page and auto-pagination helpers in the SDKs.
|
||||
pagination:
|
||||
- name: datasets_iterrows
|
||||
type: offset
|
||||
request:
|
||||
dataset_id:
|
||||
type: string
|
||||
start_index:
|
||||
type: integer
|
||||
x-stainless-pagination-property:
|
||||
purpose: offset_count_param
|
||||
limit:
|
||||
type: integer
|
||||
response:
|
||||
data:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
next_index:
|
||||
type: integer
|
||||
x-stainless-pagination-property:
|
||||
purpose: offset_count_start_field
|
||||
- name: openai_cursor_page
|
||||
type: cursor
|
||||
request:
|
||||
limit:
|
||||
type: integer
|
||||
after:
|
||||
type: string
|
||||
x-stainless-pagination-property:
|
||||
purpose: next_cursor_param
|
||||
response:
|
||||
data:
|
||||
type: array
|
||||
items: {}
|
||||
has_more:
|
||||
type: boolean
|
||||
last_id:
|
||||
type: string
|
||||
x-stainless-pagination-property:
|
||||
purpose: next_cursor_field
|
||||
# `resources` define the structure and organization for your API, such as how
|
||||
# methods and models are grouped together and accessed. See the [configuration
|
||||
# guide] for more information.
|
||||
#
|
||||
# [configuration guide]:
|
||||
# https://app.stainlessapi.com/docs/guides/configure#resources
|
||||
resources:
|
||||
$shared:
|
||||
models:
|
||||
agent_config: AgentConfig
|
||||
interleaved_content_item: InterleavedContentItem
|
||||
interleaved_content: InterleavedContent
|
||||
param_type: ParamType
|
||||
safety_violation: SafetyViolation
|
||||
sampling_params: SamplingParams
|
||||
scoring_result: ScoringResult
|
||||
message: Message
|
||||
user_message: UserMessage
|
||||
completion_message: CompletionMessage
|
||||
tool_response_message: ToolResponseMessage
|
||||
system_message: SystemMessage
|
||||
tool_call: ToolCall
|
||||
query_result: RAGQueryResult
|
||||
document: RAGDocument
|
||||
query_config: RAGQueryConfig
|
||||
response_format: ResponseFormat
|
||||
toolgroups:
|
||||
models:
|
||||
tool_group: ToolGroup
|
||||
list_tool_groups_response: ListToolGroupsResponse
|
||||
methods:
|
||||
register: post /v1/toolgroups
|
||||
get: get /v1/toolgroups/{toolgroup_id}
|
||||
list: get /v1/toolgroups
|
||||
unregister: delete /v1/toolgroups/{toolgroup_id}
|
||||
tools:
|
||||
methods:
|
||||
get: get /v1/tools/{tool_name}
|
||||
list:
|
||||
endpoint: get /v1/tools
|
||||
paginated: false
|
||||
|
||||
tool_runtime:
|
||||
models:
|
||||
tool_def: ToolDef
|
||||
tool_invocation_result: ToolInvocationResult
|
||||
methods:
|
||||
list_tools:
|
||||
endpoint: get /v1/tool-runtime/list-tools
|
||||
paginated: false
|
||||
invoke_tool: post /v1/tool-runtime/invoke
|
||||
subresources:
|
||||
rag_tool:
|
||||
methods:
|
||||
insert: post /v1/tool-runtime/rag-tool/insert
|
||||
query: post /v1/tool-runtime/rag-tool/query
|
||||
|
||||
responses:
|
||||
models:
|
||||
response_object_stream: OpenAIResponseObjectStream
|
||||
response_object: OpenAIResponseObject
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/responses
|
||||
streaming:
|
||||
stream_event_model: responses.response_object_stream
|
||||
param_discriminator: stream
|
||||
retrieve: get /v1/responses/{response_id}
|
||||
list:
|
||||
type: http
|
||||
endpoint: get /v1/responses
|
||||
delete:
|
||||
type: http
|
||||
endpoint: delete /v1/responses/{response_id}
|
||||
subresources:
|
||||
input_items:
|
||||
methods:
|
||||
list:
|
||||
type: http
|
||||
endpoint: get /v1/responses/{response_id}/input_items
|
||||
|
||||
conversations:
|
||||
models:
|
||||
conversation_object: Conversation
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/conversations
|
||||
retrieve: get /v1/conversations/{conversation_id}
|
||||
update:
|
||||
type: http
|
||||
endpoint: post /v1/conversations/{conversation_id}
|
||||
delete:
|
||||
type: http
|
||||
endpoint: delete /v1/conversations/{conversation_id}
|
||||
subresources:
|
||||
items:
|
||||
methods:
|
||||
get:
|
||||
type: http
|
||||
endpoint: get /v1/conversations/{conversation_id}/items/{item_id}
|
||||
list:
|
||||
type: http
|
||||
endpoint: get /v1/conversations/{conversation_id}/items
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/conversations/{conversation_id}/items
|
||||
|
||||
inspect:
|
||||
models:
|
||||
healthInfo: HealthInfo
|
||||
providerInfo: ProviderInfo
|
||||
routeInfo: RouteInfo
|
||||
versionInfo: VersionInfo
|
||||
methods:
|
||||
health: get /v1/health
|
||||
version: get /v1/version
|
||||
|
||||
embeddings:
|
||||
models:
|
||||
create_embeddings_response: OpenAIEmbeddingsResponse
|
||||
methods:
|
||||
create: post /v1/embeddings
|
||||
|
||||
chat:
|
||||
models:
|
||||
chat_completion_chunk: OpenAIChatCompletionChunk
|
||||
subresources:
|
||||
completions:
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/chat/completions
|
||||
streaming:
|
||||
stream_event_model: chat.chat_completion_chunk
|
||||
param_discriminator: stream
|
||||
list:
|
||||
type: http
|
||||
endpoint: get /v1/chat/completions
|
||||
retrieve:
|
||||
type: http
|
||||
endpoint: get /v1/chat/completions/{completion_id}
|
||||
completions:
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1/completions
|
||||
streaming:
|
||||
param_discriminator: stream
|
||||
|
||||
vector_io:
|
||||
models:
|
||||
queryChunksResponse: QueryChunksResponse
|
||||
methods:
|
||||
insert: post /v1/vector-io/insert
|
||||
query: post /v1/vector-io/query
|
||||
|
||||
vector_stores:
|
||||
models:
|
||||
vector_store: VectorStoreObject
|
||||
list_vector_stores_response: VectorStoreListResponse
|
||||
vector_store_delete_response: VectorStoreDeleteResponse
|
||||
vector_store_search_response: VectorStoreSearchResponsePage
|
||||
methods:
|
||||
create: post /v1/vector_stores
|
||||
list:
|
||||
endpoint: get /v1/vector_stores
|
||||
retrieve: get /v1/vector_stores/{vector_store_id}
|
||||
update: post /v1/vector_stores/{vector_store_id}
|
||||
delete: delete /v1/vector_stores/{vector_store_id}
|
||||
search: post /v1/vector_stores/{vector_store_id}/search
|
||||
subresources:
|
||||
files:
|
||||
models:
|
||||
vector_store_file: VectorStoreFileObject
|
||||
methods:
|
||||
list: get /v1/vector_stores/{vector_store_id}/files
|
||||
retrieve: get /v1/vector_stores/{vector_store_id}/files/{file_id}
|
||||
update: post /v1/vector_stores/{vector_store_id}/files/{file_id}
|
||||
delete: delete /v1/vector_stores/{vector_store_id}/files/{file_id}
|
||||
create: post /v1/vector_stores/{vector_store_id}/files
|
||||
content: get /v1/vector_stores/{vector_store_id}/files/{file_id}/content
|
||||
file_batches:
|
||||
models:
|
||||
vector_store_file_batches: VectorStoreFileBatchObject
|
||||
list_vector_store_files_in_batch_response: VectorStoreFilesListInBatchResponse
|
||||
methods:
|
||||
create: post /v1/vector_stores/{vector_store_id}/file_batches
|
||||
retrieve: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}
|
||||
list_files: get /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files
|
||||
cancel: post /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel
|
||||
|
||||
models:
|
||||
models:
|
||||
model: Model
|
||||
list_models_response: ListModelsResponse
|
||||
methods:
|
||||
retrieve: get /v1/models/{model_id}
|
||||
list:
|
||||
endpoint: get /v1/models
|
||||
paginated: false
|
||||
register: post /v1/models
|
||||
unregister: delete /v1/models/{model_id}
|
||||
subresources:
|
||||
openai:
|
||||
methods:
|
||||
list:
|
||||
endpoint: get /v1/models
|
||||
paginated: false
|
||||
|
||||
providers:
|
||||
models:
|
||||
list_providers_response: ListProvidersResponse
|
||||
methods:
|
||||
list:
|
||||
endpoint: get /v1/providers
|
||||
paginated: false
|
||||
retrieve: get /v1/providers/{provider_id}
|
||||
|
||||
routes:
|
||||
models:
|
||||
list_routes_response: ListRoutesResponse
|
||||
methods:
|
||||
list:
|
||||
endpoint: get /v1/inspect/routes
|
||||
paginated: false
|
||||
|
||||
|
||||
moderations:
|
||||
models:
|
||||
create_response: ModerationObject
|
||||
methods:
|
||||
create: post /v1/moderations
|
||||
|
||||
|
||||
safety:
|
||||
models:
|
||||
run_shield_response: RunShieldResponse
|
||||
methods:
|
||||
run_shield: post /v1/safety/run-shield
|
||||
|
||||
|
||||
shields:
|
||||
models:
|
||||
shield: Shield
|
||||
list_shields_response: ListShieldsResponse
|
||||
methods:
|
||||
retrieve: get /v1/shields/{identifier}
|
||||
list:
|
||||
endpoint: get /v1/shields
|
||||
paginated: false
|
||||
register: post /v1/shields
|
||||
delete: delete /v1/shields/{identifier}
|
||||
|
||||
synthetic_data_generation:
|
||||
models:
|
||||
syntheticDataGenerationResponse: SyntheticDataGenerationResponse
|
||||
methods:
|
||||
generate: post /v1/synthetic-data-generation/generate
|
||||
|
||||
telemetry:
|
||||
models:
|
||||
span_with_status: SpanWithStatus
|
||||
trace: Trace
|
||||
query_spans_response: QuerySpansResponse
|
||||
event: Event
|
||||
query_condition: QueryCondition
|
||||
methods:
|
||||
query_traces:
|
||||
endpoint: post /v1alpha/telemetry/traces
|
||||
skip_test_reason: 'unsupported query params in java / kotlin'
|
||||
get_span_tree: post /v1alpha/telemetry/spans/{span_id}/tree
|
||||
query_spans:
|
||||
endpoint: post /v1alpha/telemetry/spans
|
||||
skip_test_reason: 'unsupported query params in java / kotlin'
|
||||
query_metrics:
|
||||
endpoint: post /v1alpha/telemetry/metrics/{metric_name}
|
||||
skip_test_reason: 'unsupported query params in java / kotlin'
|
||||
# log_event: post /v1alpha/telemetry/events
|
||||
save_spans_to_dataset: post /v1alpha/telemetry/spans/export
|
||||
get_span: get /v1alpha/telemetry/traces/{trace_id}/spans/{span_id}
|
||||
get_trace: get /v1alpha/telemetry/traces/{trace_id}
|
||||
|
||||
scoring:
|
||||
methods:
|
||||
score: post /v1/scoring/score
|
||||
score_batch: post /v1/scoring/score-batch
|
||||
scoring_functions:
|
||||
methods:
|
||||
retrieve: get /v1/scoring-functions/{scoring_fn_id}
|
||||
list:
|
||||
endpoint: get /v1/scoring-functions
|
||||
paginated: false
|
||||
register: post /v1/scoring-functions
|
||||
models:
|
||||
scoring_fn: ScoringFn
|
||||
scoring_fn_params: ScoringFnParams
|
||||
list_scoring_functions_response: ListScoringFunctionsResponse
|
||||
|
||||
benchmarks:
|
||||
methods:
|
||||
retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}
|
||||
list:
|
||||
endpoint: get /v1alpha/eval/benchmarks
|
||||
paginated: false
|
||||
register: post /v1alpha/eval/benchmarks
|
||||
models:
|
||||
benchmark: Benchmark
|
||||
list_benchmarks_response: ListBenchmarksResponse
|
||||
|
||||
files:
|
||||
methods:
|
||||
create: post /v1/files
|
||||
list: get /v1/files
|
||||
retrieve: get /v1/files/{file_id}
|
||||
delete: delete /v1/files/{file_id}
|
||||
content: get /v1/files/{file_id}/content
|
||||
models:
|
||||
file: OpenAIFileObject
|
||||
list_files_response: ListOpenAIFileResponse
|
||||
delete_file_response: OpenAIFileDeleteResponse
|
||||
|
||||
alpha:
|
||||
subresources:
|
||||
inference:
|
||||
methods:
|
||||
rerank: post /v1alpha/inference/rerank
|
||||
|
||||
post_training:
|
||||
models:
|
||||
algorithm_config: AlgorithmConfig
|
||||
post_training_job: PostTrainingJob
|
||||
list_post_training_jobs_response: ListPostTrainingJobsResponse
|
||||
methods:
|
||||
preference_optimize: post /v1alpha/post-training/preference-optimize
|
||||
supervised_fine_tune: post /v1alpha/post-training/supervised-fine-tune
|
||||
subresources:
|
||||
job:
|
||||
methods:
|
||||
artifacts: get /v1alpha/post-training/job/artifacts
|
||||
cancel: post /v1alpha/post-training/job/cancel
|
||||
status: get /v1alpha/post-training/job/status
|
||||
list:
|
||||
endpoint: get /v1alpha/post-training/jobs
|
||||
paginated: false
|
||||
|
||||
eval:
|
||||
methods:
|
||||
evaluate_rows: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
|
||||
run_eval: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
|
||||
evaluate_rows_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/evaluations
|
||||
run_eval_alpha: post /v1alpha/eval/benchmarks/{benchmark_id}/jobs
|
||||
|
||||
subresources:
|
||||
jobs:
|
||||
methods:
|
||||
cancel: delete /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
|
||||
status: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}
|
||||
retrieve: get /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result
|
||||
models:
|
||||
evaluate_response: EvaluateResponse
|
||||
benchmark_config: BenchmarkConfig
|
||||
job: Job
|
||||
|
||||
agents:
|
||||
methods:
|
||||
create: post /v1alpha/agents
|
||||
list: get /v1alpha/agents
|
||||
retrieve: get /v1alpha/agents/{agent_id}
|
||||
delete: delete /v1alpha/agents/{agent_id}
|
||||
models:
|
||||
inference_step: InferenceStep
|
||||
tool_execution_step: ToolExecutionStep
|
||||
tool_response: ToolResponse
|
||||
shield_call_step: ShieldCallStep
|
||||
memory_retrieval_step: MemoryRetrievalStep
|
||||
subresources:
|
||||
session:
|
||||
models:
|
||||
session: Session
|
||||
methods:
|
||||
list: get /v1alpha/agents/{agent_id}/sessions
|
||||
create: post /v1alpha/agents/{agent_id}/session
|
||||
delete: delete /v1alpha/agents/{agent_id}/session/{session_id}
|
||||
retrieve: get /v1alpha/agents/{agent_id}/session/{session_id}
|
||||
steps:
|
||||
methods:
|
||||
retrieve: get /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}
|
||||
turn:
|
||||
models:
|
||||
turn: Turn
|
||||
turn_response_event: AgentTurnResponseEvent
|
||||
agent_turn_response_stream_chunk: AgentTurnResponseStreamChunk
|
||||
methods:
|
||||
create:
|
||||
type: http
|
||||
endpoint: post /v1alpha/agents/{agent_id}/session/{session_id}/turn
|
||||
streaming:
|
||||
stream_event_model: alpha.agents.turn.agent_turn_response_stream_chunk
|
||||
param_discriminator: stream
|
||||
retrieve: get /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}
|
||||
resume:
|
||||
type: http
|
||||
endpoint: post /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume
|
||||
streaming:
|
||||
stream_event_model: alpha.agents.turn.agent_turn_response_stream_chunk
|
||||
param_discriminator: stream
|
||||
|
||||
beta:
|
||||
subresources:
|
||||
datasets:
|
||||
models:
|
||||
list_datasets_response: ListDatasetsResponse
|
||||
methods:
|
||||
register: post /v1beta/datasets
|
||||
retrieve: get /v1beta/datasets/{dataset_id}
|
||||
list:
|
||||
endpoint: get /v1beta/datasets
|
||||
paginated: false
|
||||
unregister: delete /v1beta/datasets/{dataset_id}
|
||||
iterrows: get /v1beta/datasetio/iterrows/{dataset_id}
|
||||
appendrows: post /v1beta/datasetio/append-rows/{dataset_id}
|
||||
|
||||
|
||||
settings:
|
||||
license: MIT
|
||||
unwrap_response_fields: [ data ]
|
||||
|
||||
openapi:
|
||||
transformations:
|
||||
- command: renameValue
|
||||
reason: pydantic reserved name
|
||||
args:
|
||||
filter:
|
||||
only:
|
||||
- '$.components.schemas.InferenceStep.properties.model_response'
|
||||
rename:
|
||||
python:
|
||||
property_name: 'inference_model_response'
|
||||
|
||||
# - command: renameValue
|
||||
# reason: pydantic reserved name
|
||||
# args:
|
||||
# filter:
|
||||
# only:
|
||||
# - '$.components.schemas.Model.properties.model_type'
|
||||
# rename:
|
||||
# python:
|
||||
# property_name: 'type'
|
||||
- command: mergeObject
|
||||
reason: Better return_type using enum
|
||||
args:
|
||||
target:
|
||||
- '$.components.schemas'
|
||||
object:
|
||||
ReturnType:
|
||||
additionalProperties: false
|
||||
properties:
|
||||
type:
|
||||
enum:
|
||||
- string
|
||||
- number
|
||||
- boolean
|
||||
- array
|
||||
- object
|
||||
- json
|
||||
- union
|
||||
- chat_completion_input
|
||||
- completion_input
|
||||
- agent_turn_input
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
- command: replaceProperties
|
||||
reason: Replace return type properties with better model (see above)
|
||||
args:
|
||||
filter:
|
||||
only:
|
||||
- '$.components.schemas.ScoringFn.properties.return_type'
|
||||
- '$.components.schemas.RegisterScoringFunctionRequest.properties.return_type'
|
||||
value:
|
||||
$ref: '#/components/schemas/ReturnType'
|
||||
- command: oneOfToAnyOf
|
||||
reason: Prism (mock server) doesn't like one of our requests as it technically matches multiple variants
|
||||
- reason: For better names
|
||||
command: extractToRefs
|
||||
args:
|
||||
ref:
|
||||
target: '$.components.schemas.ToolCallDelta.properties.tool_call'
|
||||
name: '#/components/schemas/ToolCallOrString'
|
||||
|
||||
# `readme` is used to configure the code snippets that will be rendered in the
|
||||
# README.md of various SDKs. In particular, you can change the `headline`
|
||||
# snippet's endpoint and the arguments to call it with.
|
||||
readme:
|
||||
example_requests:
|
||||
default:
|
||||
type: request
|
||||
endpoint: post /v1/chat/completions
|
||||
params: &ref_0 {}
|
||||
headline:
|
||||
type: request
|
||||
endpoint: post /v1/models
|
||||
params: *ref_0
|
||||
pagination:
|
||||
type: request
|
||||
endpoint: post /v1/chat/completions
|
||||
params: {}
|
||||
File diff suppressed because it is too large.
|
|
@ -19,6 +19,8 @@ ARG KEEP_WORKSPACE=""
|
|||
ARG DISTRO_NAME="starter"
|
||||
ARG RUN_CONFIG_PATH=""
|
||||
ARG UV_HTTP_TIMEOUT=500
|
||||
ARG UV_EXTRA_INDEX_URL=""
|
||||
ARG UV_INDEX_STRATEGY=""
|
||||
ENV UV_HTTP_TIMEOUT=${UV_HTTP_TIMEOUT}
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
|
||||
|
|
@ -62,7 +64,9 @@ COPY . /workspace
|
|||
|
||||
# Install the client package if it is provided
|
||||
# NOTE: this is installed before llama-stack since llama-stack depends on llama-stack-client-python
|
||||
# Unset UV index env vars to ensure we only use PyPI for the client
|
||||
RUN set -eux; \
|
||||
unset UV_EXTRA_INDEX_URL UV_INDEX_STRATEGY; \
|
||||
if [ -n "$LLAMA_STACK_CLIENT_DIR" ]; then \
|
||||
if [ ! -d "$LLAMA_STACK_CLIENT_DIR" ]; then \
|
||||
echo "LLAMA_STACK_CLIENT_DIR is set but $LLAMA_STACK_CLIENT_DIR does not exist" >&2; \
|
||||
|
|
@ -72,13 +76,22 @@ RUN set -eux; \
|
|||
fi;
|
||||
|
||||
# Install llama-stack
|
||||
# Use UV_EXTRA_INDEX_URL inline only for editable install with RC dependencies
|
||||
RUN set -eux; \
|
||||
SAVED_UV_EXTRA_INDEX_URL="${UV_EXTRA_INDEX_URL:-}"; \
|
||||
SAVED_UV_INDEX_STRATEGY="${UV_INDEX_STRATEGY:-}"; \
|
||||
unset UV_EXTRA_INDEX_URL UV_INDEX_STRATEGY; \
|
||||
if [ "$INSTALL_MODE" = "editable" ]; then \
|
||||
if [ ! -d "$LLAMA_STACK_DIR" ]; then \
|
||||
echo "INSTALL_MODE=editable requires LLAMA_STACK_DIR to point to a directory inside the build context" >&2; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
if [ -n "$SAVED_UV_EXTRA_INDEX_URL" ] && [ -n "$SAVED_UV_INDEX_STRATEGY" ]; then \
|
||||
UV_EXTRA_INDEX_URL="$SAVED_UV_EXTRA_INDEX_URL" UV_INDEX_STRATEGY="$SAVED_UV_INDEX_STRATEGY" \
|
||||
uv pip install --no-cache -e "$LLAMA_STACK_DIR"; \
|
||||
else \
|
||||
uv pip install --no-cache -e "$LLAMA_STACK_DIR"; \
|
||||
fi; \
|
||||
elif [ "$INSTALL_MODE" = "test-pypi" ]; then \
|
||||
uv pip install --no-cache fastapi libcst; \
|
||||
if [ -n "$TEST_PYPI_VERSION" ]; then \
|
||||
|
|
@ -95,7 +108,9 @@ RUN set -eux; \
|
|||
fi;
|
||||
|
||||
# Install the dependencies for the distribution
|
||||
# Explicitly unset UV index env vars to ensure we only use PyPI for distribution deps
|
||||
RUN set -eux; \
|
||||
unset UV_EXTRA_INDEX_URL UV_INDEX_STRATEGY; \
|
||||
if [ -z "$DISTRO_NAME" ]; then \
|
||||
echo "DISTRO_NAME must be provided" >&2; \
|
||||
exit 1; \
|
||||
|
|
|
|||
docs/docs/api-deprecated/index.mdx (new file, 62 lines)
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
title: Deprecated APIs
|
||||
description: Legacy APIs that are being phased out
|
||||
sidebar_label: Deprecated
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# Deprecated APIs
|
||||
|
||||
This section contains APIs that are being phased out in favor of newer, more standardized implementations. These APIs are maintained for backward compatibility but are not recommended for new projects.
|
||||
|
||||
:::warning Deprecation Notice
|
||||
These APIs are deprecated and will be removed in future versions. Please migrate to the recommended alternatives listed below.
|
||||
:::
|
||||
|
||||
## Migration Guide
|
||||
|
||||
When using deprecated APIs, please refer to the migration guides provided for each API to understand how to transition to the supported alternatives.
|
||||
|
||||
## Deprecated API List
|
||||
|
||||
### Legacy Inference APIs
|
||||
Some older inference endpoints that have been superseded by the standardized Inference API.
|
||||
|
||||
**Migration Path:** Use the [Inference API](../api/) instead.
|
||||
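For illustration, here is a minimal sketch of what a migrated call might look like against the current OpenAI-compatible Inference API using the `llama_stack_client` Python SDK. The server URL and model ID below are placeholders for your own deployment:

```python
from llama_stack_client import LlamaStackClient

# Assumes a Llama Stack server running locally on the default port.
client = LlamaStackClient(base_url="http://localhost:8321")

# Replace with a model registered on your server (see client.models.list()).
response = client.chat.completions.create(
    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
    messages=[{"role": "user", "content": "Summarize the migration steps."}],
)
print(response.choices[0].message.content)
```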
|
||||
### Legacy Vector Operations
|
||||
Older vector database operations that have been replaced by the Vector IO API.
|
||||
|
||||
**Migration Path:** Use the [Vector IO API](../api/) instead.
|
||||
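As a rough sketch of the replacement flow (the store name, query string, and exact keyword arguments below are hypothetical placeholders; the supported parameters depend on your configured providers):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Create a vector store, then search it (maps to POST /v1/vector_stores
# and POST /v1/vector_stores/{vector_store_id}/search).
store = client.vector_stores.create(name="docs")
results = client.vector_stores.search(
    store.id,
    query="How do I configure providers?",
)
print(results)
```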
|
||||
### Legacy File Operations
|
||||
Older file management endpoints that have been replaced by the Files API.
|
||||
|
||||
**Migration Path:** Use the [Files API](../api/) instead.
|
||||
|
||||
## Support Timeline
|
||||
|
||||
Deprecated APIs will be supported according to the following timeline:
|
||||
|
||||
- **Current Version**: Full support with deprecation warnings
|
||||
- **Next Major Version**: Limited support with migration notices
|
||||
- **Following Major Version**: Removal of deprecated APIs
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you need assistance migrating from deprecated APIs:
|
||||
|
||||
1. Check the specific migration guides for each API
|
||||
2. Review the [API Reference](../api/) for current alternatives
|
||||
3. Consult the [Community Forums](https://github.com/llamastack/llama-stack/discussions) for migration support
|
||||
4. Open an issue on GitHub for specific migration questions
|
||||
|
||||
## Contributing
|
||||
|
||||
If you find issues with deprecated APIs or have suggestions for improving the migration process, please contribute by:
|
||||
|
||||
1. Opening an issue describing the problem
|
||||
2. Submitting a pull request with improvements
|
||||
3. Updating migration documentation
|
||||
|
||||
For more information on contributing, see our [Contributing Guide](../contributing/).
|
||||
docs/docs/api-experimental/index.mdx (new file, 128 lines)
|
|
@ -0,0 +1,128 @@
|
|||
---
|
||||
title: Experimental APIs
|
||||
description: APIs in development with limited support
|
||||
sidebar_label: Experimental
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# Experimental APIs
|
||||
|
||||
This section contains APIs that are currently in development and may have limited support or stability. These APIs are available for testing and feedback but should not be used in production environments.
|
||||
|
||||
:::warning Experimental Notice
|
||||
These APIs are experimental and may change without notice. Use with caution and provide feedback to help improve them.
|
||||
:::
|
||||
|
||||
## Current Experimental APIs
|
||||
|
||||
### Batch Inference API
|
||||
Run inference on a dataset of inputs in batch mode for improved efficiency.
|
||||
|
||||
**Status:** In Development
|
||||
**Provider Support:** Limited
|
||||
**Use Case:** Large-scale inference operations
|
||||
|
||||
**Features:**
|
||||
- Batch processing of multiple inputs
|
||||
- Optimized resource utilization
|
||||
- Progress tracking and monitoring
|
||||
|
||||
### Batch Agents API
|
||||
Run agentic workflows on a dataset of inputs in batch mode.
|
||||
|
||||
**Status:** In Development
|
||||
**Provider Support:** Limited
|
||||
**Use Case:** Large-scale agent operations
|
||||
|
||||
**Features:**
|
||||
- Batch agent execution
|
||||
- Parallel processing capabilities
|
||||
- Result aggregation and analysis
|
||||
|
||||
### Synthetic Data Generation API
|
||||
Generate synthetic data for model development and testing.
|
||||
|
||||
**Status:** Early Development
|
||||
**Provider Support:** Very Limited
|
||||
**Use Case:** Training data augmentation
|
||||
|
||||
**Features:**
|
||||
- Automated data generation
|
||||
- Quality control mechanisms
|
||||
- Customizable generation parameters
|
||||
|
||||
### Batches API (OpenAI-compatible)
|
||||
OpenAI-compatible batch management for inference operations.
|
||||
|
||||
**Status:** In Development
|
||||
**Provider Support:** Limited
|
||||
**Use Case:** OpenAI batch processing compatibility
|
||||
|
||||
**Features:**
|
||||
- OpenAI batch API compatibility
|
||||
- Job scheduling and management
|
||||
- Status tracking and monitoring
|
||||
|
||||
## Getting Started with Experimental APIs
|
||||
|
||||
### Prerequisites
|
||||
- Llama Stack server running with experimental features enabled
|
||||
- Appropriate provider configurations
|
||||
- Understanding of API limitations
|
||||
|
||||
### Configuration
|
||||
Experimental APIs may require special configuration flags or provider settings. Check the specific API documentation for setup requirements.
|
||||
|
||||
### Usage Guidelines
|
||||
1. **Testing Only**: Use experimental APIs for testing and development only
|
||||
2. **Monitor Changes**: Watch for updates and breaking changes
|
||||
3. **Provide Feedback**: Report issues and suggest improvements
|
||||
4. **Backup Data**: Always backup important data when using experimental features
|
||||
|
||||
## Feedback and Contribution
|
||||
|
||||
We encourage feedback on experimental APIs to help improve them:
|
||||
|
||||
### Reporting Issues
|
||||
- Use GitHub issues with the "experimental" label
|
||||
- Include detailed error messages and reproduction steps
|
||||
- Specify the API version and provider being used
|
||||
|
||||
### Feature Requests
|
||||
- Submit feature requests through GitHub discussions
|
||||
- Provide use cases and expected behavior
|
||||
- Consider contributing implementations
|
||||
|
||||
### Testing
|
||||
- Test experimental APIs in your environment
|
||||
- Report performance issues and optimization opportunities
|
||||
- Share success stories and use cases
|
||||
|
||||
## Migration to Stable APIs
|
||||
|
||||
As experimental APIs mature, they will be moved to the stable API section. When this happens:
|
||||
|
||||
1. **Announcement**: We'll announce the promotion in release notes
|
||||
2. **Migration Guide**: Detailed migration instructions will be provided
|
||||
3. **Deprecation Timeline**: Experimental versions will be deprecated with notice
|
||||
4. **Support**: Full support will be available for stable versions
|
||||
|
||||
## Provider Support
|
||||
|
||||
Experimental APIs may have limited provider support. Check the specific API documentation for:
|
||||
|
||||
- Supported providers
|
||||
- Configuration requirements
|
||||
- Known limitations
|
||||
- Performance characteristics
|
||||
|
||||
## Roadmap
|
||||
|
||||
Experimental APIs are part of our ongoing development roadmap:
|
||||
|
||||
- **Q1 2024**: Batch Inference API stabilization
|
||||
- **Q2 2024**: Batch Agents API improvements
|
||||
- **Q3 2024**: Synthetic Data Generation API expansion
|
||||
- **Q4 2024**: Batches API full OpenAI compatibility
|
||||
|
||||
For the latest updates, follow our [GitHub releases](https://github.com/llamastack/llama-stack/releases) and [roadmap discussions](https://github.com/llamastack/llama-stack/discussions).
|
||||
287
docs/docs/api-openai/index.mdx
Normal file
287
docs/docs/api-openai/index.mdx
Normal file
|
|
@ -0,0 +1,287 @@
|
|||
---
|
||||
title: OpenAI API Compatibility
|
||||
description: OpenAI-compatible APIs and features in Llama Stack
|
||||
sidebar_label: OpenAI Compatibility
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# OpenAI API Compatibility
|
||||
|
||||
Llama Stack provides comprehensive OpenAI API compatibility, allowing you to use existing OpenAI API clients and tools with Llama Stack providers. This compatibility layer ensures seamless migration and interoperability.
|
||||
|
||||
## Overview
|
||||
|
||||
OpenAI API compatibility in Llama Stack includes:
|
||||
|
||||
- **OpenAI-compatible endpoints** for all major APIs
|
||||
- **Request/response format compatibility** with OpenAI standards
|
||||
- **Authentication and authorization** using OpenAI-style API keys
|
||||
- **Error handling** with OpenAI-compatible error codes and messages
|
||||
- **Rate limiting** and usage tracking compatible with OpenAI patterns
|
||||
|
||||
## Supported OpenAI APIs
|
||||
|
||||
### Chat Completions API
|
||||
OpenAI-compatible chat completions for conversational AI applications.
|
||||
|
||||
**Endpoint:** `/v1/chat/completions`
|
||||
**Compatibility:** Full OpenAI API compatibility
|
||||
**Providers:** All inference providers
|
||||
|
||||
**Features:**
|
||||
- Message-based conversations
|
||||
- System prompts and user messages
|
||||
- Function calling support
|
||||
- Streaming responses
|
||||
- Temperature and other parameter controls
|
||||
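As a quick illustration of the streaming feature, here is a minimal sketch using the OpenAI Python SDK pointed at a Llama Stack server; the base URL, API key, and model id are placeholders that depend on your deployment.

```python
import openai

# Assumed values: adjust the base URL, key, and model id for your deployment.
client = openai.OpenAI(api_key="your-llama-stack-key", base_url="http://localhost:8000/v1")

stream = client.chat.completions.create(
    model="llama-3.1-8b",
    messages=[{"role": "user", "content": "Write a haiku about llamas."}],
    stream=True,  # stream tokens as they are generated
)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
```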
|
||||
### Completions API
|
||||
OpenAI-compatible text completions for general text generation.
|
||||
|
||||
**Endpoint:** `/v1/completions`
|
||||
**Compatibility:** Full OpenAI API compatibility
|
||||
**Providers:** All inference providers
|
||||
|
||||
**Features:**
|
||||
- Text completion generation
|
||||
- Prompt engineering support
|
||||
- Customizable parameters
|
||||
- Batch processing capabilities
|
||||
|
||||
### Embeddings API
|
||||
OpenAI-compatible embeddings for vector operations.
|
||||
|
||||
**Endpoint:** `/v1/embeddings`
|
||||
**Compatibility:** Full OpenAI API compatibility
|
||||
**Providers:** All embedding providers
|
||||
|
||||
**Features:**
|
||||
- Text embedding generation
|
||||
- Multiple embedding models
|
||||
- Batch embedding processing
|
||||
- Vector similarity operations
|
||||
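For example, a minimal embeddings request might look like the sketch below; the model id `all-MiniLM-L6-v2` and the server URL are assumptions that depend on which embedding models your providers expose.

```python
import openai

client = openai.OpenAI(api_key="your-llama-stack-key", base_url="http://localhost:8000/v1")

response = client.embeddings.create(
    model="all-MiniLM-L6-v2",  # assumed embedding model id registered on your server
    input=["Llama Stack provides OpenAI-compatible embeddings."],
)
print(len(response.data[0].embedding))  # embedding dimension
```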
|
||||
### Files API
|
||||
OpenAI-compatible file management for document processing.
|
||||
|
||||
**Endpoint:** `/v1/files`
|
||||
**Compatibility:** Full OpenAI API compatibility
|
||||
**Providers:** Local Filesystem, S3
|
||||
|
||||
**Features:**
|
||||
- File upload and management
|
||||
- Document processing
|
||||
- File metadata tracking
|
||||
- Secure file access
|
||||
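A minimal upload sketch using the OpenAI SDK is shown below; the file name and purpose value are illustrative.

```python
import openai

client = openai.OpenAI(api_key="your-llama-stack-key", base_url="http://localhost:8000/v1")

# Upload a local document for later use (e.g. attaching it to a vector store).
with open("document.pdf", "rb") as f:
    uploaded = client.files.create(file=f, purpose="assistants")
print(uploaded.id, uploaded.filename)
```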
|
||||
### Vector Store Files API
|
||||
OpenAI-compatible vector store file operations for RAG applications.
|
||||
|
||||
**Endpoint:** `/v1/vector_stores/{vector_store_id}/files`
|
||||
**Compatibility:** Full OpenAI API compatibility
|
||||
**Providers:** FAISS, SQLite-vec, Milvus, ChromaDB, Qdrant, Weaviate, Postgres (PGVector)
|
||||
|
||||
**Features:**
|
||||
- Automatic document processing
|
||||
- Vector store integration
|
||||
- File chunking and indexing
|
||||
- Search and retrieval operations
|
||||
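The sketch below attaches an uploaded file to a vector store through the OpenAI SDK; recent SDK releases expose vector stores at `client.vector_stores` (older releases used `client.beta.vector_stores`), and the names used here are placeholders.

```python
import openai

client = openai.OpenAI(api_key="your-llama-stack-key", base_url="http://localhost:8000/v1")

vector_store = client.vector_stores.create(name="docs")
with open("document.pdf", "rb") as f:
    uploaded = client.files.create(file=f, purpose="assistants")

# Attaching the file triggers chunking, embedding, and indexing on the server.
vs_file = client.vector_stores.files.create(
    vector_store_id=vector_store.id, file_id=uploaded.id
)
print(vs_file.status)  # e.g. "in_progress" until processing completes
```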
|
||||
### Batches API
|
||||
OpenAI-compatible batch processing for large-scale operations.
|
||||
|
||||
**Endpoint:** `/v1/batches`
|
||||
**Compatibility:** OpenAI API compatibility (experimental)
|
||||
**Providers:** Limited support
|
||||
|
||||
**Features:**
|
||||
- Batch job creation and management
|
||||
- Progress tracking
|
||||
- Result retrieval
|
||||
- Error handling
|
||||
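Since this API is experimental, provider support may vary; the sketch below follows the standard OpenAI batch workflow (upload a JSONL file of requests, then create a batch job) with placeholder file names.

```python
import openai

client = openai.OpenAI(api_key="your-llama-stack-key", base_url="http://localhost:8000/v1")

# requests.jsonl contains one OpenAI-format request per line (placeholder file).
batch_input = client.files.create(file=open("requests.jsonl", "rb"), purpose="batch")

batch = client.batches.create(
    input_file_id=batch_input.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
)
print(batch.id, batch.status)
```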
|
||||
## Migration from OpenAI
|
||||
|
||||
### Step 1: Update API Endpoint
|
||||
Change your API endpoint from OpenAI to your Llama Stack server:
|
||||
|
||||
```python
|
||||
# Before (OpenAI)
|
||||
import openai
|
||||
client = openai.OpenAI(api_key="your-openai-key")
|
||||
|
||||
# After (Llama Stack)
|
||||
import openai
|
||||
client = openai.OpenAI(
|
||||
api_key="your-llama-stack-key",
|
||||
base_url="http://localhost:8000/v1" # Your Llama Stack server
|
||||
)
|
||||
```
|
||||
|
||||
### Step 2: Configure Providers
|
||||
Set up your preferred providers in the Llama Stack configuration:
|
||||
|
||||
```yaml
|
||||
# stack-config.yaml
|
||||
providers:
  inference:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      model: llama-3.1-8b
|
||||
```
|
||||
|
||||
### Step 3: Test Compatibility
|
||||
Verify that your existing code works with Llama Stack:
|
||||
|
||||
```python
|
||||
# Test chat completions
|
||||
response = client.chat.completions.create(
|
||||
model="llama-3.1-8b",
|
||||
messages=[
|
||||
{"role": "user", "content": "Hello, world!"}
|
||||
]
|
||||
)
|
||||
print(response.choices[0].message.content)
|
||||
```
|
||||
|
||||
## Provider-Specific Features
|
||||
|
||||
### Meta Reference Provider
|
||||
- Full OpenAI API compatibility
|
||||
- Local model execution
|
||||
- Custom model support
|
||||
|
||||
### Remote Providers
|
||||
- OpenAI API compatibility
|
||||
- Cloud-based execution
|
||||
- Scalable infrastructure
|
||||
|
||||
### Vector Store Providers
|
||||
- OpenAI vector store API compatibility
|
||||
- Automatic document processing
|
||||
- Advanced search capabilities
|
||||
|
||||
## Authentication
|
||||
|
||||
Llama Stack supports OpenAI-style authentication:
|
||||
|
||||
### API Key Authentication
|
||||
```python
|
||||
client = openai.OpenAI(
|
||||
api_key="your-api-key",
|
||||
base_url="http://localhost:8000/v1"
|
||||
)
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
```bash
|
||||
export OPENAI_API_KEY="your-api-key"
|
||||
export OPENAI_BASE_URL="http://localhost:8000/v1"
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
Llama Stack provides OpenAI-compatible error responses:
|
||||
|
||||
```python
|
||||
try:
|
||||
response = client.chat.completions.create(...)
|
||||
except openai.APIError as e:
|
||||
print(f"API Error: {e}")
|
||||
except openai.RateLimitError as e:
|
||||
print(f"Rate Limit Error: {e}")
|
||||
except openai.APIConnectionError as e:
|
||||
print(f"Connection Error: {e}")
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
OpenAI-compatible rate limiting is supported:
|
||||
|
||||
- **Requests per minute** limits
|
||||
- **Tokens per minute** limits
|
||||
- **Concurrent request** limits
|
||||
- **Usage tracking** and monitoring
|
||||
|
||||
## Monitoring and Observability
|
||||
|
||||
Track your API usage with OpenAI-compatible monitoring:
|
||||
|
||||
- **Request/response logging**
|
||||
- **Usage metrics** and analytics
|
||||
- **Performance monitoring**
|
||||
- **Error tracking** and alerting
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Provider Selection
|
||||
Choose providers based on your requirements:
|
||||
- **Local development**: Meta Reference, Ollama
|
||||
- **Production**: Cloud providers (Fireworks, Together, NVIDIA)
|
||||
- **Specialized use cases**: Custom providers
|
||||
|
||||
### 2. Model Configuration
|
||||
Configure models for optimal performance:
|
||||
- **Model selection** based on task requirements
|
||||
- **Parameter tuning** for specific use cases
|
||||
- **Resource allocation** for performance
|
||||
|
||||
### 3. Error Handling
|
||||
Implement robust error handling:
|
||||
- **Retry logic** for transient failures (see the sketch after this list)
|
||||
- **Fallback providers** for high availability
|
||||
- **Monitoring** and alerting for issues
|
||||
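One way to implement the retry recommendation above is a small exponential-backoff wrapper; this is only a sketch, and the model name, backoff values, and API key are arbitrary placeholders.

```python
import time

import openai

client = openai.OpenAI(api_key="your-api-key", base_url="http://localhost:8000/v1")


def chat_with_retry(messages, retries=3, backoff=2.0):
    """Retry transient failures (rate limits, connection errors) with exponential backoff."""
    for attempt in range(retries):
        try:
            return client.chat.completions.create(model="llama-3.1-8b", messages=messages)
        except (openai.RateLimitError, openai.APIConnectionError):
            if attempt == retries - 1:
                raise
            time.sleep(backoff * (2**attempt))


response = chat_with_retry([{"role": "user", "content": "Hello!"}])
```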
|
||||
### 4. Security
|
||||
Follow security best practices:
|
||||
- **API key management** and rotation
|
||||
- **Access control** and authorization
|
||||
- **Data privacy** and compliance
|
||||
|
||||
## Implementation Examples
|
||||
|
||||
For detailed code examples and implementation guides, see our [OpenAI Implementation Guide](../providers/openai.mdx).
|
||||
|
||||
## Known Limitations
|
||||
|
||||
### Responses API Limitations
|
||||
The Responses API is still in active development. For detailed information about current limitations and implementation status, see our [OpenAI Responses API Limitations](../providers/openai_responses_limitations.mdx).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Connection Errors**
|
||||
- Verify server is running
|
||||
- Check network connectivity
|
||||
- Validate API endpoint URL
|
||||
|
||||
**Authentication Errors**
|
||||
- Verify API key is correct
|
||||
- Check key permissions
|
||||
- Ensure proper authentication headers
|
||||
|
||||
**Model Errors**
|
||||
- Verify model is available
|
||||
- Check provider configuration
|
||||
- Validate model parameters
|
||||
|
||||
### Getting Help
|
||||
|
||||
For OpenAI compatibility issues:
|
||||
|
||||
1. **Check Documentation**: Review provider-specific documentation
|
||||
2. **Community Support**: Ask questions in GitHub discussions
|
||||
3. **Issue Reporting**: Open GitHub issues for bugs
|
||||
4. **Professional Support**: Contact support for enterprise issues
|
||||
|
||||
## Roadmap
|
||||
|
||||
Upcoming OpenAI compatibility features:
|
||||
|
||||
- **Enhanced batch processing** support
|
||||
- **Advanced function calling** capabilities
|
||||
- **Improved error handling** and diagnostics
|
||||
- **Performance optimizations** for large-scale deployments
|
||||
|
||||
For the latest updates, follow our [GitHub releases](https://github.com/llamastack/llama-stack/releases) and [roadmap discussions](https://github.com/llamastack/llama-stack/discussions).
|
||||
144
docs/docs/api/index.mdx
Normal file
144
docs/docs/api/index.mdx
Normal file
|
|
@ -0,0 +1,144 @@
|
|||
---
|
||||
title: API Reference
|
||||
description: Complete reference for Llama Stack APIs
|
||||
sidebar_label: Overview
|
||||
sidebar_position: 1
|
||||
---
|
||||
|
||||
# API Reference
|
||||
|
||||
Llama Stack provides a comprehensive set of APIs for building generative AI applications. All APIs follow OpenAI-compatible standards and can be used interchangeably across different providers.
|
||||
|
||||
## Core APIs
|
||||
|
||||
### Inference API
|
||||
Run inference with Large Language Models (LLMs) and embedding models.
|
||||
|
||||
**Supported Providers:**
|
||||
- Meta Reference (Single Node)
|
||||
- Ollama (Single Node)
|
||||
- Fireworks (Hosted)
|
||||
- Together (Hosted)
|
||||
- NVIDIA NIM (Hosted and Single Node)
|
||||
- vLLM (Hosted and Single Node)
|
||||
- TGI (Hosted and Single Node)
|
||||
- AWS Bedrock (Hosted)
|
||||
- Cerebras (Hosted)
|
||||
- Groq (Hosted)
|
||||
- SambaNova (Hosted)
|
||||
- PyTorch ExecuTorch (On-device iOS, Android)
|
||||
- OpenAI (Hosted)
|
||||
- Anthropic (Hosted)
|
||||
- Gemini (Hosted)
|
||||
- WatsonX (Hosted)
|
||||
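As a minimal illustration, the following sketch calls the Inference API through the `llama-stack-client` package; the server URL and model id are assumptions that depend on your distribution and registered providers, and the exact client surface may differ between client versions.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed server address

response = client.chat.completions.create(
    model="llama3.2:3b",  # assumed model id registered with your inference provider
    messages=[{"role": "user", "content": "What is Llama Stack?"}],
)
print(response.choices[0].message.content)
```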
|
||||
### Agents API
|
||||
Run multi-step agentic workflows with LLMs, including tool usage, memory (RAG), and complex reasoning.
|
||||
|
||||
**Supported Providers:**
|
||||
- Meta Reference (Single Node)
|
||||
- Fireworks (Hosted)
|
||||
- Together (Hosted)
|
||||
- PyTorch ExecuTorch (On-device iOS)
|
||||
|
||||
### Vector IO API
|
||||
Perform operations on vector stores, including adding documents, searching, and deleting documents.
|
||||
|
||||
**Supported Providers:**
|
||||
- FAISS (Single Node)
|
||||
- SQLite-Vec (Single Node)
|
||||
- Chroma (Hosted and Single Node)
|
||||
- Milvus (Hosted and Single Node)
|
||||
- Postgres (PGVector) (Hosted and Single Node)
|
||||
- Weaviate (Hosted)
|
||||
- Qdrant (Hosted and Single Node)
|
||||
|
||||
### Files API (OpenAI-compatible)
|
||||
Manage file uploads, storage, and retrieval with OpenAI-compatible endpoints.
|
||||
|
||||
**Supported Providers:**
|
||||
- Local Filesystem (Single Node)
|
||||
- S3 (Hosted)
|
||||
|
||||
### Vector Store Files API (OpenAI-compatible)
|
||||
Integrate file operations with vector stores for automatic document processing and search.
|
||||
|
||||
**Supported Providers:**
|
||||
- FAISS (Single Node)
|
||||
- SQLite-vec (Single Node)
|
||||
- Milvus (Single Node)
|
||||
- ChromaDB (Hosted and Single Node)
|
||||
- Qdrant (Hosted and Single Node)
|
||||
- Weaviate (Hosted)
|
||||
- Postgres (PGVector) (Hosted and Single Node)
|
||||
|
||||
### Safety API
|
||||
Apply safety policies to outputs at the system level, not just the model level.
|
||||
|
||||
**Supported Providers:**
|
||||
- Llama Guard (Depends on Inference Provider)
|
||||
- Prompt Guard (Single Node)
|
||||
- Code Scanner (Single Node)
|
||||
- AWS Bedrock (Hosted)
|
||||
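For illustration, a shield can be invoked directly through the client as sketched below; the shield id and message are placeholders, and the exact response fields may vary by provider.

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

result = client.safety.run_shield(
    shield_id="llama-guard",  # assumed shield id registered on your server
    messages=[{"role": "user", "content": "User input to screen."}],
    params={},
)
if result.violation:
    print(result.violation.user_message)
```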
|
||||
### Post Training API
|
||||
Fine-tune models for specific use cases and domains.
|
||||
|
||||
**Supported Providers:**
|
||||
- Meta Reference (Single Node)
|
||||
- HuggingFace (Single Node)
|
||||
- TorchTune (Single Node)
|
||||
- NVIDIA NEMO (Hosted)
|
||||
|
||||
### Eval API
|
||||
Generate outputs and perform scoring to evaluate system performance.
|
||||
|
||||
**Supported Providers:**
|
||||
- Meta Reference (Single Node)
|
||||
- NVIDIA NEMO (Hosted)
|
||||
|
||||
### Telemetry API
|
||||
Collect telemetry data from the system for monitoring and observability.
|
||||
|
||||
**Supported Providers:**
|
||||
- Meta Reference (Single Node)
|
||||
|
||||
### Tool Runtime API
|
||||
Interact with various tools and protocols to extend LLM capabilities.
|
||||
|
||||
**Supported Providers:**
|
||||
- Brave Search (Hosted)
|
||||
- RAG Runtime (Single Node)
|
||||
|
||||
## API Compatibility
|
||||
|
||||
All Llama Stack APIs are designed to be OpenAI-compatible, allowing you to:
|
||||
- Use existing OpenAI API clients and tools
|
||||
- Migrate from OpenAI to other providers seamlessly
|
||||
- Maintain consistent API contracts across different environments
|
||||
|
||||
## Getting Started
|
||||
|
||||
To get started with Llama Stack APIs:
|
||||
|
||||
1. **Choose a Distribution**: Select a pre-configured distribution that matches your environment
|
||||
2. **Configure Providers**: Set up the providers you want to use for each API
|
||||
3. **Start the Server**: Launch the Llama Stack server with your configuration
|
||||
4. **Use the APIs**: Make requests to the API endpoints using your preferred client
|
||||
|
||||
For detailed setup instructions, see our [Getting Started Guide](../getting_started/quickstart).
|
||||
|
||||
## Provider Details
|
||||
|
||||
For complete provider compatibility and setup instructions, see our [Providers Documentation](../providers/).
|
||||
|
||||
## API Stability
|
||||
|
||||
Llama Stack APIs are organized by stability level:
|
||||
- **[Stable APIs](./index.mdx)** - Production-ready APIs with full support
|
||||
- **[Experimental APIs](../api-experimental/)** - APIs in development with limited support
|
||||
- **[Deprecated APIs](../api-deprecated/)** - Legacy APIs being phased out
|
||||
|
||||
## OpenAI Integration
|
||||
|
||||
For specific OpenAI API compatibility features, see our [OpenAI Compatibility Guide](../api-openai/).
|
||||
|
|
@ -35,9 +35,6 @@ Here are the key topics that will help you build effective AI applications:
|
|||
- **[Telemetry](./telemetry.mdx)** - Monitor and analyze your agents' performance and behavior
|
||||
- **[Safety](./safety.mdx)** - Implement guardrails and safety measures to ensure responsible AI behavior
|
||||
|
||||
### 🎮 **Interactive Development**
|
||||
- **[Playground](./playground.mdx)** - Interactive environment for testing and developing applications
|
||||
|
||||
## Application Patterns
|
||||
|
||||
### 🤖 **Conversational Agents**
|
||||
|
|
|
|||
|
|
@ -1,298 +1,87 @@
|
|||
---
|
||||
title: Llama Stack Playground
|
||||
description: Interactive interface to explore and experiment with Llama Stack capabilities
|
||||
title: Admin UI & Chat Playground
|
||||
description: Web-based admin interface and chat playground for Llama Stack
|
||||
sidebar_label: Playground
|
||||
sidebar_position: 10
|
||||
---
|
||||
|
||||
import Tabs from '@theme/Tabs';
|
||||
import TabItem from '@theme/TabItem';
|
||||
# Admin UI & Chat Playground
|
||||
|
||||
# Llama Stack Playground
|
||||
The Llama Stack UI provides a comprehensive web-based admin interface for managing your Llama Stack server, with an integrated chat playground for interactive testing. This admin interface is the primary way to monitor, manage, and debug your Llama Stack applications.
|
||||
|
||||
:::note[Experimental Feature]
|
||||
The Llama Stack Playground is currently experimental and subject to change. We welcome feedback and contributions to help improve it.
|
||||
:::
|
||||
## Quick Start
|
||||
|
||||
The Llama Stack Playground is a simple interface that aims to:
|
||||
- **Showcase capabilities and concepts** of Llama Stack in an interactive environment
|
||||
- **Demo end-to-end application code** to help users get started building their own applications
|
||||
- **Provide a UI** to help users inspect and understand Llama Stack API providers and resources
|
||||
|
||||
## Key Features
|
||||
|
||||
### Interactive Playground Pages
|
||||
|
||||
The playground provides interactive pages for users to explore Llama Stack API capabilities:
|
||||
|
||||
#### Chatbot Interface
|
||||
|
||||
<video
|
||||
controls
|
||||
autoPlay
|
||||
playsInline
|
||||
muted
|
||||
loop
|
||||
style={{width: '100%'}}
|
||||
>
|
||||
<source src="https://github.com/user-attachments/assets/8d2ef802-5812-4a28-96e1-316038c84cbf" type="video/mp4" />
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="chat" label="Chat">
|
||||
|
||||
**Simple Chat Interface**
|
||||
- Chat directly with Llama models through an intuitive interface
|
||||
- Uses the `/chat/completions` streaming API under the hood
|
||||
- Real-time message streaming for responsive interactions
|
||||
- Perfect for testing model capabilities and prompt engineering
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="rag" label="RAG Chat">
|
||||
|
||||
**Document-Aware Conversations**
|
||||
- Upload documents to create memory banks
|
||||
- Chat with a RAG-enabled agent that can query your documents
|
||||
- Uses Llama Stack's `/agents` API to create and manage RAG sessions
|
||||
- Ideal for exploring knowledge-enhanced AI applications
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
#### Evaluation Interface
|
||||
|
||||
<video
|
||||
controls
|
||||
autoPlay
|
||||
playsInline
|
||||
muted
|
||||
loop
|
||||
style={{width: '100%'}}
|
||||
>
|
||||
<source src="https://github.com/user-attachments/assets/6cc1659f-eba4-49ca-a0a5-7c243557b4f5" type="video/mp4" />
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="scoring" label="Scoring Evaluations">
|
||||
|
||||
**Custom Dataset Evaluation**
|
||||
- Upload your own evaluation datasets
|
||||
- Run evaluations using available scoring functions
|
||||
- Uses Llama Stack's `/scoring` API for flexible evaluation workflows
|
||||
- Great for testing application performance on custom metrics
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="benchmarks" label="Benchmark Evaluations">
|
||||
|
||||
<video
|
||||
controls
|
||||
autoPlay
|
||||
playsInline
|
||||
muted
|
||||
loop
|
||||
style={{width: '100%', marginBottom: '1rem'}}
|
||||
>
|
||||
<source src="https://github.com/user-attachments/assets/345845c7-2a2b-4095-960a-9ae40f6a93cf" type="video/mp4" />
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
**Pre-registered Evaluation Tasks**
|
||||
- Evaluate models or agents on pre-defined tasks
|
||||
- Uses Llama Stack's `/eval` API for comprehensive evaluation
|
||||
- Combines datasets and scoring functions for standardized testing
|
||||
|
||||
**Setup Requirements:**
|
||||
Register evaluation datasets and benchmarks first:
|
||||
Launch the admin UI with:
|
||||
|
||||
```bash
|
||||
# Register evaluation dataset
|
||||
llama-stack-client datasets register \
|
||||
--dataset-id "mmlu" \
|
||||
--provider-id "huggingface" \
|
||||
--url "https://huggingface.co/datasets/llamastack/evals" \
|
||||
--metadata '{"path": "llamastack/evals", "name": "evals__mmlu__details", "split": "train"}' \
|
||||
--schema '{"input_query": {"type": "string"}, "expected_answer": {"type": "string"}, "chat_completion_input": {"type": "string"}}'
|
||||
|
||||
# Register benchmark task
|
||||
llama-stack-client benchmarks register \
|
||||
--eval-task-id meta-reference-mmlu \
|
||||
--provider-id meta-reference \
|
||||
--dataset-id mmlu \
|
||||
--scoring-functions basic::regex_parser_multiple_choice_answer
|
||||
npx llama-stack-ui
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
Then visit `http://localhost:8322` to access the interface.
|
||||
|
||||
#### Inspection Interface
|
||||
## Admin Interface Features
|
||||
|
||||
<video
|
||||
controls
|
||||
autoPlay
|
||||
playsInline
|
||||
muted
|
||||
loop
|
||||
style={{width: '100%'}}
|
||||
>
|
||||
<source src="https://github.com/user-attachments/assets/01d52b2d-92af-4e3a-b623-a9b8ba22ba99" type="video/mp4" />
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
The Llama Stack UI is organized into three main sections:
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="providers" label="API Providers">
|
||||
### 🎯 Create
|
||||
**Chat Playground** - Interactive testing environment
|
||||
- Real-time chat interface for testing agents and models
|
||||
- Multi-turn conversations with tool calling support
|
||||
- Agent SDK integration (will be migrated to Responses API)
|
||||
- Custom system prompts and model parameter adjustment
|
||||
|
||||
**Provider Management**
|
||||
- Inspect available Llama Stack API providers
|
||||
- View provider configurations and capabilities
|
||||
- Uses the `/providers` API for real-time provider information
|
||||
- Essential for understanding your deployment's capabilities
|
||||
### 📊 Manage
|
||||
**Logs & Resource Management** - Monitor and manage your stack
|
||||
- **Responses Logs**: View and analyze agent responses and interactions
|
||||
- **Chat Completions Logs**: Monitor chat completion requests and responses
|
||||
- **Vector Stores**: Create, manage, and monitor vector databases for RAG workflows
|
||||
- **Prompts**: Full CRUD operations for prompt templates and management
|
||||
- **Files**: Forthcoming file management capabilities
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="resources" label="API Resources">
|
||||
## Key Capabilities for Application Development
|
||||
|
||||
**Resource Exploration**
|
||||
- Inspect Llama Stack API resources including:
|
||||
- **Models**: Available language models
|
||||
- **Datasets**: Registered evaluation datasets
|
||||
- **Memory Banks**: Vector databases and knowledge stores
|
||||
- **Benchmarks**: Evaluation tasks and scoring functions
|
||||
- **Shields**: Safety and content moderation tools
|
||||
- Uses `/<resources>/list` APIs for comprehensive resource visibility
|
||||
- For detailed information about resources, see [Core Concepts](/docs/concepts)
|
||||
### Real-time Monitoring
|
||||
- **Response Tracking**: Monitor all agent responses and tool calls
|
||||
- **Completion Analysis**: View chat completion performance and patterns
|
||||
- **Vector Store Activity**: Track RAG operations and document processing
|
||||
- **Prompt Usage**: Analyze prompt template performance
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
### Resource Management
|
||||
- **Vector Store CRUD**: Create, update, and delete vector databases
|
||||
- **Prompt Library**: Organize and version control your prompts
|
||||
- **File Operations**: Manage documents and assets (forthcoming)
|
||||
|
||||
### Interactive Testing
|
||||
- **Chat Playground**: Test conversational flows before production deployment
|
||||
- **Agent Prototyping**: Validate agent behaviors and tool integrations
|
||||
|
||||
## Development Workflow Integration
|
||||
|
||||
The admin UI supports your development lifecycle:
|
||||
|
||||
1. **Development**: Use chat playground to prototype and test features
|
||||
2. **Monitoring**: Track system performance through logs and metrics
|
||||
3. **Management**: Organize prompts, vector stores, and other resources
|
||||
4. **Debugging**: Analyze logs to identify and resolve issues
|
||||
|
||||
## Architecture Notes
|
||||
|
||||
- **Current**: Chat playground uses Agents SDK
|
||||
- **Future**: Migration to Responses API for improved performance and consistency
|
||||
- **Admin Focus**: Primary emphasis on monitoring, logging, and resource management
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Quick Start Guide
|
||||
1. **Launch the UI**: Run `npx llama-stack-ui`
|
||||
2. **Explore Logs**: Start with Responses and Chat Completions logs to understand your system activity
|
||||
3. **Test in Playground**: Use the chat interface to validate your agent configurations
|
||||
4. **Manage Resources**: Create vector stores and organize prompts through the UI
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="setup" label="Setup">
|
||||
For detailed setup and configuration, see the [Llama Stack UI documentation](/docs/distributions/llama_stack_ui).
|
||||
|
||||
**1. Start the Llama Stack API Server**
|
||||
## Next Steps
|
||||
|
||||
```bash
|
||||
llama stack list-deps together | xargs -L1 uv pip install
|
||||
llama stack run together
|
||||
```
|
||||
|
||||
**2. Start the Streamlit UI**
|
||||
|
||||
```bash
|
||||
# Launch the playground interface
|
||||
uv run --with ".[ui]" streamlit run llama_stack.core/ui/app.py
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="usage" label="Usage Tips">
|
||||
|
||||
**Making the Most of the Playground:**
|
||||
|
||||
- **Start with Chat**: Test basic model interactions and prompt engineering
|
||||
- **Explore RAG**: Upload sample documents to see knowledge-enhanced responses
|
||||
- **Try Evaluations**: Use the scoring interface to understand evaluation metrics
|
||||
- **Inspect Resources**: Check what providers and resources are available
|
||||
- **Experiment with Settings**: Adjust parameters to see how they affect results
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Available Distributions
|
||||
|
||||
The playground works with any Llama Stack distribution. Popular options include:
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="together" label="Together AI">
|
||||
|
||||
```bash
|
||||
llama stack list-deps together | xargs -L1 uv pip install
|
||||
llama stack run together
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Cloud-hosted models
|
||||
- Fast inference
|
||||
- Multiple model options
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="ollama" label="Ollama (Local)">
|
||||
|
||||
```bash
|
||||
llama stack list-deps ollama | xargs -L1 uv pip install
|
||||
llama stack run ollama
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Local model execution
|
||||
- Privacy-focused
|
||||
- No internet required
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="meta-reference" label="Meta Reference">
|
||||
|
||||
```bash
|
||||
llama stack list-deps meta-reference | xargs -L1 uv pip install
|
||||
llama stack run meta-reference
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Reference implementation
|
||||
- All API features available
|
||||
- Best for development
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
## Use Cases & Examples
|
||||
|
||||
### Educational Use Cases
|
||||
- **Learning Llama Stack**: Hands-on exploration of API capabilities
|
||||
- **Prompt Engineering**: Interactive testing of different prompting strategies
|
||||
- **RAG Experimentation**: Understanding how document retrieval affects responses
|
||||
- **Evaluation Understanding**: See how different metrics evaluate model performance
|
||||
|
||||
### Development Use Cases
|
||||
- **Prototype Testing**: Quick validation of application concepts
|
||||
- **API Exploration**: Understanding available endpoints and parameters
|
||||
- **Integration Planning**: Seeing how different components work together
|
||||
- **Demo Creation**: Showcasing Llama Stack capabilities to stakeholders
|
||||
|
||||
### Research Use Cases
|
||||
- **Model Comparison**: Side-by-side testing of different models
|
||||
- **Evaluation Design**: Understanding how scoring functions work
|
||||
- **Safety Testing**: Exploring shield effectiveness with different inputs
|
||||
- **Performance Analysis**: Measuring model behavior across different scenarios
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 🚀 **Getting Started**
|
||||
- Begin with simple chat interactions to understand basic functionality
|
||||
- Gradually explore more advanced features like RAG and evaluations
|
||||
- Use the inspection tools to understand your deployment's capabilities
|
||||
|
||||
### 🔧 **Development Workflow**
|
||||
- Use the playground to prototype before writing application code
|
||||
- Test different parameter settings interactively
|
||||
- Validate evaluation approaches before implementing them programmatically
|
||||
|
||||
### 📊 **Evaluation & Testing**
|
||||
- Start with simple scoring functions before trying complex evaluations
|
||||
- Use the playground to understand evaluation results before automation
|
||||
- Test safety features with various input types
|
||||
|
||||
### 🎯 **Production Preparation**
|
||||
- Use playground insights to inform your production API usage
|
||||
- Test edge cases and error conditions interactively
|
||||
- Validate resource configurations before deployment
|
||||
|
||||
## Related Resources
|
||||
|
||||
- **[Getting Started Guide](../getting_started/quickstart)** - Complete setup and introduction
|
||||
- **[Core Concepts](/docs/concepts)** - Understanding Llama Stack fundamentals
|
||||
- **[Agents](./agent)** - Building intelligent agents
|
||||
- **[RAG (Retrieval Augmented Generation)](./rag)** - Knowledge-enhanced applications
|
||||
- **[Evaluations](./evals)** - Comprehensive evaluation framework
|
||||
- **[API Reference](/docs/api/llama-stack-specification)** - Complete API documentation
|
||||
- Set up your [first agent](/docs/building_applications/agent)
|
||||
- Implement [RAG functionality](/docs/building_applications/rag)
|
||||
- Add [evaluation metrics](/docs/building_applications/evals)
|
||||
- Configure [safety measures](/docs/building_applications/safety)
|
||||
|
|
|
|||
|
|
@ -104,23 +104,19 @@ client.toolgroups.register(
|
|||
)
|
||||
```
|
||||
|
||||
Note that most of the more useful MCP servers need you to authenticate with them. Many of them use OAuth2.0 for authentication. You can provide authorization headers to send to the MCP server using the "Provider Data" abstraction provided by Llama Stack. When making an agent call,
|
||||
Note that most of the more useful MCP servers need you to authenticate with them. Many of them use OAuth2.0 for authentication. You can provide the authorization token when creating the Agent:
|
||||
|
||||
```python
|
||||
agent = Agent(
|
||||
...,
|
||||
tools=["mcp::deepwiki"],
|
||||
extra_headers={
|
||||
"X-LlamaStack-Provider-Data": json.dumps(
|
||||
tools=[
|
||||
{
|
||||
"mcp_headers": {
|
||||
"http://mcp.deepwiki.com/sse": {
|
||||
"Authorization": "Bearer <your_access_token>",
|
||||
},
|
||||
},
|
||||
"type": "mcp",
|
||||
"server_url": "https://mcp.deepwiki.com/sse",
|
||||
"server_label": "mcp::deepwiki",
|
||||
"authorization": "<your_access_token>", # OAuth token (without "Bearer " prefix)
|
||||
}
|
||||
),
|
||||
},
|
||||
],
|
||||
)
|
||||
agent.create_turn(...)
|
||||
```
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ External APIs must expose a `available_providers()` function in their module tha
|
|||
|
||||
```python
|
||||
# llama_stack_api_weather/api.py
|
||||
from llama_stack.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
|
||||
from llama_stack_api import Api, InlineProviderSpec, ProviderSpec
|
||||
|
||||
|
||||
def available_providers() -> list[ProviderSpec]:
|
||||
|
|
@ -79,7 +79,7 @@ A Protocol class like so:
|
|||
# llama_stack_api_weather/api.py
|
||||
from typing import Protocol
|
||||
|
||||
from llama_stack.schema_utils import webmethod
|
||||
from llama_stack_api import webmethod
|
||||
|
||||
|
||||
class WeatherAPI(Protocol):
|
||||
|
|
@ -151,13 +151,12 @@ __all__ = ["WeatherAPI", "available_providers"]
|
|||
# llama-stack-api-weather/src/llama_stack_api_weather/weather.py
|
||||
from typing import Protocol
|
||||
|
||||
from llama_stack.providers.datatypes import (
|
||||
from llama_stack_api import (
|
||||
Api,
|
||||
ProviderSpec,
|
||||
RemoteProviderSpec,
|
||||
webmethod,
|
||||
)
|
||||
from llama_stack.schema_utils import webmethod
|
||||
|
||||
|
||||
def available_providers() -> list[ProviderSpec]:
|
||||
return [
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ sidebar_position: 1
|
|||
|
||||
# APIs
|
||||
|
||||
A Llama Stack API is described as a collection of REST endpoints. We currently support the following APIs:
|
||||
A Llama Stack API is described as a collection of REST endpoints following OpenAI API standards. We currently support the following APIs:
|
||||
|
||||
- **Inference**: run inference with an LLM
|
||||
- **Safety**: apply safety policies to the output at a Systems (not only model) level
|
||||
|
|
@ -16,12 +16,26 @@ A Llama Stack API is described as a collection of REST endpoints. We currently s
|
|||
- **Scoring**: evaluate outputs of the system
|
||||
- **Eval**: generate outputs (via Inference or Agents) and perform scoring
|
||||
- **VectorIO**: perform operations on vector stores, such as adding documents, searching, and deleting documents
|
||||
- **Files**: manage file uploads, storage, and retrieval
|
||||
- **Telemetry**: collect telemetry data from the system
|
||||
- **Post Training**: fine-tune a model
|
||||
- **Tool Runtime**: interact with various tools and protocols
|
||||
- **Responses**: generate responses from an LLM using this OpenAI compatible API.
|
||||
- **Responses**: generate responses from an LLM
|
||||
|
||||
We are working on adding a few more APIs to complete the application lifecycle. These will include:
|
||||
- **Batch Inference**: run inference on a dataset of inputs
|
||||
- **Batch Agents**: run agents on a dataset of inputs
|
||||
- **Synthetic Data Generation**: generate synthetic data for model development
|
||||
- **Batches**: OpenAI-compatible batch management for inference
|
||||
|
||||
|
||||
## OpenAI API Compatibility
|
||||
We are working on adding OpenAI API compatibility to Llama Stack. This will allow you to use Llama Stack with OpenAI API clients and tools.
|
||||
|
||||
### File Operations and Vector Store Integration
|
||||
|
||||
The Files API and Vector Store APIs work together through file operations, enabling automatic document processing and search. This integration implements the [OpenAI Vector Store Files API specification](https://platform.openai.com/docs/api-reference/vector-stores-files) and allows you to:
|
||||
- Upload documents through the Files API
|
||||
- Automatically process and chunk documents into searchable vectors
|
||||
- Store processed content in any supported vector database (see [our providers](../../providers/index.mdx) for availability)
|
||||
- Search through documents using natural language queries
|
||||
For detailed information about this integration, see [File Operations and Vector Store Integration](../file_operations_vector_stores.md).
|
||||
|
|
|
|||
420
docs/docs/concepts/file_operations_vector_stores.mdx
Normal file
420
docs/docs/concepts/file_operations_vector_stores.mdx
Normal file
|
|
@ -0,0 +1,420 @@
|
|||
# File Operations and Vector Store Integration
|
||||
|
||||
## Overview
|
||||
|
||||
Llama Stack provides seamless integration between the Files API and Vector Store APIs, enabling you to upload documents and automatically process them into searchable vector embeddings. This integration implements file operations following the [OpenAI Vector Store Files API specification](https://platform.openai.com/docs/api-reference/vector-stores-files).
|
||||
|
||||
## Enhanced Capabilities Beyond OpenAI
|
||||
|
||||
While Llama Stack maintains full compatibility with OpenAI's Vector Store API, it provides several additional capabilities that enhance functionality and flexibility:
|
||||
|
||||
### **Embedding Model Specification**
|
||||
Unlike OpenAI's vector stores, which use a fixed embedding model, Llama Stack allows you to specify which embedding model to use when creating a vector store:
|
||||
|
||||
```python
|
||||
# Create vector store with specific embedding model
|
||||
vector_store = client.vector_stores.create(
|
||||
name="my_documents",
|
||||
embedding_model="all-MiniLM-L6-v2", # Specify your preferred model
|
||||
embedding_dimension=384,
|
||||
)
|
||||
```
|
||||
|
||||
### **Advanced Search Modes**
|
||||
Llama Stack supports multiple search modes beyond basic vector similarity:
|
||||
|
||||
- **Vector Search**: Pure semantic similarity search using embeddings
|
||||
- **Keyword Search**: Traditional keyword-based search for exact matches
|
||||
- **Hybrid Search**: Combines both vector and keyword search for optimal results
|
||||
|
||||
```python
|
||||
# Different search modes
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store.id,
|
||||
query="machine learning algorithms",
|
||||
search_mode="hybrid", # or "vector", "keyword"
|
||||
max_num_results=5,
|
||||
)
|
||||
```
|
||||
|
||||
### **Flexible Ranking Options**
|
||||
For hybrid search, Llama Stack offers configurable ranking strategies:
|
||||
|
||||
- **RRF (Reciprocal Rank Fusion)**: Combines rankings with configurable impact factor
|
||||
- **Weighted Ranker**: Linear combination of vector and keyword scores with adjustable weights
|
||||
|
||||
```python
|
||||
# Custom ranking configuration
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store.id,
|
||||
query="neural networks",
|
||||
search_mode="hybrid",
|
||||
ranking_options={
|
||||
"ranker": {"type": "weighted", "alpha": 0.7} # 70% vector, 30% keyword
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
### **Provider Selection**
|
||||
Choose from multiple vector store providers based on your specific needs:
|
||||
|
||||
- **Inline Providers**: FAISS (fast in-memory), SQLite-vec (disk-based), Milvus (high-performance)
|
||||
- **Remote Providers**: ChromaDB, Qdrant, Weaviate, Postgres (PGVector), Milvus
|
||||
|
||||
```python
|
||||
# Specify provider when creating vector store
|
||||
vector_store = client.vector_stores.create(
|
||||
name="my_documents", provider_id="sqlite-vec" # Choose your preferred provider
|
||||
)
|
||||
```
|
||||
|
||||
## How It Works
|
||||
|
||||
The file operations work through several key components:
|
||||
|
||||
1. **File Upload**: Documents are uploaded through the Files API
|
||||
2. **Automatic Processing**: Files are automatically chunked and converted to embeddings
|
||||
3. **Vector Storage**: Chunks are stored in vector databases with metadata
|
||||
4. **Search & Retrieval**: Users can search through processed documents using natural language
|
||||
|
||||
## Supported Vector Store Providers
|
||||
|
||||
The following vector store providers support file operations:
|
||||
|
||||
### Inline Providers (Single Node)
|
||||
|
||||
- **FAISS**: Fast in-memory vector similarity search
|
||||
- **SQLite-vec**: Disk-based storage with hybrid search capabilities
|
||||
|
||||
### Remote Providers (Hosted)
|
||||
|
||||
- **ChromaDB**: Vector database with metadata filtering
|
||||
- **Weaviate**: Vector database with GraphQL interface
|
||||
- **Postgres (PGVector)**: Vector extensions for PostgreSQL
|
||||
|
||||
### Both Inline & Remote Providers
|
||||
- **Milvus**: High-performance vector database with advanced indexing
|
||||
- **Qdrant**: Vector similarity search with payload filtering
|
||||
|
||||
## File Processing Pipeline
|
||||
|
||||
### 1. File Upload
|
||||
|
||||
```python
|
||||
from llama_stack_client import LlamaStackClient
|
||||
|
||||
client = LlamaStackClient(base_url="http://localhost:8000")
|
||||
|
||||
# Upload a document
|
||||
with open("document.pdf", "rb") as f:
|
||||
    file_info = await client.files.create(file=f, purpose="assistants")
|
||||
```
|
||||
|
||||
### 2. Attach to Vector Store
|
||||
|
||||
```python
|
||||
# Create a vector store
|
||||
vector_store = client.vector_stores.create(name="my_documents")
|
||||
|
||||
# Attach the file to the vector store
|
||||
file_attach_response = await client.vector_stores.files.create(
|
||||
vector_store_id=vector_store.id, file_id=file_info.id
|
||||
)
|
||||
```
|
||||
|
||||
### 3. Automatic Processing
|
||||
|
||||
The system automatically:
|
||||
- Detects the file type and extracts text content
|
||||
- Splits content into chunks (default: 800 tokens with 400 token overlap)
|
||||
- Generates embeddings for each chunk
|
||||
- Stores chunks with metadata in the vector store
|
||||
- Updates file status to "completed"
|
||||
|
||||
### 4. Search and Retrieval
|
||||
|
||||
```python
|
||||
# Search through processed documents
|
||||
search_results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store.id,
|
||||
query="What is the main topic discussed?",
|
||||
max_num_results=5,
|
||||
)
|
||||
|
||||
# Process results
|
||||
for result in search_results.data:
|
||||
print(f"Score: {result.score}")
|
||||
for content in result.content:
|
||||
print(f"Content: {content.text}")
|
||||
```
|
||||
|
||||
## Supported File Types
|
||||
|
||||
The file processing pipeline supports various document formats:
|
||||
|
||||
- **Text Files**: `.txt`, `.md`, `.rst`
|
||||
- **Documents**: `.pdf`, `.docx`, `.doc`
|
||||
- **Code**: `.py`, `.js`, `.java`, `.cpp`, etc.
|
||||
- **Data**: `.json`, `.csv`, `.xml`
|
||||
- **Web Content**: HTML files
|
||||
|
||||
## Chunking Strategies
|
||||
|
||||
### Default Strategy
|
||||
|
||||
The default chunking strategy uses:
|
||||
- **Max Chunk Size**: 800 tokens
|
||||
- **Overlap**: 400 tokens
|
||||
- **Method**: Semantic boundary detection
|
||||
|
||||
### Custom Chunking
|
||||
|
||||
You can customize chunking when attaching files:
|
||||
|
||||
```python
|
||||
# Define a chunking strategy (OpenAI-compatible static format; values are illustrative)
chunking_strategy = {
    "type": "static",
    "static": {"max_chunk_size_tokens": 600, "chunk_overlap_tokens": 200},
}
|
||||
|
||||
# Attach file with custom chunking
|
||||
file_attach_response = await client.vector_stores.files.create(
|
||||
vector_store_id=vector_store.id,
|
||||
file_id=file_info.id,
|
||||
chunking_strategy=chunking_strategy,
|
||||
)
|
||||
```
|
||||
|
||||
**Note**: While Llama Stack is OpenAI-compatible, it also supports additional options beyond the standard OpenAI API. When creating vector stores, you can specify custom embedding models and embedding dimensions that will be used when processing chunks from attached files.
|
||||
|
||||
|
||||
## File Management
|
||||
|
||||
### List Files in Vector Store
|
||||
|
||||
```python
|
||||
# List all files in a vector store
|
||||
files = await client.vector_stores.files.list(vector_store_id=vector_store.id)
|
||||
|
||||
for file in files:
|
||||
print(f"File: {file.filename}, Status: {file.status}")
|
||||
```
|
||||
|
||||
### File Status Tracking
|
||||
|
||||
Files go through several statuses:
|
||||
- **in_progress**: File is being processed
|
||||
- **completed**: File successfully processed and searchable
|
||||
- **failed**: Processing failed (check `last_error` for details)
|
||||
- **cancelled**: Processing was cancelled
|
||||
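A simple way to wait for processing to finish is to poll the file until it reaches a terminal status; the sketch below follows the async style of the surrounding examples and reuses the `vector_store` and `file_info` objects from earlier, so it is only illustrative.

```python
import asyncio

# Poll until the attached file reaches a terminal status.
while True:
    vs_file = await client.vector_stores.files.retrieve(
        vector_store_id=vector_store.id, file_id=file_info.id
    )
    if vs_file.status in ("completed", "failed", "cancelled"):
        break
    await asyncio.sleep(1)

if vs_file.status == "failed":
    print(f"Processing failed: {vs_file.last_error}")
```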
|
||||
### Retrieve File Content
|
||||
|
||||
```python
|
||||
# Get chunked content from vector store
|
||||
content_response = await client.vector_stores.files.retrieve_content(
|
||||
vector_store_id=vector_store.id, file_id=file_info.id
|
||||
)
|
||||
|
||||
for chunk in content_response.content:
|
||||
print(f"Chunk {chunk.metadata.get('chunk_index', 0)}: {chunk.text}")
|
||||
```
|
||||
|
||||
## Vector Store Management
|
||||
|
||||
### List Vector Stores
|
||||
|
||||
Retrieve a paginated list of all vector stores:
|
||||
|
||||
```python
|
||||
# List all vector stores with default pagination
|
||||
vector_stores = await client.vector_stores.list()
|
||||
|
||||
# Custom pagination and ordering
|
||||
vector_stores = await client.vector_stores.list(
|
||||
limit=10,
|
||||
order="asc", # or "desc"
|
||||
after="vs_12345678", # cursor-based pagination
|
||||
)
|
||||
|
||||
for store in vector_stores.data:
|
||||
print(f"Store: {store.name}, Files: {store.file_counts.total}")
|
||||
print(f"Created: {store.created_at}, Status: {store.status}")
|
||||
```
|
||||
|
||||
### Retrieve Vector Store Details
|
||||
|
||||
Get detailed information about a specific vector store:
|
||||
|
||||
```python
|
||||
# Get vector store details
|
||||
store_details = await client.vector_stores.retrieve(vector_store_id="vs_12345678")
|
||||
|
||||
print(f"Name: {store_details.name}")
|
||||
print(f"Status: {store_details.status}")
|
||||
print(f"File Counts: {store_details.file_counts}")
|
||||
print(f"Usage: {store_details.usage_bytes} bytes")
|
||||
print(f"Created: {store_details.created_at}")
|
||||
print(f"Metadata: {store_details.metadata}")
|
||||
```
|
||||
|
||||
### Update Vector Store
|
||||
|
||||
Modify vector store properties such as name, metadata, or expiration settings:
|
||||
|
||||
```python
|
||||
# Update vector store name and metadata
|
||||
updated_store = await client.vector_stores.update(
|
||||
vector_store_id="vs_12345678",
|
||||
name="Updated Document Collection",
|
||||
metadata={
|
||||
"description": "Updated collection for research",
|
||||
"category": "research",
|
||||
"version": "2.0",
|
||||
},
|
||||
)
|
||||
|
||||
# Set expiration policy
|
||||
expired_store = await client.vector_stores.update(
|
||||
vector_store_id="vs_12345678",
|
||||
expires_after={"anchor": "last_active_at", "days": 30},
|
||||
)
|
||||
|
||||
print(f"Updated store: {updated_store.name}")
|
||||
print(f"Last active: {updated_store.last_active_at}")
|
||||
```
|
||||
|
||||
### Delete Vector Store
|
||||
|
||||
Remove a vector store and all its associated data:
|
||||
|
||||
```python
|
||||
# Delete a vector store
|
||||
delete_response = await client.vector_stores.delete(vector_store_id="vs_12345678")
|
||||
|
||||
if delete_response.deleted:
|
||||
print(f"Vector store {delete_response.id} successfully deleted")
|
||||
else:
|
||||
print("Failed to delete vector store")
|
||||
```
|
||||
|
||||
**Important Notes:**
|
||||
- Deleting a vector store removes all files, chunks, and embeddings
|
||||
- This operation cannot be undone
|
||||
- The underlying vector database is also cleaned up
|
||||
- Consider backing up important data before deletion
|
||||
|
||||
## Search Capabilities
|
||||
|
||||
### Vector Search
|
||||
|
||||
Pure similarity search using embeddings:
|
||||
|
||||
```python
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store.id,
|
||||
query="machine learning algorithms",
|
||||
max_num_results=10,
|
||||
)
|
||||
```
|
||||
|
||||
### Filtered Search
|
||||
|
||||
Combine vector search with metadata filtering:
|
||||
|
||||
```python
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store.id,
|
||||
query="machine learning algorithms",
|
||||
filters={"file_type": "pdf", "upload_date": "2024-01-01"},
|
||||
max_num_results=10,
|
||||
)
|
||||
```
|
||||
|
||||
### Hybrid Search
|
||||
|
||||
[SQLite-vec](../providers/vector_io/inline_sqlite-vec.mdx), [pgvector](../providers/vector_io/remote_pgvector.mdx), and [Milvus](../providers/vector_io/inline_milvus.mdx) support combining vector and keyword search.
|
||||
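For example, hybrid search can also use the reciprocal rank fusion ranker instead of the weighted ranker shown earlier; the ranker options below are a sketch, and the impact factor value is illustrative.

```python
results = await client.vector_stores.search(
    vector_store_id=vector_store.id,
    query="transformer architectures",
    search_mode="hybrid",
    ranking_options={
        "ranker": {"type": "rrf", "impact_factor": 60.0}  # assumed RRF ranker options
    },
    max_num_results=5,
)
```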
|
||||
## Performance Considerations
|
||||
|
||||
> **Note**: For detailed performance optimization strategies, see [Performance Considerations](../providers/files/openai_file_operations_support.md#performance-considerations) in the provider documentation.
|
||||
|
||||
**Key Points:**
|
||||
- **Chunk Size**: 400-600 tokens for precision, 800-1200 for context
|
||||
- **Storage**: Choose provider based on your performance needs
|
||||
- **Search**: Optimize for your specific use case
|
||||
|
||||
## Error Handling
|
||||
|
||||
> **Note**: For comprehensive troubleshooting and error handling, see [Troubleshooting](../providers/files/openai_file_operations_support.md#troubleshooting) in the provider documentation.
|
||||
|
||||
**Common Issues:**
|
||||
- File processing failures (format, size limits)
|
||||
- Search performance optimization
|
||||
- Storage and memory issues
|
||||
|
||||
## Best Practices
|
||||
|
||||
> **Note**: For detailed best practices and recommendations, see [Best Practices](../providers/files/openai_file_operations_support.md#best-practices) in the provider documentation.
|
||||
|
||||
**Key Recommendations:**
|
||||
- File organization and naming conventions
|
||||
- Chunking strategy optimization
|
||||
- Metadata and monitoring practices
|
||||
- Regular cleanup and maintenance
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### RAG Application
|
||||
|
||||
```python
|
||||
# Build a RAG system with file uploads
|
||||
async def build_rag_system():
|
||||
# Create vector store
|
||||
vector_store = client.vector_stores.create(name="knowledge_base")
|
||||
|
||||
# Upload and process documents
|
||||
documents = ["doc1.pdf", "doc2.pdf", "doc3.pdf"]
|
||||
for doc in documents:
|
||||
with open(doc, "rb") as f:
|
||||
file_info = await client.files.create(file=f, purpose="assistants")
|
||||
await client.vector_stores.files.create(
|
||||
vector_store_id=vector_store.id, file_id=file_info.id
|
||||
)
|
||||
|
||||
return vector_store
|
||||
|
||||
|
||||
# Query the RAG system
|
||||
async def query_rag(vector_store_id, question):
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store_id, query=question, max_num_results=5
|
||||
)
|
||||
return results
|
||||
```
|
||||
|
||||
### Document Analysis
|
||||
|
||||
```python
|
||||
# Analyze document content through vector search
|
||||
async def analyze_document(vector_store_id, file_id):
|
||||
# Get document content
|
||||
content = await client.vector_stores.files.retrieve_content(
|
||||
vector_store_id=vector_store_id, file_id=file_id
|
||||
)
|
||||
|
||||
# Search for specific topics
|
||||
topics = ["introduction", "methodology", "conclusion"]
|
||||
analysis = {}
|
||||
|
||||
for topic in topics:
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store_id, query=topic, max_num_results=3
|
||||
)
|
||||
analysis[topic] = results.data
|
||||
|
||||
return analysis
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Explore the [Files API documentation](../../providers/files/files.mdx) for detailed API reference
|
||||
- Check [Vector Store Providers](../providers/vector_io/index.mdx) for specific implementation details
|
||||
- Review [Getting Started](../getting_started/quickstart.mdx) for quick setup instructions
|
||||
|
|
@ -10,7 +10,7 @@ import TabItem from '@theme/TabItem';
|
|||
|
||||
# Kubernetes Deployment Guide
|
||||
|
||||
Deploy Llama Stack and vLLM servers in a Kubernetes cluster instead of running them locally. This guide covers both local development with Kind and production deployment on AWS EKS.
|
||||
Deploy Llama Stack and vLLM servers in a Kubernetes cluster instead of running them locally. This guide covers deployment using the Kubernetes operator to manage the Llama Stack server with Kind. The vLLM inference server is deployed manually.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
|
|
@ -110,115 +110,176 @@ spec:
|
|||
EOF
|
||||
```
|
||||
|
||||
### Step 3: Configure Llama Stack
|
||||
### Step 3: Install Kubernetes Operator
|
||||
|
||||
Update your run configuration:
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
inference:
|
||||
- provider_id: vllm
|
||||
provider_type: remote::vllm
|
||||
config:
|
||||
url: http://vllm-server.default.svc.cluster.local:8000/v1
|
||||
max_tokens: 4096
|
||||
api_token: fake
|
||||
```
|
||||
|
||||
Build container image:
|
||||
Install the Llama Stack Kubernetes operator to manage Llama Stack deployments:
|
||||
|
||||
```bash
|
||||
tmp_dir=$(mktemp -d) && cat >$tmp_dir/Containerfile.llama-stack-run-k8s <<EOF
|
||||
FROM distribution-myenv:dev
|
||||
RUN apt-get update && apt-get install -y git
|
||||
RUN git clone https://github.com/meta-llama/llama-stack.git /app/llama-stack-source
|
||||
ADD ./vllm-llama-stack-run-k8s.yaml /app/config.yaml
|
||||
EOF
|
||||
podman build -f $tmp_dir/Containerfile.llama-stack-run-k8s -t llama-stack-run-k8s $tmp_dir
|
||||
# Install from the latest main branch
|
||||
kubectl apply -f https://raw.githubusercontent.com/llamastack/llama-stack-k8s-operator/main/release/operator.yaml
|
||||
|
||||
# Or install a specific version (e.g., v0.4.0)
|
||||
# kubectl apply -f https://raw.githubusercontent.com/llamastack/llama-stack-k8s-operator/v0.4.0/release/operator.yaml
|
||||
```
|
||||
|
||||
### Step 4: Deploy Llama Stack Server
|
||||
Verify the operator is running:
|
||||
|
||||
```bash
|
||||
kubectl get pods -n llama-stack-operator-system
|
||||
```
|
||||
|
||||
For more information about the operator, see the [llama-stack-k8s-operator repository](https://github.com/llamastack/llama-stack-k8s-operator).
|
||||
|
||||
### Step 4: Deploy Llama Stack Server using Operator
|
||||
|
||||
Create a `LlamaStackDistribution` custom resource to deploy the Llama Stack server. The operator will automatically create the necessary Deployment, Service, and other resources.
|
||||
You can optionally override the default `run.yaml` using `spec.server.userConfig` with a ConfigMap (see [userConfig spec](https://github.com/llamastack/llama-stack-k8s-operator/blob/main/docs/api-overview.md#userconfigspec)).
|
||||
|
||||
```yaml
|
||||
cat <<EOF | kubectl apply -f -
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: llamastack.io/v1alpha1
|
||||
kind: LlamaStackDistribution
|
||||
metadata:
|
||||
name: llama-pvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: llama-stack-server
|
||||
name: llamastack-vllm
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: llama-stack
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: llama-stack
|
||||
spec:
|
||||
containers:
|
||||
- name: llama-stack
|
||||
image: localhost/llama-stack-run-k8s:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["llama", "stack", "run", "/app/config.yaml"]
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
volumeMounts:
|
||||
- name: llama-storage
|
||||
mountPath: /root/.llama
|
||||
volumes:
|
||||
- name: llama-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: llama-pvc
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: llama-stack-service
|
||||
spec:
|
||||
selector:
|
||||
app.kubernetes.io/name: llama-stack
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 5000
|
||||
targetPort: 5000
|
||||
type: ClusterIP
|
||||
server:
|
||||
distribution:
|
||||
name: starter
|
||||
containerSpec:
|
||||
port: 8321
|
||||
env:
|
||||
- name: VLLM_URL
|
||||
value: "http://vllm-server.default.svc.cluster.local:8000/v1"
|
||||
- name: VLLM_MAX_TOKENS
|
||||
value: "4096"
|
||||
- name: VLLM_API_TOKEN
|
||||
value: "fake"
|
||||
# Optional: override run.yaml from a ConfigMap using userConfig
|
||||
userConfig:
|
||||
configMap:
|
||||
name: llama-stack-config
|
||||
storage:
|
||||
size: "20Gi"
|
||||
mountPath: "/home/lls/.lls"
|
||||
EOF
|
||||
```
|
||||
|
||||
**Configuration Options:**
|
||||
|
||||
- `replicas`: Number of Llama Stack server instances to run
|
||||
- `server.distribution.name`: The distribution to use (e.g., `starter` for the starter distribution). See the [list of supported distributions](https://github.com/llamastack/llama-stack-k8s-operator/blob/main/distributions.json) in the operator repository.
|
||||
- `server.distribution.image`: (Optional) Custom container image for non-supported distributions. Use this field when deploying a distribution that is not in the supported list. If specified, this takes precedence over `name`.
|
||||
- `server.containerSpec.port`: Port on which the Llama Stack server listens (default: 8321)
|
||||
- `server.containerSpec.env`: Environment variables to configure providers:
|
||||
- `server.userConfig`: (Optional) Override the default `run.yaml` using a ConfigMap. See [userConfig spec](https://github.com/llamastack/llama-stack-k8s-operator/blob/main/docs/api-overview.md#userconfigspec).
|
||||
- `server.storage.size`: Size of the persistent volume for model and data storage
|
||||
- `server.storage.mountPath`: Where to mount the storage in the container
|
||||
|
||||
**Note:** For a complete list of supported distributions, see [distributions.json](https://github.com/llamastack/llama-stack-k8s-operator/blob/main/distributions.json) in the operator repository. To use a custom or non-supported distribution, set the `server.distribution.image` field with your container image instead of `server.distribution.name`.
|
||||
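For illustration, a minimal sketch of deploying a custom (non-supported) distribution via `server.distribution.image`; the image reference is a placeholder:

```yaml
apiVersion: llamastack.io/v1alpha1
kind: LlamaStackDistribution
metadata:
  name: llamastack-custom
spec:
  replicas: 1
  server:
    distribution:
      # Placeholder image; when set, this takes precedence over `name`
      image: registry.example.com/my-org/my-llama-stack:latest
    containerSpec:
      port: 8321
    storage:
      size: "20Gi"
```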
|
||||
The operator automatically creates:
|
||||
- A Deployment for the Llama Stack server
|
||||
- A Service to access the server
|
||||
- A PersistentVolumeClaim for storage
|
||||
- All necessary RBAC resources
|
||||
|
||||
|
||||
Check the status of your deployment:
|
||||
|
||||
```bash
|
||||
kubectl get llamastackdistribution
|
||||
kubectl describe llamastackdistribution llamastack-vllm
|
||||
```
|
||||
|
||||
### Step 5: Test Deployment
|
||||
|
||||
Wait for the Llama Stack server pod to be ready:
|
||||
|
||||
```bash
|
||||
# Port forward and test
|
||||
kubectl port-forward service/llama-stack-service 5000:5000
|
||||
llama-stack-client --endpoint http://localhost:5000 inference chat-completion --message "hello, what model are you?"
|
||||
# Check the status of the LlamaStackDistribution
|
||||
kubectl get llamastackdistribution llamastack-vllm
|
||||
|
||||
# Check the pods created by the operator
|
||||
kubectl get pods -l app.kubernetes.io/name=llama-stack
|
||||
|
||||
# Wait for the pod to be ready
|
||||
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=llama-stack --timeout=300s
|
||||
```
|
||||
|
||||
Get the service name created by the operator (it typically follows the pattern `<llamastackdistribution-name>-service`):
|
||||
|
||||
```bash
|
||||
# List services to find the service name
|
||||
kubectl get services | grep llamastack
|
||||
|
||||
# Port forward and test (replace SERVICE_NAME with the actual service name)
|
||||
kubectl port-forward service/llamastack-vllm-service 8321:8321
|
||||
```
|
||||
|
||||
In another terminal, test the deployment:
|
||||
|
||||
```bash
|
||||
llama-stack-client --endpoint http://localhost:8321 inference chat-completion --message "hello, what model are you?"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
**Check pod status:**
|
||||
### vLLM Server Issues
|
||||
|
||||
**Check vLLM pod status:**
|
||||
```bash
|
||||
kubectl get pods -l app.kubernetes.io/name=vllm
|
||||
kubectl logs -l app.kubernetes.io/name=vllm
|
||||
```
|
||||
|
||||
**Test service connectivity:**
|
||||
**Test vLLM service connectivity:**
|
||||
```bash
|
||||
kubectl run -it --rm debug --image=curlimages/curl --restart=Never -- curl http://vllm-server:8000/v1/models
|
||||
```
|
||||
|
||||
### Llama Stack Server Issues
|
||||
|
||||
**Check LlamaStackDistribution status:**
|
||||
```bash
|
||||
# Get detailed status
|
||||
kubectl describe llamastackdistribution llamastack-vllm
|
||||
|
||||
# Check for events
|
||||
kubectl get events --sort-by='.lastTimestamp' | grep llamastack-vllm
|
||||
```
|
||||
|
||||
**Check operator-managed pods:**
|
||||
```bash
|
||||
# List all pods managed by the operator
|
||||
kubectl get pods -l app.kubernetes.io/name=llama-stack
|
||||
|
||||
# Check pod logs (replace POD_NAME with actual pod name)
|
||||
kubectl logs -l app.kubernetes.io/name=llama-stack
|
||||
```
|
||||
|
||||
**Check operator status:**
|
||||
```bash
|
||||
# Verify the operator is running
|
||||
kubectl get pods -n llama-stack-operator-system
|
||||
|
||||
# Check operator logs if issues persist
|
||||
kubectl logs -n llama-stack-operator-system -l control-plane=controller-manager
|
||||
```
|
||||
|
||||
**Verify service connectivity:**
|
||||
```bash
|
||||
# Get the service endpoint
|
||||
kubectl get svc llamastack-vllm-service
|
||||
|
||||
# Test connectivity from within the cluster
|
||||
kubectl run -it --rm debug --image=curlimages/curl --restart=Never -- curl http://llamastack-vllm-service:8321/health
|
||||
```
|
||||
|
||||
## Related Resources
|
||||
|
||||
- **[Deployment Overview](/docs/deploying/)** - Overview of deployment options
|
||||
- **[Distributions](/docs/distributions)** - Understanding Llama Stack distributions
|
||||
- **[Configuration](/docs/distributions/configuration)** - Detailed configuration options
|
||||
- **[LlamaStack Operator](https://github.com/llamastack/llama-stack-k8s-operator)** - Overview of the llama-stack Kubernetes operator
|
||||
- **[LlamaStackDistribution](https://github.com/llamastack/llama-stack-k8s-operator/blob/main/docs/api-overview.md)** - API Spec of the llama-stack operator Custom Resource.
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ external_providers_dir: /workspace/providers.d
|
|||
Inside `providers.d/custom_ollama/provider.py`, define `get_provider_spec()` so the CLI can discover dependencies:
|
||||
|
||||
```python
|
||||
from llama_stack.providers.datatypes import ProviderSpec
|
||||
from llama_stack_api.providers.datatypes import ProviderSpec
|
||||
|
||||
|
||||
def get_provider_spec() -> ProviderSpec:
|
||||
|
|
|
|||
|
|
@ -58,13 +58,21 @@ storage:
|
|||
sql_default:
|
||||
type: sql_sqlite
|
||||
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/ollama}/sqlstore.db
|
||||
references:
|
||||
stores:
|
||||
metadata:
|
||||
backend: kv_default
|
||||
namespace: registry
|
||||
inference:
|
||||
backend: sql_default
|
||||
table_name: inference_store
|
||||
max_write_queue_size: 10000
|
||||
num_writers: 4
|
||||
conversations:
|
||||
backend: sql_default
|
||||
table_name: openai_conversations
|
||||
prompts:
|
||||
backend: kv_default
|
||||
namespace: prompts
|
||||
models:
|
||||
- metadata: {}
|
||||
model_id: ${env.INFERENCE_MODEL}
|
||||
|
|
@ -213,7 +221,15 @@ models:
|
|||
```
|
||||
A Model is an instance of a "Resource" (see [Concepts](../concepts/)) and is associated with a specific inference provider (in this case, the provider with identifier `ollama`). This is an instance of a "pre-registered" model. While we always encourage clients to register models before using them, some Stack servers may come up with a list of "already known and available" models.
|
||||
|
||||
What's with the `provider_model_id` field? This is an identifier for the model inside the provider's model catalog. Contrast it with `model_id` which is the identifier for the same model for Llama Stack's purposes. For example, you may want to name "llama3.2:vision-11b" as "image_captioning_model" when you use it in your Stack interactions. When omitted, the server will set `provider_model_id` to be the same as `model_id`.
|
||||
What's with the `provider_model_id` field? This is an identifier for the model inside the provider's model catalog. The `model_id` field is provided for configuration purposes but is not used as part of the model identifier.
|
||||
|
||||
**Important:** Models are identified as `provider_id/provider_model_id` in the system and when making API calls. When `provider_model_id` is omitted, the server will set it to be the same as `model_id`.
|
||||
|
||||
Examples:
|
||||
- Config: `model_id: llama3.2`, `provider_id: ollama`, `provider_model_id: null`
|
||||
→ Access as: `ollama/llama3.2`
|
||||
- Config: `model_id: my-llama`, `provider_id: vllm-inference`, `provider_model_id: llama-3-2-3b`
|
||||
→ Access as: `vllm-inference/llama-3-2-3b` (the `model_id` is not used in the identifier)
|
||||
|
||||
If you need to conditionally register a model in the configuration, such as only when specific environment variable(s) are set, this can be accomplished by utilizing a special `__disabled__` string as the default value of an environment variable substitution, as shown below:
|
||||
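As a minimal sketch of this pattern (the model and provider names are illustrative), the entry below is only registered when `INFERENCE_MODEL` is set:

```yaml
models:
  - metadata: {}
    # Registration is skipped entirely when INFERENCE_MODEL is unset
    model_id: ${env.INFERENCE_MODEL:=__disabled__}
    provider_id: ollama
    provider_model_id: ${env.INFERENCE_MODEL:=__disabled__}
```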
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ If you are planning to use an external service for Inference (even Ollama or TGI
|
|||
This avoids the overhead of setting up a server.
|
||||
```bash
|
||||
# setup
|
||||
uv pip install llama-stack
|
||||
uv pip install llama-stack llama-stack-client
|
||||
llama stack list-deps starter | xargs -L1 uv pip install
|
||||
```
|
||||
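Once the dependencies are installed, you can instantiate the stack in-process. A minimal sketch, assuming the `starter` distribution and the current `LlamaStackAsLibraryClient` import path (this path has moved between releases, so adjust if your version differs):

```python
from llama_stack.core.library_client import LlamaStackAsLibraryClient

# Build an in-process client for the "starter" distribution; no server needed.
client = LlamaStackAsLibraryClient("starter")
client.initialize()

# The client mirrors the llama-stack-client API, e.g. listing registered models:
print(client.models.list())
```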
|
||||
|
|
|
|||
|
|
@ -19,3 +19,4 @@ This section provides an overview of the distributions available in Llama Stack.
|
|||
- **[Starting Llama Stack Server](./starting_llama_stack_server.mdx)** - How to run distributions
|
||||
- **[Importing as Library](./importing_as_library.mdx)** - Use distributions in your code
|
||||
- **[Configuration Reference](./configuration.mdx)** - Configuration file format details
|
||||
- **[Llama Stack UI](./llama_stack_ui.mdx)** - Web-based user interface for interacting with Llama Stack servers
|
||||
|
|
|
|||
|
|
@ -113,13 +113,21 @@ data:
|
|||
db: ${env.POSTGRES_DB:=llamastack}
|
||||
user: ${env.POSTGRES_USER:=llamastack}
|
||||
password: ${env.POSTGRES_PASSWORD:=llamastack}
|
||||
references:
|
||||
stores:
|
||||
metadata:
|
||||
backend: kv_default
|
||||
namespace: registry
|
||||
inference:
|
||||
backend: sql_default
|
||||
table_name: inference_store
|
||||
max_write_queue_size: 10000
|
||||
num_writers: 4
|
||||
conversations:
|
||||
backend: sql_default
|
||||
table_name: openai_conversations
|
||||
prompts:
|
||||
backend: kv_default
|
||||
namespace: prompts
|
||||
models:
|
||||
- metadata:
|
||||
embedding_dimension: 768
|
||||
|
|
|
|||
|
|
@ -106,6 +106,9 @@ storage:
|
|||
conversations:
|
||||
table_name: openai_conversations
|
||||
backend: sql_default
|
||||
prompts:
|
||||
namespace: prompts
|
||||
backend: kv_default
|
||||
registered_resources:
|
||||
models:
|
||||
- metadata:
|
||||
|
|
|
|||
|
|
@ -44,7 +44,7 @@ spec:
|
|||
|
||||
# Navigate to the UI directory
|
||||
echo "Navigating to UI directory..."
|
||||
cd /app/llama_stack/ui
|
||||
cd /app/llama_stack_ui
|
||||
|
||||
# Check if package.json exists
|
||||
if [ ! -f "package.json" ]; then
|
||||
|
|
|
|||
109
docs/docs/distributions/llama_stack_ui.mdx
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
---
|
||||
title: Llama Stack UI
|
||||
description: Web-based user interface for interacting with Llama Stack servers
|
||||
sidebar_label: Llama Stack UI
|
||||
sidebar_position: 8
|
||||
---
|
||||
|
||||
# Llama Stack UI
|
||||
|
||||
The Llama Stack UI is a web-based interface for interacting with Llama Stack servers. Built with Next.js and React, it provides a visual way to work with agents, manage resources, and view logs.
|
||||
|
||||
## Features
|
||||
|
||||
- **Logs & Monitoring**: View chat completions, agent responses, and vector store activity
|
||||
- **Vector Stores**: Create and manage vector databases for RAG (Retrieval-Augmented Generation) workflows
|
||||
- **Prompt Management**: Create and manage reusable prompts
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You need a running Llama Stack server. The UI is a client that connects to the Llama Stack backend.
|
||||
|
||||
If you don't have a Llama Stack server running yet, see the [Starting Llama Stack Server](../getting_started/starting_llama_stack_server.mdx) guide.
|
||||
|
||||
## Running the UI
|
||||
|
||||
### Option 1: Using npx (Recommended for Quick Start)
|
||||
|
||||
The fastest way to get started is using `npx`:
|
||||
|
||||
```bash
|
||||
npx llama-stack-ui
|
||||
```
|
||||
|
||||
This will start the UI server on `http://localhost:8322` (default port).
|
||||
|
||||
### Option 2: Using Docker
|
||||
|
||||
Run the UI in a container:
|
||||
|
||||
```bash
|
||||
docker run -p 8322:8322 llamastack/ui
|
||||
```
|
||||
|
||||
Access the UI at `http://localhost:8322`.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
The UI can be configured using the following environment variables:
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `LLAMA_STACK_BACKEND_URL` | URL of your Llama Stack server | `http://localhost:8321` |
|
||||
| `LLAMA_STACK_UI_PORT` | Port for the UI server | `8322` |
|
||||
|
||||
If the Llama Stack server is running with authentication enabled, you can configure the UI to use it by setting the following environment variables:
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `NEXTAUTH_URL` | NextAuth URL for authentication | `http://localhost:8322` |
|
||||
| `GITHUB_CLIENT_ID` | GitHub OAuth client ID (optional, for authentication) | - |
|
||||
| `GITHUB_CLIENT_SECRET` | GitHub OAuth client secret (optional, for authentication) | - |
|
||||
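For example, to start the UI with GitHub OAuth enabled (the client ID and secret below are placeholders from your GitHub OAuth app):

```bash
NEXTAUTH_URL=http://localhost:8322 \
GITHUB_CLIENT_ID=<your-github-client-id> \
GITHUB_CLIENT_SECRET=<your-github-client-secret> \
npx llama-stack-ui
```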
|
||||
### Setting Environment Variables
|
||||
|
||||
#### For npx:
|
||||
|
||||
```bash
|
||||
LLAMA_STACK_BACKEND_URL=http://localhost:8321 \
|
||||
LLAMA_STACK_UI_PORT=8080 \
|
||||
npx llama-stack-ui
|
||||
```
|
||||
|
||||
#### For Docker:
|
||||
|
||||
```bash
|
||||
docker run -p 8080:8080 \
|
||||
-e LLAMA_STACK_BACKEND_URL=http://localhost:8321 \
|
||||
-e LLAMA_STACK_UI_PORT=8080 \
|
||||
llamastack/ui
|
||||
```
|
||||
|
||||
## Using the UI
|
||||
|
||||
### Managing Resources
|
||||
|
||||
- **Vector Stores**: Create vector databases for RAG workflows, view stored documents and embeddings
|
||||
- **Prompts**: Create and manage reusable prompt templates
|
||||
- **Chat Completions**: View history of chat interactions
|
||||
- **Responses**: Browse detailed agent responses and tool calls
|
||||
|
||||
## Development
|
||||
|
||||
If you want to run the UI from source for development:
|
||||
|
||||
```bash
|
||||
# From the project root
|
||||
cd src/llama_stack_ui
|
||||
|
||||
# Install dependencies
|
||||
npm install
|
||||
|
||||
# Set environment variables
|
||||
export LLAMA_STACK_BACKEND_URL=http://localhost:8321
|
||||
|
||||
# Start the development server
|
||||
npm run dev
|
||||
```
|
||||
|
||||
The development server will start on `http://localhost:8322` with hot reloading enabled.
|
||||
143
docs/docs/distributions/remote_hosted_distro/oci.md
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
---
|
||||
orphan: true
|
||||
---
|
||||
<!-- This file was auto-generated by distro_codegen.py, please edit source -->
|
||||
# OCI Distribution
|
||||
|
||||
The `llamastack/distribution-oci` distribution consists of the following provider configurations.
|
||||
|
||||
| API | Provider(s) |
|
||||
|-----|-------------|
|
||||
| agents | `inline::meta-reference` |
|
||||
| datasetio | `remote::huggingface`, `inline::localfs` |
|
||||
| eval | `inline::meta-reference` |
|
||||
| files | `inline::localfs` |
|
||||
| inference | `remote::oci` |
|
||||
| safety | `inline::llama-guard` |
|
||||
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
|
||||
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
|
||||
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The following environment variables can be configured:
|
||||
|
||||
- `OCI_AUTH_TYPE`: OCI authentication type (instance_principal or config_file) (default: `instance_principal`)
|
||||
- `OCI_REGION`: OCI region (e.g., us-ashburn-1, us-chicago-1, us-phoenix-1, eu-frankfurt-1) (default: ``)
|
||||
- `OCI_COMPARTMENT_OCID`: OCI compartment ID for the Generative AI service (default: ``)
|
||||
- `OCI_CONFIG_FILE_PATH`: OCI config file path (required if OCI_AUTH_TYPE is config_file) (default: `~/.oci/config`)
|
||||
- `OCI_CLI_PROFILE`: OCI CLI profile name to use from config file (default: `DEFAULT`)
|
||||
|
||||
|
||||
## Prerequisites
|
||||
### Oracle Cloud Infrastructure Setup
|
||||
|
||||
Before using the OCI Generative AI distribution, ensure you have:
|
||||
|
||||
1. **Oracle Cloud Infrastructure Account**: Sign up at [Oracle Cloud Infrastructure](https://cloud.oracle.com/)
|
||||
2. **Generative AI Service Access**: Enable the Generative AI service in your OCI tenancy
|
||||
3. **Compartment**: Create or identify a compartment where you'll deploy Generative AI models
|
||||
4. **Authentication**: Configure authentication using either:
|
||||
- **Instance Principal** (recommended for cloud-hosted deployments)
|
||||
- **API Key** (for on-premises or development environments)
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
#### Instance Principal Authentication (Recommended)
|
||||
Instance Principal authentication allows OCI resources to authenticate using the identity of the compute instance they're running on. This is the most secure method for production deployments.
|
||||
|
||||
Requirements:
|
||||
- Instance must be running in an Oracle Cloud Infrastructure compartment
|
||||
- Instance must have appropriate IAM policies to access Generative AI services
|
||||
|
||||
#### API Key Authentication
|
||||
For development or on-premises deployments, follow [this doc](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/apisigningkey.htm) to learn how to create your API signing key for your config file.
|
||||
|
||||
### Required IAM Policies
|
||||
|
||||
Ensure your OCI user or instance has the following policy statements:
|
||||
|
||||
```
|
||||
Allow group <group_name> to use generative-ai-inference-endpoints in compartment <compartment_name>
|
||||
Allow group <group_name> to manage generative-ai-inference-endpoints in compartment <compartment_name>
|
||||
```
|
||||
|
||||
## Supported Services
|
||||
|
||||
### Inference: OCI Generative AI
|
||||
Oracle Cloud Infrastructure Generative AI provides access to high-performance AI models through OCI's Platform-as-a-Service offering. The service supports:
|
||||
|
||||
- **Chat Completions**: Conversational AI with context awareness
|
||||
- **Text Generation**: Complete prompts and generate text content
|
||||
|
||||
#### Available Models
|
||||
Common OCI Generative AI models include access to Meta, Cohere, OpenAI, Grok, and more models.
|
||||
|
||||
### Safety: Llama Guard
|
||||
For content safety and moderation, this distribution uses Meta's LlamaGuard model through the OCI Generative AI service to provide:
|
||||
- Content filtering and moderation
|
||||
- Policy compliance checking
|
||||
- Harmful content detection
|
||||
|
||||
### Vector Storage: Multiple Options
|
||||
The distribution supports several vector storage providers:
|
||||
- **FAISS**: Local in-memory vector search
|
||||
- **ChromaDB**: Distributed vector database
|
||||
- **PGVector**: PostgreSQL with vector extensions
|
||||
|
||||
### Additional Services
|
||||
- **Dataset I/O**: Local filesystem and Hugging Face integration
|
||||
- **Tool Runtime**: Web search (Brave, Tavily) and RAG capabilities
|
||||
- **Evaluation**: Meta reference evaluation framework
|
||||
|
||||
## Running Llama Stack with OCI
|
||||
|
||||
You can run the OCI distribution via Docker or a local virtual environment.
|
||||
|
||||
### Via venv
|
||||
|
||||
If you've set up your local development environment, you can also build the image using your local virtual environment.
|
||||
|
||||
```bash
|
||||
OCI_AUTH_TYPE=$OCI_AUTH_TYPE OCI_REGION=$OCI_REGION OCI_COMPARTMENT_OCID=$OCI_COMPARTMENT_OCID llama stack run --port 8321 oci
|
||||
```
|
||||
|
||||
### Configuration Examples
|
||||
|
||||
#### Using Instance Principal (Recommended for Production)
|
||||
```bash
|
||||
export OCI_AUTH_TYPE=instance_principal
|
||||
export OCI_REGION=us-chicago-1
|
||||
export OCI_COMPARTMENT_OCID=ocid1.compartment.oc1..<your-compartment-id>
|
||||
```
|
||||
|
||||
#### Using API Key Authentication (Development)
|
||||
```bash
|
||||
export OCI_AUTH_TYPE=config_file
|
||||
export OCI_CONFIG_FILE_PATH=~/.oci/config
|
||||
export OCI_CLI_PROFILE=DEFAULT
|
||||
export OCI_REGION=us-chicago-1
|
||||
export OCI_COMPARTMENT_OCID=ocid1.compartment.oc1..your-compartment-id
|
||||
```
|
||||
|
||||
## Regional Endpoints
|
||||
|
||||
OCI Generative AI is available in multiple regions. The service automatically routes to the appropriate regional endpoint based on your configuration. For a full list of regional model availability, visit:
|
||||
|
||||
https://docs.oracle.com/en-us/iaas/Content/generative-ai/overview.htm#regions
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Authentication Errors**: Verify your OCI credentials and IAM policies
|
||||
2. **Model Not Found**: Ensure the model OCID is correct and the model is available in your region
|
||||
3. **Permission Denied**: Check compartment permissions and Generative AI service access
|
||||
4. **Region Unavailable**: Verify the specified region supports Generative AI services
|
||||
|
||||
### Getting Help
|
||||
|
||||
For additional support:
|
||||
- [OCI Generative AI Documentation](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)
|
||||
- [Llama Stack Issues](https://github.com/meta-llama/llama-stack/issues)
|
||||
|
|
@ -79,6 +79,33 @@ docker run \
|
|||
--port $LLAMA_STACK_PORT
|
||||
```
|
||||
|
||||
### Via Docker with Custom Run Configuration
|
||||
|
||||
You can also run the Docker container with a custom run configuration file by mounting it into the container:
|
||||
|
||||
```bash
|
||||
# Set the path to your custom run.yaml file
|
||||
CUSTOM_RUN_CONFIG=/path/to/your/custom-run.yaml
|
||||
LLAMA_STACK_PORT=8321
|
||||
|
||||
docker run \
|
||||
-it \
|
||||
--pull always \
|
||||
--gpu all \
|
||||
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
|
||||
-v ~/.llama:/root/.llama \
|
||||
-v $CUSTOM_RUN_CONFIG:/app/custom-run.yaml \
|
||||
-e RUN_CONFIG_PATH=/app/custom-run.yaml \
|
||||
llamastack/distribution-meta-reference-gpu \
|
||||
--port $LLAMA_STACK_PORT
|
||||
```
|
||||
|
||||
**Note**: The run configuration must be mounted into the container before it can be used. The `-v` flag mounts your local file into the container, and the `RUN_CONFIG_PATH` environment variable tells the entrypoint script which configuration to use.
|
||||
|
||||
Available run configurations for this distribution:
|
||||
- `run.yaml`
|
||||
- `run-with-safety.yaml`
|
||||
|
||||
### Via venv
|
||||
|
||||
Make sure you have the Llama Stack CLI available.
|
||||
|
|
|
|||
|
|
@ -127,13 +127,39 @@ docker run \
|
|||
-it \
|
||||
--pull always \
|
||||
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
|
||||
-v ./run.yaml:/root/my-run.yaml \
|
||||
-v ~/.llama:/root/.llama \
|
||||
-e NVIDIA_API_KEY=$NVIDIA_API_KEY \
|
||||
llamastack/distribution-nvidia \
|
||||
--config /root/my-run.yaml \
|
||||
--port $LLAMA_STACK_PORT
|
||||
```
|
||||
|
||||
### Via Docker with Custom Run Configuration
|
||||
|
||||
You can also run the Docker container with a custom run configuration file by mounting it into the container:
|
||||
|
||||
```bash
|
||||
# Set the path to your custom run.yaml file
|
||||
CUSTOM_RUN_CONFIG=/path/to/your/custom-run.yaml
|
||||
LLAMA_STACK_PORT=8321
|
||||
|
||||
docker run \
|
||||
-it \
|
||||
--pull always \
|
||||
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
|
||||
-v ~/.llama:/root/.llama \
|
||||
-v $CUSTOM_RUN_CONFIG:/app/custom-run.yaml \
|
||||
-e RUN_CONFIG_PATH=/app/custom-run.yaml \
|
||||
-e NVIDIA_API_KEY=$NVIDIA_API_KEY \
|
||||
llamastack/distribution-nvidia \
|
||||
--port $LLAMA_STACK_PORT
|
||||
```
|
||||
|
||||
**Note**: The run configuration must be mounted into the container before it can be used. The `-v` flag mounts your local file into the container, and the `RUN_CONFIG_PATH` environment variable tells the entrypoint script which configuration to use.
|
||||
|
||||
Available run configurations for this distribution:
|
||||
- `run.yaml`
|
||||
- `run-with-safety.yaml`
|
||||
|
||||
### Via venv
|
||||
|
||||
If you've set up your local development environment, you can also install the distribution dependencies using your local virtual environment.
|
||||
|
|
|
|||
|
|
@ -163,7 +163,41 @@ docker run \
|
|||
--port $LLAMA_STACK_PORT
|
||||
```
|
||||
|
||||
### Via venv
|
||||
The container will run the distribution with a SQLite store by default. This store is used for the following components:
|
||||
|
||||
- Metadata store: store metadata about the models, providers, etc.
|
||||
- Inference store: collection of responses from the inference provider
|
||||
- Agents store: store agent configurations (sessions, turns, etc.)
|
||||
- Agents Responses store: store responses from the agents
|
||||
|
||||
However, you can use PostgreSQL instead by running the `starter::run-with-postgres-store.yaml` configuration:
|
||||
|
||||
```bash
|
||||
docker run \
|
||||
-it \
|
||||
--pull always \
|
||||
-p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
|
||||
-e OPENAI_API_KEY=your_openai_key \
|
||||
-e FIREWORKS_API_KEY=your_fireworks_key \
|
||||
-e TOGETHER_API_KEY=your_together_key \
|
||||
-e POSTGRES_HOST=your_postgres_host \
|
||||
-e POSTGRES_PORT=your_postgres_port \
|
||||
-e POSTGRES_DB=your_postgres_db \
|
||||
-e POSTGRES_USER=your_postgres_user \
|
||||
-e POSTGRES_PASSWORD=your_postgres_password \
|
||||
llamastack/distribution-starter \
|
||||
starter::run-with-postgres-store.yaml
|
||||
```
|
||||
|
||||
Postgres environment variables:
|
||||
|
||||
- `POSTGRES_HOST`: Postgres host (default: `localhost`)
|
||||
- `POSTGRES_PORT`: Postgres port (default: `5432`)
|
||||
- `POSTGRES_DB`: Postgres database name (default: `llamastack`)
|
||||
- `POSTGRES_USER`: Postgres username (default: `llamastack`)
|
||||
- `POSTGRES_PASSWORD`: Postgres password (default: `llamastack`)
|
||||
|
||||
### Via Conda or venv
|
||||
|
||||
Ensure you have configured the starter distribution using the environment variables explained above.
|
||||
|
||||
|
|
@ -171,8 +205,11 @@ Ensure you have configured the starter distribution using the environment variab
|
|||
# Install dependencies for the starter distribution
|
||||
uv run --with llama-stack llama stack list-deps starter | xargs -L1 uv pip install
|
||||
|
||||
# Run the server
|
||||
# Run the server (with SQLite - default)
|
||||
uv run --with llama-stack llama stack run starter
|
||||
|
||||
# Or run with PostgreSQL
|
||||
uv run --with llama-stack llama stack run starter::run-with-postgres-store.yaml
|
||||
```
|
||||
|
||||
## Example Usage
|
||||
|
|
|
|||
|
|
@ -144,7 +144,7 @@ source .venv/bin/activate
|
|||
```bash
|
||||
uv venv client --python 3.12
|
||||
source client/bin/activate
|
||||
pip install llama-stack-client
|
||||
uv pip install llama-stack-client
|
||||
```
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
|
@ -239,8 +239,13 @@ client = LlamaStackClient(base_url="http://localhost:8321")
|
|||
models = client.models.list()
|
||||
|
||||
# Select the first LLM
|
||||
llm = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama")
|
||||
model_id = llm.identifier
|
||||
llm = next(
|
||||
m for m in models
|
||||
if m.custom_metadata
|
||||
and m.custom_metadata.get("model_type") == "llm"
|
||||
and m.custom_metadata.get("provider_id") == "ollama"
|
||||
)
|
||||
model_id = llm.id
|
||||
|
||||
print("Model:", model_id)
|
||||
|
||||
|
|
@ -279,8 +284,13 @@ import uuid
|
|||
client = LlamaStackClient(base_url=f"http://localhost:8321")
|
||||
|
||||
models = client.models.list()
|
||||
llm = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama")
|
||||
model_id = llm.identifier
|
||||
llm = next(
|
||||
m for m in models
|
||||
if m.custom_metadata
|
||||
and m.custom_metadata.get("model_type") == "llm"
|
||||
and m.custom_metadata.get("provider_id") == "ollama"
|
||||
)
|
||||
model_id = llm.id
|
||||
|
||||
agent = Agent(client, model=model_id, instructions="You are a helpful assistant.")
|
||||
|
||||
|
|
@ -450,8 +460,11 @@ import uuid
|
|||
client = LlamaStackClient(base_url="http://localhost:8321")
|
||||
|
||||
# Create a vector database instance
|
||||
embed_lm = next(m for m in client.models.list() if m.model_type == "embedding")
|
||||
embedding_model = embed_lm.identifier
|
||||
embed_lm = next(
|
||||
m for m in client.models.list()
|
||||
if m.custom_metadata and m.custom_metadata.get("model_type") == "embedding"
|
||||
)
|
||||
embedding_model = embed_lm.id
|
||||
vector_db_id = f"v{uuid.uuid4().hex}"
|
||||
# The VectorDB API is deprecated; the server now returns its own authoritative ID.
|
||||
# We capture the correct ID from the response's .identifier attribute.
|
||||
|
|
@ -489,9 +502,11 @@ client.tool_runtime.rag_tool.insert(
|
|||
llm = next(
|
||||
m
|
||||
for m in client.models.list()
|
||||
if m.model_type == "llm" and m.provider_id == "ollama"
|
||||
if m.custom_metadata
|
||||
and m.custom_metadata.get("model_type") == "llm"
|
||||
and m.custom_metadata.get("provider_id") == "ollama"
|
||||
)
|
||||
model = llm.identifier
|
||||
model = llm.id
|
||||
|
||||
# Create the RAG agent
|
||||
rag_agent = Agent(
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
description: "Agents
|
||||
description: |
|
||||
Agents
|
||||
|
||||
APIs for creating and interacting with agentic systems."
|
||||
APIs for creating and interacting with agentic systems.
|
||||
sidebar_label: Agents
|
||||
title: Agents
|
||||
---
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ Meta's reference implementation of an agent system that can use tools, access ve
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `persistence` | `<class 'inline.agents.meta_reference.config.AgentPersistenceConfig'>` | No | | |
|
||||
| `persistence` | `AgentPersistenceConfig` | No | | |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
description: "The Batches API enables efficient processing of multiple requests in a single operation,
|
||||
description: |
|
||||
The Batches API enables efficient processing of multiple requests in a single operation,
|
||||
particularly useful for processing large datasets, batch evaluation workflows, and
|
||||
cost-effective inference at scale.
|
||||
|
||||
|
|
@ -8,7 +9,7 @@ description: "The Batches API enables efficient processing of multiple requests
|
|||
This API provides the following extensions:
|
||||
- idempotent batch creation
|
||||
|
||||
Note: This API is currently under active development and may undergo changes."
|
||||
Note: This API is currently under active development and may undergo changes.
|
||||
sidebar_label: Batches
|
||||
title: Batches
|
||||
---
|
||||
|
|
|
|||
|
|
@ -14,9 +14,9 @@ Reference implementation of batches API with KVStore persistence.
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | Configuration for the key-value store backend. |
|
||||
| `max_concurrent_batches` | `<class 'int'>` | No | 1 | Maximum number of concurrent batches to process simultaneously. |
|
||||
| `max_concurrent_requests_per_batch` | `<class 'int'>` | No | 10 | Maximum number of concurrent requests to process per batch. |
|
||||
| `kvstore` | `KVStoreReference` | No | | Configuration for the key-value store backend. |
|
||||
| `max_concurrent_batches` | `int` | No | 1 | Maximum number of concurrent batches to process simultaneously. |
|
||||
| `max_concurrent_requests_per_batch` | `int` | No | 10 | Maximum number of concurrent requests to process per batch. |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ Local filesystem-based dataset I/O provider for reading and writing datasets to
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |
|
||||
| `kvstore` | `KVStoreReference` | No | | |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ HuggingFace datasets provider for accessing and managing datasets from the Huggi
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |
|
||||
| `kvstore` | `KVStoreReference` | No | | |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ NVIDIA's dataset I/O provider for accessing datasets from NVIDIA's data platform
|
|||
| `api_key` | `str \| None` | No | | The NVIDIA API key. |
|
||||
| `dataset_namespace` | `str \| None` | No | default | The NVIDIA dataset namespace. |
|
||||
| `project_id` | `str \| None` | No | test-project | The NVIDIA project ID. |
|
||||
| `datasets_url` | `<class 'str'>` | No | http://nemo.test | Base URL for the NeMo Dataset API |
|
||||
| `datasets_url` | `str` | No | http://nemo.test | Base URL for the NeMo Dataset API |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
description: "Evaluations
|
||||
description: |
|
||||
Evaluations
|
||||
|
||||
Llama Stack Evaluation API for running evaluations on model and agent candidates."
|
||||
Llama Stack Evaluation API for running evaluations on model and agent candidates.
|
||||
sidebar_label: Eval
|
||||
title: Eval
|
||||
---
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ Meta's reference implementation of evaluation tasks with support for multiple la
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `kvstore` | `<class 'llama_stack.core.storage.datatypes.KVStoreReference'>` | No | | |
|
||||
| `kvstore` | `KVStoreReference` | No | | |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -14,7 +14,7 @@ NVIDIA's evaluation provider for running evaluation tasks on NVIDIA's platform.
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `evaluator_url` | `<class 'str'>` | No | http://0.0.0.0:7331 | The url for accessing the evaluator service |
|
||||
| `evaluator_url` | `str` | No | http://0.0.0.0:7331 | The url for accessing the evaluator service |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -80,7 +80,7 @@ container_image: custom-vector-store:latest # optional
|
|||
All providers must contain a `get_provider_spec` function in their `provider` module. This is a standardized structure that Llama Stack expects and is necessary for getting things such as the config class. The `get_provider_spec` method returns a structure identical to the `adapter`. An example function may look like:
|
||||
|
||||
```python
|
||||
from llama_stack.providers.datatypes import (
|
||||
from llama_stack_api.providers.datatypes import (
|
||||
ProviderSpec,
|
||||
Api,
|
||||
RemoteProviderSpec,
|
||||
|
|
|
|||
290
docs/docs/providers/files/files.mdx
Normal file
|
|
@ -0,0 +1,290 @@
|
|||
---
|
||||
sidebar_label: Files
|
||||
title: Files
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The Files API provides file management capabilities for Llama Stack. It allows you to upload, store, retrieve, and manage files that can be used across various endpoints in your application.
|
||||
|
||||
## Features
|
||||
|
||||
- **File Upload**: Upload files with metadata and purpose classification
|
||||
- **File Management**: List, retrieve, and delete files
|
||||
- **Content Retrieval**: Access raw file content for processing
|
||||
- **API Compatibility**: Full compatibility with OpenAI Files API endpoints
|
||||
- **Flexible Storage**: Support for local filesystem and cloud storage backends
|
||||
|
||||
## API Endpoints
|
||||
|
||||
### Upload File
|
||||
|
||||
**POST** `/v1/openai/v1/files`
|
||||
|
||||
Upload a file that can be used across various endpoints.
|
||||
|
||||
**Request Body:**
|
||||
- `file`: The file object to be uploaded (multipart form data)
|
||||
- `purpose`: The intended purpose of the uploaded file
|
||||
|
||||
**Supported Purposes:**
|
||||
- `batch`: Files for batch operations
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"id": "file-abc123",
|
||||
"object": "file",
|
||||
"bytes": 140,
|
||||
"created_at": 1613779121,
|
||||
"filename": "mydata.jsonl",
|
||||
"purpose": "batch"
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
import requests
|
||||
|
||||
with open("data.jsonl", "rb") as f:
|
||||
files = {"file": f}
|
||||
data = {"purpose": "batch"}
|
||||
response = requests.post(
|
||||
"http://localhost:8000/v1/openai/v1/files", files=files, data=data
|
||||
)
|
||||
file_info = response.json()
|
||||
```
|
||||
|
||||
### List Files
|
||||
|
||||
**GET** `/v1/openai/v1/files`
|
||||
|
||||
Returns a list of files that belong to the user's organization.
|
||||
|
||||
**Query Parameters:**
|
||||
- `after` (optional): A cursor for pagination
|
||||
- `limit` (optional): Limit on number of objects (1-10,000, default: 10,000)
|
||||
- `order` (optional): Sort order by created_at timestamp (`asc` or `desc`, default: `desc`)
|
||||
- `purpose` (optional): Filter files by purpose
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"object": "list",
|
||||
"data": [
|
||||
{
|
||||
"id": "file-abc123",
|
||||
"object": "file",
|
||||
"bytes": 140,
|
||||
"created_at": 1613779121,
|
||||
"filename": "mydata.jsonl",
|
||||
"purpose": "fine-tune"
|
||||
}
|
||||
],
|
||||
"has_more": false
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
import requests
|
||||
|
||||
# List all files
|
||||
response = requests.get("http://localhost:8000/v1/openai/v1/files")
|
||||
files = response.json()
|
||||
|
||||
# List files with pagination
|
||||
response = requests.get(
|
||||
"http://localhost:8000/v1/openAi/v1/files",
|
||||
params={"limit": 10, "after": "file-abc123"},
|
||||
)
|
||||
files = response.json()
|
||||
|
||||
# Filter by purpose
|
||||
response = requests.get(
|
||||
"http://localhost:8000/v1/openAi/v1/files", params={"purpose": "fine-tune"}
|
||||
)
|
||||
files = response.json()
|
||||
```
|
||||
|
||||
### Retrieve File
|
||||
|
||||
**GET** `/v1/openai/v1/files/{file_id}`
|
||||
|
||||
Returns information about a specific file.
|
||||
|
||||
**Path Parameters:**
|
||||
- `file_id`: The ID of the file to retrieve
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"id": "file-abc123",
|
||||
"object": "file",
|
||||
"bytes": 140,
|
||||
"created_at": 1613779121,
|
||||
"filename": "mydata.jsonl",
|
||||
"purpose": "fine-tune"
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
import requests
|
||||
|
||||
file_id = "file-abc123"
|
||||
response = requests.get(f"http://localhost:8000/v1/openai/v1/files/{file_id}")
|
||||
file_info = response.json()
|
||||
```
|
||||
|
||||
### Delete File
|
||||
|
||||
**DELETE** `/v1/openai/v1/files/{file_id}`
|
||||
|
||||
Delete a file.
|
||||
|
||||
**Path Parameters:**
|
||||
- `file_id`: The ID of the file to delete
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
{
|
||||
"id": "file-abc123",
|
||||
"object": "file",
|
||||
"deleted": true
|
||||
}
|
||||
```
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
import requests
|
||||
|
||||
file_id = "file-abc123"
|
||||
response = requests.delete(f"http://localhost:8000/v1/openai/v1/files/{file_id}")
|
||||
result = response.json()
|
||||
```
|
||||
|
||||
### Retrieve File Content
|
||||
|
||||
**GET** `/v1/openai/v1/files/{file_id}/content`
|
||||
|
||||
Returns the raw file content as a binary response.
|
||||
|
||||
**Path Parameters:**
|
||||
- `file_id`: The ID of the file to retrieve content from
|
||||
|
||||
**Response:**
|
||||
Binary file content with appropriate headers:
|
||||
- `Content-Type`: `application/octet-stream`
|
||||
- `Content-Disposition`: `attachment; filename="filename"`
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
import requests
|
||||
|
||||
file_id = "file-abc123"
|
||||
response = requests.get(f"http://localhost:8000/v1/openai/v1/files/{file_id}/content")
|
||||
|
||||
# Save content to file
|
||||
with open("downloaded_file.jsonl", "wb") as f:
|
||||
f.write(response.content)
|
||||
|
||||
# Or process content directly
|
||||
content = response.content
|
||||
```
|
||||
|
||||
## Vector Store Integration
|
||||
|
||||
The Files API integrates with Vector Stores to enable document processing and search. For detailed information about this integration, see [File Operations and Vector Store Integration](../concepts/file_operations_vector_stores.md).
|
||||
|
||||
### Vector Store File Operations
|
||||
|
||||
**List Vector Store Files:**
|
||||
- **GET** `/v1/openai/v1/vector_stores/{vector_store_id}/files`
|
||||
|
||||
**Retrieve Vector Store File Content:**
|
||||
- **GET** `/v1/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/content`
|
||||
|
||||
**Attach File to Vector Store:**
|
||||
- **POST** `/v1/openai/v1/vector_stores/{vector_store_id}/files`
|
||||
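For illustration, the equivalent curl calls (the vector store and file IDs are placeholders):

```bash
# List files attached to a vector store
curl http://localhost:8000/v1/openai/v1/vector_stores/vs_abc123/files

# Retrieve the processed content of a file in a vector store
curl http://localhost:8000/v1/openai/v1/vector_stores/vs_abc123/files/file-abc123/content

# Attach a file to a vector store
curl -X POST http://localhost:8000/v1/openai/v1/vector_stores/vs_abc123/files \
  -H "Content-Type: application/json" \
  -d '{"file_id": "file-abc123"}'
```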
|
||||
## Error Handling
|
||||
|
||||
The Files API returns standard HTTP status codes and error responses:
|
||||
|
||||
- `400 Bad Request`: Invalid request parameters
|
||||
- `404 Not Found`: File not found
|
||||
- `429 Too Many Requests`: Rate limit exceeded
|
||||
- `500 Internal Server Error`: Server error
|
||||
|
||||
**Error Response Format:**
|
||||
```json
|
||||
{
|
||||
"error": {
|
||||
"message": "Error description",
|
||||
"type": "invalid_request_error",
|
||||
"code": "file_not_found"
|
||||
}
|
||||
}
|
||||
```
|
||||
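A minimal sketch of handling these responses with `requests` (the file ID is a placeholder):

```python
import requests

response = requests.get("http://localhost:8000/v1/openai/v1/files/file-abc123")
if response.status_code == 404:
    print("File not found")
elif response.ok:
    file_info = response.json()
else:
    # Fall back to the standard error envelope shown above
    error = response.json().get("error", {})
    print(f"Request failed ({response.status_code}): {error.get('message')}")
```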
|
||||
## Rate Limits
|
||||
|
||||
The Files API implements rate limiting to ensure fair usage:
|
||||
- File uploads: 100 files per minute
|
||||
- File retrievals: 1000 requests per minute
|
||||
- File deletions: 100 requests per minute
|
||||
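If you exceed these limits the API responds with `429 Too Many Requests`; a simple backoff-and-retry sketch:

```python
import time

import requests


def get_with_retry(url: str, retries: int = 3, backoff_secs: float = 1.0):
    # Retry on 429 with exponential backoff; return the last response otherwise.
    for attempt in range(retries):
        response = requests.get(url)
        if response.status_code != 429:
            return response
        time.sleep(backoff_secs * (2**attempt))
    return response


files = get_with_retry("http://localhost:8000/v1/openai/v1/files").json()
```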
|
||||
## Best Practices
|
||||
|
||||
1. **File Organization**: Use descriptive filenames and appropriate purpose classifications
|
||||
2. **Batch Operations**: For multiple files, consider using batch endpoints when available
|
||||
3. **Error Handling**: Always check response status codes and handle errors gracefully
|
||||
4. **Content Types**: Ensure files are uploaded with appropriate content types
|
||||
5. **Cleanup**: Regularly delete unused files to manage storage costs
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### With Python Client
|
||||
|
||||
```python
|
||||
from llama_stack_client import LlamaStackClient
|
||||
|
||||
client = LlamaStackClient(base_url="http://localhost:8000")
|
||||
|
||||
# Upload a file
|
||||
with open("data.jsonl", "rb") as f:
|
||||
file_info = await client.files.upload(file=f, purpose="fine-tune")
|
||||
|
||||
# List files
|
||||
files = await client.files.list(purpose="fine-tune")
|
||||
|
||||
# Retrieve file content
|
||||
content = await client.files.retrieve_content(file_info.id)
|
||||
```
|
||||
|
||||
### With cURL
|
||||
|
||||
```bash
|
||||
# Upload file
|
||||
curl -X POST http://localhost:8000/v1/openai/v1/files \
|
||||
-F "file=@data.jsonl" \
|
||||
-F "purpose=fine-tune"
|
||||
|
||||
# List files
|
||||
curl http://localhost:8000/v1/openai/v1/files
|
||||
|
||||
# Download file content
|
||||
curl http://localhost:8000/v1/openai/v1/files/file-abc123/content \
|
||||
-o downloaded_file.jsonl
|
||||
```
|
||||
|
||||
## Provider Support
|
||||
|
||||
The Files API supports multiple storage backends:
|
||||
|
||||
- **Local Filesystem**: Store files on local disk (inline provider)
|
||||
- **S3**: Store files in AWS S3 or S3-compatible services (remote provider)
|
||||
- **Custom Backends**: Extensible architecture for custom storage providers
|
||||
|
||||
See the [Files Providers](index.md) documentation for detailed configuration options.
|
||||
|
|
@ -1,7 +1,8 @@
|
|||
---
|
||||
description: "Files
|
||||
description: |
|
||||
Files
|
||||
|
||||
This API is used to upload documents that can be used with other Llama Stack APIs."
|
||||
This API is used to upload documents that can be used with other Llama Stack APIs.
|
||||
sidebar_label: Files
|
||||
title: Files
|
||||
---
|
||||
|
|
|
|||
|
|
@ -14,9 +14,9 @@ Local filesystem-based file storage provider for managing files and documents lo
|
|||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `storage_dir` | `<class 'str'>` | No | | Directory to store uploaded files |
|
||||
| `metadata_store` | `<class 'llama_stack.core.storage.datatypes.SqlStoreReference'>` | No | | SQL store configuration for file metadata |
|
||||
| `ttl_secs` | `<class 'int'>` | No | 31536000 | |
|
||||
| `storage_dir` | `str` | No | | Directory to store uploaded files |
|
||||
| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |
|
||||
| `ttl_secs` | `int` | No | 31536000 | |
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,80 @@
|
|||
# File Operations Quick Reference
|
||||
|
||||
## Overview
|
||||
|
||||
As of release 0.2.14, Llama Stack provides comprehensive file operations and Vector Store API integration, following the [OpenAI Vector Store Files API specification](https://platform.openai.com/docs/api-reference/vector-stores-files).
|
||||
|
||||
> **Note**: For detailed overview and implementation details, see [Overview](../openai_file_operations_support.md#overview) in the full documentation.
|
||||
|
||||
## Supported Providers
|
||||
|
||||
> **Note**: For complete provider details and features, see [Supported Providers](../openai_file_operations_support.md#supported-providers) in the full documentation.
|
||||
|
||||
**Inline Providers**: FAISS, SQLite-vec, Milvus
|
||||
**Remote Providers**: ChromaDB, Qdrant, Weaviate, PGVector
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Upload File
|
||||
```python
|
||||
file_info = await client.files.upload(
|
||||
file=open("document.pdf", "rb"), purpose="assistants"
|
||||
)
|
||||
```
|
||||
|
||||
### 2. Create Vector Store
|
||||
```python
|
||||
vector_store = client.vector_stores.create(name="my_docs")
|
||||
```
|
||||
|
||||
### 3. Attach File
|
||||
```python
|
||||
await client.vector_stores.files.create(
|
||||
vector_store_id=vector_store.id, file_id=file_info.id
|
||||
)
|
||||
```
|
||||
|
||||
### 4. Search
|
||||
```python
|
||||
results = await client.vector_stores.search(
|
||||
vector_store_id=vector_store.id, query="What is the main topic?", max_num_results=5
|
||||
)
|
||||
```
|
||||
|
||||
## File Processing & Search
|
||||
|
||||
**Processing**: 800 tokens default chunk size, 400 token overlap
|
||||
**Formats**: PDF, DOCX, TXT, Code files, etc.
|
||||
**Search**: Vector similarity, Hybrid (SQLite-vec), Filtered with metadata
|
||||
|
||||
## Configuration
|
||||
|
||||
> **Note**: For detailed configuration examples and options, see [Configuration Examples](../openai_file_operations_support.md#configuration-examples) in the full documentation.
|
||||
|
||||
**Basic Setup**: Configure vector_io and files providers in your run.yaml
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
- **RAG Systems**: Document Q&A with file uploads
|
||||
- **Knowledge Bases**: Searchable document collections
|
||||
- **Content Analysis**: Document similarity and clustering
|
||||
- **Research Tools**: Literature review and analysis
|
||||
|
||||
## Performance Tips
|
||||
|
||||
> **Note**: For detailed performance optimization strategies, see [Performance Considerations](../openai_file_operations_support.md#performance-considerations) in the full documentation.
|
||||
|
||||
**Quick Tips**: Choose provider based on your needs (speed vs. storage vs. scalability)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
> **Note**: For comprehensive troubleshooting, see [Troubleshooting](../openai_file_operations_support.md#troubleshooting) in the full documentation.
|
||||
|
||||
**Quick Fixes**: Check file format compatibility, optimize chunk sizes, monitor storage
|
||||
|
||||
## Resources
|
||||
|
||||
- [Full Documentation](openai_file_operations_support.md)
|
||||
- [Integration Guide](../concepts/file_operations_vector_stores.md)
|
||||
- [Files API](files_api.md)
|
||||
- [Provider Details](../vector_io/index.md)
|
||||
291
docs/docs/providers/files/openai_file_operations_support.md
Normal file
|
|
@ -0,0 +1,291 @@
|
|||
# File Operations Support in Vector Store Providers
|
||||
|
||||
## Overview
|
||||
|
||||
This document provides a comprehensive overview of file operations and Vector Store API support across all available vector store providers in Llama Stack. As of release 0.2.24, the following providers support full file operations integration.
|
||||
|
||||
## Supported Providers
|
||||
|
||||
### ✅ Full File Operations Support
|
||||
|
||||
The following providers support complete file operations integration, including file upload, automatic processing, and search:
|
||||
|
||||
#### Inline Providers (Single Node)
|
||||
|
||||
| Provider | File Operations | Key Features |
|
||||
|----------|----------------|--------------|
|
||||
| **FAISS** | ✅ Full Support | Fast in-memory search, GPU acceleration |
|
||||
| **SQLite-vec** | ✅ Full Support | Hybrid search, disk-based storage |
|
||||
| **Milvus** | ✅ Full Support | High-performance, scalable indexing |
|
||||
|
||||
#### Remote Providers (Hosted)
|
||||
|
||||
| Provider | File Operations | Key Features |
|
||||
|----------|----------------|--------------|
|
||||
| **ChromaDB** | ✅ Full Support | Metadata filtering, persistent storage |
|
||||
| **Qdrant** | ✅ Full Support | Payload filtering, advanced search |
|
||||
| **Weaviate** | ✅ Full Support | GraphQL interface, schema management |
|
||||
| **Postgres (PGVector)** | ✅ Full Support | SQL integration, ACID compliance |
|
||||
|
||||
### 🔄 Partial Support
|
||||
|
||||
Some providers may support basic vector operations but lack full file operations integration:
|
||||
|
||||
| Provider | Status | Notes |
|
||||
|----------|--------|-------|
|
||||
| **Meta Reference** | 🔄 Basic | Core vector operations only |
|
||||
|
||||
## File Operations Features
|
||||
|
||||
All supported providers offer the following file operations capabilities:
|
||||
|
||||
### Core Functionality
|
||||
|
||||
- **File Upload & Processing**: Automatic document ingestion and chunking
|
||||
- **Vector Storage**: Embedding generation and storage
|
||||
- **Search & Retrieval**: Semantic search with metadata filtering
|
||||
- **File Management**: List, retrieve, and manage files in vector stores
|
||||
|
||||
### Advanced Features
|
||||
|
||||
- **Automatic Chunking**: Configurable chunk sizes and overlap
|
||||
- **Metadata Preservation**: File attributes and chunk metadata
|
||||
- **Status Tracking**: Monitor file processing progress
|
||||
- **Error Handling**: Comprehensive error reporting and recovery
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### File Processing Pipeline
|
||||
|
||||
1. **Upload**: File uploaded via Files API
|
||||
2. **Extraction**: Text content extracted from various formats
|
||||
3. **Chunking**: Content split into optimal chunks (default: 800 tokens)
|
||||
4. **Embedding**: Chunks converted to vector embeddings
|
||||
5. **Storage**: Vectors stored with metadata in vector database
|
||||
6. **Indexing**: Search index updated for fast retrieval
|
||||
|
||||
### Supported File Formats
|
||||
|
||||
- **Documents**: PDF, DOCX, DOC
|
||||
- **Text**: TXT, MD, RST
|
||||
- **Code**: Python, JavaScript, Java, C++, etc.
|
||||
- **Data**: JSON, CSV, XML
|
||||
- **Web**: HTML files
|
||||
|
||||
### Chunking Strategies
|
||||
|
||||
- **Default**: 800 tokens with 400 token overlap
|
||||
- **Custom**: Configurable chunk sizes and overlap
|
||||
- **Static**: Fixed-size chunks with overlap
|
||||
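As a sketch of the custom option above, a file can be attached with a static chunking strategy; the parameter shape follows the OpenAI Vector Store Files API, and support may vary by provider:

```python
await client.vector_stores.files.create(
    vector_store_id=vector_store.id,
    file_id=file_info.id,
    chunking_strategy={
        "type": "static",
        "static": {
            "max_chunk_size_tokens": 600,  # smaller chunks for higher precision
            "chunk_overlap_tokens": 300,  # ~50% overlap preserves context between chunks
        },
    },
)
```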
|
||||
## Provider-Specific Features
|
||||
|
||||
### FAISS
|
||||
|
||||
- **Storage**: In-memory with optional persistence
|
||||
- **Performance**: Optimized for speed and GPU acceleration
|
||||
- **Use Case**: High-performance, memory-constrained environments
|
||||
|
||||
### SQLite-vec
|
||||
|
||||
- **Storage**: Disk-based with SQLite backend
|
||||
- **Search**: Hybrid vector + keyword search
|
||||
- **Use Case**: Large document collections, frequent updates
|
||||
|
||||
### Milvus
|
||||
|
||||
- **Storage**: Scalable distributed storage
|
||||
- **Indexing**: Multiple index types (IVF, HNSW)
|
||||
- **Use Case**: Production deployments, large-scale applications
|
||||
|
||||
### ChromaDB
|
||||
|
||||
- **Storage**: Persistent storage with metadata
|
||||
- **Filtering**: Advanced metadata filtering
|
||||
- **Use Case**: Applications requiring rich metadata
|
||||
|
||||
### Qdrant
|
||||
|
||||
- **Storage**: High-performance vector database
|
||||
- **Filtering**: Payload-based filtering
|
||||
- **Use Case**: Real-time applications, complex queries
|
||||
|
||||
### Weaviate
|
||||
|
||||
- **Storage**: GraphQL-native vector database
|
||||
- **Schema**: Flexible schema management
|
||||
- **Use Case**: Applications requiring complex data relationships
|
||||
|
||||
### Postgres (PGVector)
|
||||
|
||||
- **Storage**: SQL database with vector extensions
|
||||
- **Integration**: ACID compliance, existing SQL workflows
|
||||
- **Use Case**: Applications requiring transactional guarantees
|
||||
|
||||
## Configuration Examples

### Basic Configuration

```yaml
vector_io:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        type: sqlite
        db_path: ~/.llama/faiss_store.db
```

### With Files API Support

```yaml
vector_io:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        type: sqlite
        db_path: ~/.llama/faiss_store.db

files:
  - provider_id: local-files
    provider_type: inline::localfs
    config:
      storage_dir: ~/.llama/files
      metadata_store:
        type: sqlite
        db_path: ~/.llama/files_metadata.db
```
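
Once a distribution with these providers is running, you can verify what got wired up from the client side. A small sketch, assuming the `llama_stack_client` package; the provider IDs printed depend on your run configuration:

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8000")

# List configured providers and show which ones serve vector_io and files
for provider in client.providers.list():
    if provider.api in ("vector_io", "files"):
        print(provider.api, provider.provider_id, provider.provider_type)
```
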
## Usage Examples

### Python Client

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8000")

# Create vector store
vector_store = client.vector_stores.create(name="documents")

# Upload and process file
with open("document.pdf", "rb") as f:
    file_info = client.files.create(file=f, purpose="assistants")

# Attach to vector store
client.vector_stores.files.create(
    vector_store_id=vector_store.id, file_id=file_info.id
)

# Search
results = client.vector_stores.search(
    vector_store_id=vector_store.id, query="What is the main topic?", max_num_results=5
)
```

### cURL Commands

```bash
# Upload file
curl -X POST http://localhost:8000/v1/openai/v1/files \
  -F "file=@document.pdf" \
  -F "purpose=assistants"

# Create vector store
curl -X POST http://localhost:8000/v1/openai/v1/vector_stores \
  -H "Content-Type: application/json" \
  -d '{"name": "documents"}'

# Attach file to vector store
curl -X POST http://localhost:8000/v1/openai/v1/vector_stores/{store_id}/files \
  -H "Content-Type: application/json" \
  -d '{"file_id": "file-abc123"}'

# Search vector store
curl -X POST http://localhost:8000/v1/openai/v1/vector_stores/{store_id}/search \
  -H "Content-Type: application/json" \
  -d '{"query": "What is the main topic?", "max_num_results": 5}'
```

## Performance Considerations

### Chunk Size Optimization

- **Small chunks (400-600 tokens)**: Better precision, more results
- **Large chunks (800-1200 tokens)**: Better context, fewer results
- **Overlap (50%)**: Maintains context between chunks

### Storage Efficiency

- **FAISS**: Fastest, but memory-limited
- **SQLite-vec**: Good balance of performance and storage
- **Milvus**: Scalable, production-ready
- **Remote providers**: Managed, but network-dependent

### Search Performance

- **Vector search**: Fastest for semantic queries
- **Hybrid search**: Best accuracy (SQLite-vec only)
- **Filtered search**: Fast with metadata constraints
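
As an illustration of the filtered path, attribute filters can be passed alongside the query. A hedged sketch that follows the OpenAI-compatible search API and reuses the `client` and `vector_store` from the usage example above; the `department` attribute is made up for illustration:

```python
results = client.vector_stores.search(
    vector_store_id=vector_store.id,
    query="What is the main topic?",
    filters={"type": "eq", "key": "department", "value": "engineering"},
    max_num_results=5,
)

for hit in results.data:
    # Each hit carries the matched chunk plus the file it came from
    print(hit.filename, hit.score)
```
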
## Troubleshooting

### Common Issues

1. **File Processing Failures**
   - Check file format compatibility
   - Verify file size limits
   - Review error messages in file status

2. **Search Performance**
   - Optimize chunk sizes for your use case
   - Use filters to narrow search scope
   - Monitor vector store metrics

3. **Storage Issues**
   - Check available disk space
   - Verify database permissions
   - Monitor memory usage (for in-memory providers)

### Monitoring

```python
# Check file processing status
file_status = client.vector_stores.files.retrieve(
    vector_store_id=vector_store.id, file_id=file_info.id
)

if file_status.status == "failed":
    print(f"Error: {file_status.last_error.message}")

# Monitor overall vector store status and file counts
store = client.vector_stores.retrieve(vector_store_id=vector_store.id)
print(f"Status: {store.status}, files completed: {store.file_counts.completed}")
```
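
If a file lands in the `failed` state, one recovery option is to detach the failed entry and attach the file again (for example after fixing the source document). A minimal sketch building on the monitoring example above; whether a retry succeeds depends on why processing failed:

```python
if file_status.status == "failed":
    # Detach the failed entry, then re-attach the same file to retry processing
    client.vector_stores.files.delete(
        vector_store_id=vector_store.id, file_id=file_info.id
    )
    client.vector_stores.files.create(
        vector_store_id=vector_store.id, file_id=file_info.id
    )
```
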
## Best Practices

1. **File Organization**: Use descriptive names and organize by purpose
2. **Chunking Strategy**: Test different sizes for your specific use case
3. **Metadata**: Add relevant attributes for better filtering
4. **Monitoring**: Track processing status and search performance
5. **Cleanup**: Regularly remove unused files to manage storage
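
For the cleanup practice in particular, unused entries can be removed programmatically. A hedged sketch that detaches every file from a store and deletes the underlying file objects; the remove-everything policy is illustrative only:

```python
# Illustrative cleanup: detach all files from the store and delete the uploads
for vs_file in client.vector_stores.files.list(vector_store_id=vector_store.id):
    client.vector_stores.files.delete(
        vector_store_id=vector_store.id, file_id=vs_file.id
    )
    client.files.delete(file_id=vs_file.id)
```
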
## Future Enhancements

Planned improvements for file operations support:

- **Batch Processing**: Process multiple files simultaneously
- **Advanced Chunking**: More sophisticated chunking algorithms
- **Custom Embeddings**: Support for custom embedding models
- **Real-time Updates**: Live file processing and indexing
- **Multi-format Support**: Enhanced file format support

## Support and Resources

- **Documentation**: [File Operations and Vector Store Integration](../../concepts/file_operations_vector_stores.mdx)
- **API Reference**: [Files API](files_api.md)
- **Provider Docs**: [Vector Store Providers](../vector_io/index.md)
- **Examples**: [Getting Started](../getting_started/index.md)
- **Community**: [GitHub Discussions](https://github.com/meta-llama/llama-stack/discussions)

docs/docs/providers/files/remote_openai.mdx (new file, 27 lines)

@@ -0,0 +1,27 @@
---
description: "OpenAI Files API provider for managing files through OpenAI's native file storage service."
sidebar_label: Remote - Openai
title: remote::openai
---

# remote::openai

## Description

OpenAI Files API provider for managing files through OpenAI's native file storage service.

## Configuration

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `api_key` | `str` | No | | OpenAI API key for authentication |
| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |

## Sample Configuration

```yaml
api_key: ${env.OPENAI_API_KEY}
metadata_store:
  table_name: openai_files_metadata
  backend: sql_default
```

@@ -14,13 +14,13 @@ AWS S3-based file storage provider for scalable cloud file management with metad

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `bucket_name` | `<class 'str'>` | No | | S3 bucket name to store files |
| `region` | `<class 'str'>` | No | us-east-1 | AWS region where the bucket is located |
| `bucket_name` | `str` | No | | S3 bucket name to store files |
| `region` | `str` | No | us-east-1 | AWS region where the bucket is located |
| `aws_access_key_id` | `str \| None` | No | | AWS access key ID (optional if using IAM roles) |
| `aws_secret_access_key` | `str \| None` | No | | AWS secret access key (optional if using IAM roles) |
| `endpoint_url` | `str \| None` | No | | Custom S3 endpoint URL (for MinIO, LocalStack, etc.) |
| `auto_create_bucket` | `<class 'bool'>` | No | False | Automatically create the S3 bucket if it doesn't exist |
| `metadata_store` | `<class 'llama_stack.core.storage.datatypes.SqlStoreReference'>` | No | | SQL store configuration for file metadata |
| `auto_create_bucket` | `bool` | No | False | Automatically create the S3 bucket if it doesn't exist |
| `metadata_store` | `SqlStoreReference` | No | | SQL store configuration for file metadata |

## Sample Configuration

@@ -22,6 +22,7 @@ Importantly, Llama Stack always strives to provide at least one fully inline pro
## Provider Categories

- **[External Providers](external/index.mdx)** - Guide for building and using external providers
- **[OpenAI Compatibility](../api-openai/index.mdx)** - OpenAI API compatibility layer
- **[Inference](inference/index.mdx)** - LLM and embedding model providers
- **[Agents](agents/index.mdx)** - Agentic system providers
- **[DatasetIO](datasetio/index.mdx)** - Dataset and data loader providers

@@ -30,6 +31,16 @@ Importantly, Llama Stack always strives to provide at least one fully inline pro
- **[Tool Runtime](tool_runtime/index.mdx)** - Tool and protocol providers
- **[Files](files/index.mdx)** - File system and storage providers

## Other information about Providers
- **[OpenAI Compatibility](./openai.mdx)** - OpenAI API compatibility layer
## API Documentation

For comprehensive API documentation and reference:

- **[API Reference](../api/index.mdx)** - Complete API documentation
- **[Experimental APIs](../api-experimental/index.mdx)** - APIs in development
- **[Deprecated APIs](../api-deprecated/index.mdx)** - Legacy APIs being phased out
- **[OpenAI Compatibility](../api-openai/index.mdx)** - OpenAI API compatibility guide

## Additional Provider Information

- **[OpenAI Implementation Guide](./openai.mdx)** - Code examples and implementation details for OpenAI APIs
- **[OpenAI-Compatible Responses Limitations](./openai_responses_limitations.mdx)** - Known limitations of the Responses API in Llama Stack

@@ -1,12 +1,13 @@
---
description: "Inference
description: |
  Inference

  Llama Stack Inference API for generating completions, chat completions, and embeddings.

  This API provides the raw interface to the underlying models. Three kinds of models are supported:
  - LLM models: these models generate \"raw\" and \"chat\" (conversational) completions.
  - LLM models: these models generate "raw" and "chat" (conversational) completions.
  - Embedding models: these models generate embeddings to be used for semantic search.
  - Rerank models: these models reorder the documents based on their relevance to a query."
  - Rerank models: these models reorder the documents based on their relevance to a query.
sidebar_label: Inference
title: Inference
---

@@ -16,12 +16,12 @@ Meta's reference implementation of inference with support for various model form
|-------|------|----------|---------|-------------|
| `model` | `str \| None` | No | | |
| `torch_seed` | `int \| None` | No | | |
| `max_seq_len` | `<class 'int'>` | No | 4096 | |
| `max_batch_size` | `<class 'int'>` | No | 1 | |
| `max_seq_len` | `int` | No | 4096 | |
| `max_batch_size` | `int` | No | 1 | |
| `model_parallel_size` | `int \| None` | No | | |
| `create_distributed_process_group` | `<class 'bool'>` | No | True | |
| `create_distributed_process_group` | `bool` | No | True | |
| `checkpoint_dir` | `str \| None` | No | | |
| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig, annotation=NoneType, required=True, discriminator='type'` | No | | |
| `quantization` | `Bf16QuantizationConfig \| Fp8QuantizationConfig \| Int4QuantizationConfig \| None` | No | | |

## Sample Configuration

@@ -14,9 +14,9 @@ Anthropic inference provider for accessing Claude models and Anthropic's AI serv

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |

## Sample Configuration

@@ -21,10 +21,10 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `api_base` | `<class 'pydantic.networks.HttpUrl'>` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com) |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `HttpUrl \| None` | No | | Azure API base for Azure (e.g., https://your-resource-name.openai.azure.com/openai/v1) |
| `api_version` | `str \| None` | No | | Azure API version for Azure (e.g., 2024-12-01-preview) |
| `api_type` | `str \| None` | No | azure | Azure API type for Azure (e.g., azure) |

@@ -32,7 +32,7 @@ https://learn.microsoft.com/en-us/azure/ai-foundry/openai/overview

```yaml
api_key: ${env.AZURE_API_KEY:=}
api_base: ${env.AZURE_API_BASE:=}
base_url: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=}
```

@@ -1,5 +1,5 @@
---
description: "AWS Bedrock inference provider for accessing various AI models through AWS's managed service."
description: "AWS Bedrock inference provider using OpenAI compatible endpoint."
sidebar_label: Remote - Bedrock
title: remote::bedrock
---

@@ -8,27 +8,20 @@ title: remote::bedrock

## Description

AWS Bedrock inference provider for accessing various AI models through AWS's managed service.
AWS Bedrock inference provider using OpenAI compatible endpoint.

## Configuration

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `aws_access_key_id` | `str \| None` | No | | The AWS access key to use. Default use environment variable: AWS_ACCESS_KEY_ID |
| `aws_secret_access_key` | `str \| None` | No | | The AWS secret access key to use. Default use environment variable: AWS_SECRET_ACCESS_KEY |
| `aws_session_token` | `str \| None` | No | | The AWS session token to use. Default use environment variable: AWS_SESSION_TOKEN |
| `region_name` | `str \| None` | No | | The default AWS Region to use, for example, us-west-1 or us-west-2.Default use environment variable: AWS_DEFAULT_REGION |
| `profile_name` | `str \| None` | No | | The profile name that contains credentials to use.Default use environment variable: AWS_PROFILE |
| `total_max_attempts` | `int \| None` | No | | An integer representing the maximum number of attempts that will be made for a single request, including the initial attempt. Default use environment variable: AWS_MAX_ATTEMPTS |
| `retry_mode` | `str \| None` | No | | A string representing the type of retries Boto3 will perform.Default use environment variable: AWS_RETRY_MODE |
| `connect_timeout` | `float \| None` | No | 60.0 | The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds. |
| `read_timeout` | `float \| None` | No | 60.0 | The time in seconds till a timeout exception is thrown when attempting to read from a connection.The default is 60 seconds. |
| `session_ttl` | `int \| None` | No | 3600 | The time in seconds till a session expires. The default is 3600 seconds (1 hour). |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `region_name` | `str` | No | us-east-2 | AWS Region for the Bedrock Runtime endpoint |

## Sample Configuration

```yaml
{}
api_key: ${env.AWS_BEARER_TOKEN_BEDROCK:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
```

@@ -14,14 +14,14 @@ Cerebras inference provider for running models on Cerebras Cloud platform.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `<class 'str'>` | No | https://api.cerebras.ai | Base URL for the Cerebras API |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `HttpUrl \| None` | No | https://api.cerebras.ai/v1 | Base URL for the Cerebras API |

## Sample Configuration

```yaml
base_url: https://api.cerebras.ai
base_url: https://api.cerebras.ai/v1
api_key: ${env.CEREBRAS_API_KEY:=}
```

@@ -14,14 +14,14 @@ Databricks inference provider for running models on Databricks' unified analytic

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_token` | `pydantic.types.SecretStr \| None` | No | | The Databricks API token |
| `url` | `str \| None` | No | | The URL for the Databricks model serving endpoint |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_token` | `SecretStr \| None` | No | | The Databricks API token |
| `base_url` | `HttpUrl \| None` | No | | The URL for the Databricks model serving endpoint (should include /serving-endpoints path) |

## Sample Configuration

```yaml
url: ${env.DATABRICKS_HOST:=}
base_url: ${env.DATABRICKS_HOST:=}
api_token: ${env.DATABRICKS_TOKEN:=}
```

@@ -14,14 +14,14 @@ Fireworks AI inference provider for Llama models and other AI models on the Fire

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `url` | `<class 'str'>` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `HttpUrl \| None` | No | https://api.fireworks.ai/inference/v1 | The URL for the Fireworks server |

## Sample Configuration

```yaml
url: https://api.fireworks.ai/inference/v1
base_url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=}
```

@@ -14,9 +14,9 @@ Google Gemini inference provider for accessing Gemini models and Google's AI ser

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |

## Sample Configuration

@@ -14,14 +14,14 @@ Groq inference provider for ultra-fast inference using Groq's LPU technology.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `url` | `<class 'str'>` | No | https://api.groq.com | The URL for the Groq AI server |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `HttpUrl \| None` | No | https://api.groq.com/openai/v1 | The URL for the Groq AI server |

## Sample Configuration

```yaml
url: https://api.groq.com
base_url: https://api.groq.com/openai/v1
api_key: ${env.GROQ_API_KEY:=}
```

@@ -14,8 +14,8 @@ HuggingFace Inference Endpoints provider for dedicated model serving.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `endpoint_name` | `<class 'str'>` | No | | The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided. |
| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
| `endpoint_name` | `str` | No | | The name of the Hugging Face Inference Endpoint in the format of '{namespace}/{endpoint_name}' (e.g. 'my-cool-org/meta-llama-3-1-8b-instruct-rce'). Namespace is optional and will default to the user account if not provided. |
| `api_token` | `SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |

## Sample Configuration

@@ -14,8 +14,8 @@ HuggingFace Inference API serverless provider for on-demand model inference.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `huggingface_repo` | `<class 'str'>` | No | | The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct') |
| `api_token` | `pydantic.types.SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |
| `huggingface_repo` | `str` | No | | The model ID of the model on the Hugging Face Hub (e.g. 'meta-llama/Meta-Llama-3.1-70B-Instruct') |
| `api_token` | `SecretStr \| None` | No | | Your Hugging Face user access token (will default to locally saved token if not provided) |

## Sample Configuration

@@ -14,14 +14,14 @@ Llama OpenAI-compatible provider for using Llama models with OpenAI API format.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `openai_compat_api_base` | `<class 'str'>` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `HttpUrl \| None` | No | https://api.llama.com/compat/v1/ | The URL for the Llama API server |

## Sample Configuration

```yaml
openai_compat_api_base: https://api.llama.com/compat/v1/
base_url: https://api.llama.com/compat/v1/
api_key: ${env.LLAMA_API_KEY}
```

@@ -14,17 +14,16 @@ NVIDIA inference provider for accessing NVIDIA NIM models and AI services.

| Field | Type | Required | Default | Description |
|-------|------|----------|---------|-------------|
| `allowed_models` | `list[str \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `<class 'bool'>` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `pydantic.types.SecretStr \| None` | No | | Authentication credential for the provider |
| `url` | `<class 'str'>` | No | https://integrate.api.nvidia.com | A base url for accessing the NVIDIA NIM |
| `timeout` | `<class 'int'>` | No | 60 | Timeout for the HTTP requests |
| `append_api_version` | `<class 'bool'>` | No | True | When set to false, the API version will not be appended to the base_url. By default, it is true. |
| `allowed_models` | `list[str] \| None` | No | | List of models that should be registered with the model registry. If None, all models are allowed. |
| `refresh_models` | `bool` | No | False | Whether to refresh models periodically from the provider |
| `api_key` | `SecretStr \| None` | No | | Authentication credential for the provider |
| `base_url` | `HttpUrl \| None` | No | https://integrate.api.nvidia.com/v1 | A base url for accessing the NVIDIA NIM |
| `timeout` | `int` | No | 60 | Timeout for the HTTP requests |
| `rerank_model_to_url` | `dict[str, str]` | No | `{'nv-rerank-qa-mistral-4b:1': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking', 'nvidia/nv-rerankqa-mistral-4b-v3': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/nv-rerankqa-mistral-4b-v3/reranking', 'nvidia/llama-3.2-nv-rerankqa-1b-v2': 'https://ai.api.nvidia.com/v1/retrieval/nvidia/llama-3_2-nv-rerankqa-1b-v2/reranking'}` | Mapping of rerank model identifiers to their API endpoints. |

## Sample Configuration

```yaml
url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
base_url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com/v1}
api_key: ${env.NVIDIA_API_KEY:=}
append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
```

Some files were not shown because too many files have changed in this diff.