Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-05 18:22:41 +00:00)

Merge branch 'meta-llama:main' into add-unit-tests-and-fix-cli
Commit 696bcf6051
459 changed files with 39,114 additions and 10,751 deletions
Deleted file (9 lines):
---
description: General rules always applicable across the project
globs:
alwaysApply: true
---
# Style

- Comments must add value to code. Don't write filler comments explaining what you are doing next; they just add noise.
- Add a comment to clarify surprising behavior which would not be obvious. Good variable naming and clear code organization is more important.
.github/CODEOWNERS (2 changed lines)
@@ -2,4 +2,4 @@
 # These owners will be the default owners for everything in
 # the repo. Unless a later match takes precedence,
-* @ashwinb @yanxi0830 @hardikjshah @dltn @raghotham @dineshyv @vladimirivic @sixianyi0721 @ehhuang @terrytangyuan @SLR722
+* @ashwinb @yanxi0830 @hardikjshah @dltn @raghotham @dineshyv @vladimirivic @sixianyi0721 @ehhuang @terrytangyuan @SLR722 @leseb
.github/TRIAGERS.md (new file, 2 lines)
# This file documents Triage members in the Llama Stack community
@franciscojavierarceo @leseb
.github/dependabot.yml (17 changed lines)
@@ -5,4 +5,19 @@ updates:
   - package-ecosystem: "github-actions"
     directory: "/" # Will use the default workflow location of `.github/workflows`
     schedule:
-      interval: "daily"
+      interval: "weekly"
+      day: "saturday"
+    commit-message:
+      prefix: chore(github-deps)
+  - package-ecosystem: "uv"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+      day: "saturday"
+    # ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
+    open-pull-requests-limit: 0
+    labels:
+      - type/dependencies
+      - python
+    commit-message:
+      prefix: chore(python-deps)
.github/workflows/changelog.yml (new file, 29 lines)
name: Update Changelog

on:
  release:
    types: [published, unpublished, created, edited, deleted, released]

permissions:
  contents: read

jobs:
  generate_changelog:
    name: Generate changelog
    permissions:
      contents: write # for peter-evans/create-pull-request to create branch
      pull-requests: write # for peter-evans/create-pull-request to create a PR
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: main
          fetch-depth: 0
      - run: |
          python ./scripts/gen-changelog.py
      - uses: peter-evans/create-pull-request@v7
        with:
          title: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
          commit-message: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
          branch: create-pull-request/changelog
          signoff: true
.github/workflows/integration-tests.yml (new file, 114 lines)
name: Integration Tests

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'distributions/**'
      - 'llama_stack/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/integration-tests.yml' # This workflow

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  test-matrix:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Listing tests manually since some of them currently fail
        # TODO: generate matrix list from tests/integration when fixed
        test-type: [agents, inference, datasets, inspect, scoring, post_training, providers]
        client-type: [library, http]
      fail-fast: false # we want to run all tests regardless of failure

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v5
        with:
          python-version: "3.10"

      - name: Install Ollama
        run: |
          curl -fsSL https://ollama.com/install.sh | sh

      - name: Pull Ollama image
        run: |
          ollama pull llama3.2:3b-instruct-fp16

      - name: Start Ollama in background
        run: |
          nohup ollama run llama3.2:3b-instruct-fp16 > ollama.log 2>&1 &

      - name: Set Up Environment and Install Dependencies
        run: |
          uv sync --extra dev --extra test
          uv pip install ollama faiss-cpu
          # always test against the latest version of the client
          # TODO: this is not necessarily a good idea. we need to test against both published and latest
          # to find out backwards compatibility issues.
          uv pip install git+https://github.com/meta-llama/llama-stack-client-python.git@main
          uv pip install -e .
          llama stack build --template ollama --image-type venv

      - name: Wait for Ollama to start
        run: |
          echo "Waiting for Ollama..."
          for i in {1..30}; do
            if curl -s http://localhost:11434 | grep -q "Ollama is running"; then
              echo "Ollama is running!"
              exit 0
            fi
            sleep 1
          done
          echo "Ollama failed to start"
          ollama ps
          cat ollama.log
          exit 1

      - name: Start Llama Stack server in background
        if: matrix.client-type == 'http'
        env:
          INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
        run: |
          source .venv/bin/activate
          nohup uv run llama stack run ./llama_stack/templates/ollama/run.yaml --image-type venv > server.log 2>&1 &

      - name: Wait for Llama Stack server to be ready
        if: matrix.client-type == 'http'
        run: |
          echo "Waiting for Llama Stack server..."
          for i in {1..30}; do
            if curl -s http://localhost:8321/v1/health | grep -q "OK"; then
              echo "Llama Stack server is up!"
              exit 0
            fi
            sleep 1
          done
          echo "Llama Stack server failed to start"
          cat server.log
          exit 1

      - name: Run Integration Tests
        env:
          INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
        run: |
          if [ "${{ matrix.client-type }}" == "library" ]; then
            stack_config="ollama"
          else
            stack_config="http://localhost:8321"
          fi
          uv run pytest -v tests/integration/${{ matrix.test-type }} --stack-config=${stack_config} \
            -k "not(builtin_tool or safety_with_image or code_interpreter or test_rag)" \
            --text-model="meta-llama/Llama-3.2-3B-Instruct" \
            --embedding-model=all-MiniLM-L6-v2
.github/workflows/pre-commit.yml (4 added lines)
@@ -5,6 +5,10 @@ on:
   push:
     branches: [main]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   pre-commit:
     runs-on: ubuntu-latest
.github/workflows/providers-build.yml (new file, 83 lines)
name: Test Llama Stack Build

on:
  push:
    branches:
      - main
    paths:
      - 'llama_stack/cli/stack/build.py'
      - 'llama_stack/cli/stack/_build.py'
      - 'llama_stack/distribution/build.*'
      - 'llama_stack/distribution/*.sh'
      - '.github/workflows/providers-build.yml'
  pull_request:
    paths:
      - 'llama_stack/cli/stack/build.py'
      - 'llama_stack/cli/stack/_build.py'
      - 'llama_stack/distribution/build.*'
      - 'llama_stack/distribution/*.sh'
      - '.github/workflows/providers-build.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      templates: ${{ steps.set-matrix.outputs.templates }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Generate Template List
        id: set-matrix
        run: |
          templates=$(ls llama_stack/templates/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
          echo "templates=$templates" >> "$GITHUB_OUTPUT"

  build:
    needs: generate-matrix
    runs-on: ubuntu-latest
    strategy:
      matrix:
        template: ${{ fromJson(needs.generate-matrix.outputs.templates) }}
        image-type: [venv, container]
      fail-fast: false # We want to run all jobs even if some fail

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Install uv
        uses: astral-sh/setup-uv@v5
        with:
          python-version: "3.10"

      - name: Install LlamaStack
        run: |
          uv venv
          source .venv/bin/activate
          uv pip install -e .

      - name: Print build dependencies
        run: |
          uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only

      - name: Run Llama Stack Build
        run: |
          # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
          # LLAMA_STACK_DIR is set to the current directory so we are building from the source
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test

      - name: Print dependencies in the image
        if: matrix.image-type == 'venv'
        run: |
          source test/bin/activate
          uv pip list
.github/workflows/semantic-pr.yml (4 added lines)
@@ -8,6 +8,10 @@ on:
       - reopened
       - synchronize

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 permissions:
   contents: read
.github/workflows/stale_bot.yml (new file, 45 lines)
name: Close stale issues and PRs

on:
  schedule:
    - cron: '0 0 * * *' # every day at midnight

env:
  LC_ALL: en_US.UTF-8

defaults:
  run:
    shell: bash

permissions:
  contents: read

jobs:
  stale:
    permissions:
      issues: write
      pull-requests: write
    runs-on: ubuntu-latest
    steps:
      - name: Stale Action
        uses: actions/stale@v9
        with:
          stale-issue-label: 'stale'
          stale-issue-message: >
            This issue has been automatically marked as stale because it has not had activity within 60 days.
            It will be automatically closed if no further activity occurs within 30 days.
          close-issue-message: >
            This issue has been automatically closed due to inactivity.
            Please feel free to reopen if you feel it is still relevant!
          days-before-issue-stale: 60
          days-before-issue-close: 30
          stale-pr-label: 'stale'
          stale-pr-message: >
            This pull request has been automatically marked as stale because it has not had activity within 60 days.
            It will be automatically closed if no further activity occurs within 30 days.
          close-pr-message: >
            This pull request has been automatically closed due to inactivity.
            Please feel free to reopen if you intend to continue working on it!
          days-before-pr-stale: 60
          days-before-pr-close: 30
          operations-per-run: 300
.github/workflows/unit-tests.yml (35 changed lines)
@@ -1,36 +1,59 @@
 name: Unit Tests

 on:
+  push:
+    branches: [ main ]
   pull_request:
     branches: [ main ]
+    paths:
+      - 'distributions/**'
+      - 'llama_stack/**'
+      - 'tests/unit/**'
+      - 'uv.lock'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - '.github/workflows/unit-tests.yml' # This workflow
   workflow_dispatch:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   unit-tests:
     runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python:
+          - "3.10"
+          - "3.11"
+          - "3.12"
+          - "3.13"
     steps:
       - uses: actions/checkout@v4

-      - name: Set up Python
+      - name: Set up Python ${{ matrix.python }}
         uses: actions/setup-python@v5
         with:
-          python-version: '3.10.16'
+          python-version: ${{ matrix.python }}

       - uses: astral-sh/setup-uv@v5
         with:
-          python-version: '3.10.16'
+          python-version: ${{ matrix.python }}
           enable-cache: false

       - name: Run unit tests
         run: |
-          uv run -p 3.10.16 --with-editable . --with-editable ".[dev]" --with-editable ".[unit]" pytest --cov=llama_stack -s -v tests/unit/ --junitxml=pytest-report.xml
+          PYTHON_VERSION=${{ matrix.python }} ./scripts/unit-tests.sh --cov=llama_stack --junitxml=pytest-report-${{ matrix.python }}.xml --cov-report=html:htmlcov-${{ matrix.python }}

       - name: Upload test results
         if: always()
         uses: actions/upload-artifact@v4
         with:
-          name: test-results
+          name: test-results-${{ matrix.python }}
           path: |
             .pytest_cache/
-            pytest-report.xml
+            pytest-report-${{ matrix.python }}.xml
+            htmlcov-${{ matrix.python }}/
           retention-days: 7
.github/workflows/update-readthedocs.yml (4 added lines)
@@ -22,6 +22,10 @@ on:
       - 'pyproject.toml'
       - '.github/workflows/update-readthedocs.yml'

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 jobs:
   update-readthedocs:
     runs-on: ubuntu-latest
.gitignore (1 added line)
@@ -22,3 +22,4 @@ pyrightconfig.json
 venv/
 pytest-report.xml
 .coverage
+.python-version
.pre-commit-config.yaml (2 hunks)
@@ -8,6 +8,7 @@ repos:
     rev: v5.0.0  # Latest stable version
     hooks:
       - id: check-merge-conflict
+        args: ['--assume-in-merge']
       - id: trailing-whitespace
         exclude: '\.py$'  # Exclude Python files as Ruff already handles them
       - id: check-added-large-files
@@ -76,12 +77,24 @@ repos:
         name: Distribution Template Codegen
         additional_dependencies:
           - uv==0.6.0
-        entry: uv run --extra codegen python -m llama_stack.scripts.distro_codegen
+        entry: uv run --extra codegen ./scripts/distro_codegen.py
         language: python
         pass_filenames: false
         require_serial: true
         files: ^llama_stack/templates/.*$|^llama_stack/providers/.*/inference/.*/models\.py$

+  - repo: local
+    hooks:
+      - id: openapi-codegen
+        name: API Spec Codegen
+        additional_dependencies:
+          - uv==0.6.2
+        entry: sh -c 'uv run --with ".[dev]" ./docs/openapi_generator/run_openapi_generator.sh > /dev/null'
+        language: python
+        pass_filenames: false
+        require_serial: true
+        files: ^llama_stack/apis/|^docs/openapi_generator/
+
 ci:
   autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
   autoupdate_commit_msg: ⬆ [pre-commit.ci] pre-commit autoupdate
.python-version (deleted, 1 line)
-3.10
CHANGELOG.md (71 lines added between the header and the v0.1.6 entry)

# Changelog

# v0.1.8
Published on: 2025-03-24T01:28:50Z

# v0.1.8 Release Notes

### Build and Test Agents
* Safety: Integrated NVIDIA as a safety provider.
* VectorDB: Added Qdrant as an inline provider.
* Agents: Added support for multiple tool groups in agents.
* Agents: Simplified imports for Agents in client package

### Agent Evals and Model Customization
* Introduced DocVQA and IfEval benchmarks.

### Deploying and Monitoring Agents
* Introduced a Containerfile and image workflow for the Playground.
* Implemented support for Bearer (API Key) authentication.
* Added attribute-based access control for resources.
* Fixes on docker deployments: use --pull always and standardized the default port to 8321
* Deprecated: /v1/inspect/providers use /v1/providers/ instead

### Better Engineering
* Consolidated scripts under the ./scripts directory.
* Addressed mypy violations in various modules.
* Added Dependabot scans for Python dependencies.
* Implemented a scheduled workflow to update the changelog automatically.
* Enforced concurrency to reduce CI loads.

### New Contributors
* @cmodi-meta made their first contribution in https://github.com/meta-llama/llama-stack/pull/1650
* @jeffmaury made their first contribution in https://github.com/meta-llama/llama-stack/pull/1671
* @derekhiggins made their first contribution in https://github.com/meta-llama/llama-stack/pull/1698
* @Bobbins228 made their first contribution in https://github.com/meta-llama/llama-stack/pull/1745

**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.7...v0.1.8

---

# v0.1.7
Published on: 2025-03-14T22:30:51Z

## 0.1.7 Release Notes

### Build and Test Agents
* Inference: ImageType is now refactored to LlamaStackImageType
* Inference: Added tests to measure TTFT
* Inference: Bring back usage metrics
* Agents: Added endpoint for get agent, list agents and list sessions
* Agents: Automated conversion of type hints in client tool for lite llm format
* Agents: Deprecated ToolResponseMessage in agent.resume API
* Added Provider API for listing and inspecting provider info

### Agent Evals and Model Customization
* Eval: Added new eval benchmarks Math 500 and BFCL v3
* Deploy and Monitoring of Agents
* Telemetry: Fix tracing to work across coroutines

### Better Engineering
* Display code coverage for unit tests
* Updated call sites (inference, tool calls, agents) to move to async non-blocking calls
* Unit tests also run on Python 3.11, 3.12, and 3.13
* Added ollama inference to Integration tests CI
* Improved documentation across examples, testing, CLI, and the providers table

---

# v0.1.6
Published on: 2025-03-08T04:35:08Z
CONTRIBUTING.md
@@ -61,6 +61,7 @@ outlined on that page and do not file a public issue.
 We use [uv](https://github.com/astral-sh/uv) to manage python dependencies and virtual environments.
 You can install `uv` by following this [guide](https://docs.astral.sh/uv/getting-started/installation/).

 You can install the dependencies by running:

 ```bash
@@ -70,17 +71,24 @@ uv pip install -e .
 source .venv/bin/activate
 ```

+> [!NOTE]
+> You can pin a specific version of Python to use for `uv` by adding a `.python-version` file in the root project directory.
+> Otherwise, `uv` will automatically select a Python version according to the `requires-python` section of the `pyproject.toml`.
+> For more info, see the [uv docs around Python versions](https://docs.astral.sh/uv/concepts/python-versions/).
+
 Note that you can create a dotenv file `.env` that includes necessary environment variables:
 ```
 LLAMA_STACK_BASE_URL=http://localhost:8321
 LLAMA_STACK_CLIENT_LOG=debug
 LLAMA_STACK_PORT=8321
-LLAMA_STACK_CONFIG=
+LLAMA_STACK_CONFIG=<provider-name>
+TAVILY_SEARCH_API_KEY=
+BRAVE_SEARCH_API_KEY=
 ```

 And then use this dotenv file when running client SDK tests via the following:
 ```bash
-uv run --env-file .env -- pytest -v tests/api/inference/test_text_inference.py
+uv run --env-file .env -- pytest -v tests/integration/inference/test_text_inference.py --text-model=meta-llama/Llama-3.1-8B-Instruct
 ```

 ## Pre-commit Hooks
@@ -102,6 +110,26 @@ uv run pre-commit run --all-files
 > [!CAUTION]
 > Before pushing your changes, make sure that the pre-commit hooks have passed successfully.

+## Running unit tests
+
+You can run the unit tests by running:
+
+```bash
+source .venv/bin/activate
+./scripts/unit-tests.sh
+```
+
+If you'd like to run for a non-default version of Python (currently 3.10), pass the `PYTHON_VERSION` variable as follows:
+
+```
+source .venv/bin/activate
+PYTHON_VERSION=3.13 ./scripts/unit-tests.sh
+```
+
+## Running integration tests
+
+You can run integration tests following the instructions [here](tests/integration/README.md).
+
 ## Adding a new dependency to the project

 To add a new dependency to the project, you can use the `uv` command. For example, to add `foo` to the project, you can run:
@@ -113,9 +141,11 @@ uv sync

 ## Coding Style

+* Comments should provide meaningful insights into the code. Avoid filler comments that simply describe the next step, as they create unnecessary clutter; the same goes for docstrings.
+* Prefer comments that clarify surprising behavior and/or relationships between parts of the code rather than explain what the next line of code does.
+* When catching exceptions, prefer a specific exception type rather than a broad catch-all like `Exception`.
+* Error messages should be prefixed with "Failed to ..."
 * 4 spaces for indentation rather than tabs
-* 80 character line length
-* ...
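As a brief, hypothetical illustration of the comment and error-handling conventions in the bullets above (the function and file here are made up, not taken from the llama-stack source):

```python
import json


def load_run_config(path: str) -> dict:
    # Hypothetical helper, not from the llama-stack codebase.
    # An empty file is deliberately treated as an empty config so templates can
    # ship placeholders; that surprising behavior is worth a comment.
    try:
        with open(path) as f:
            raw = f.read()
    except OSError as e:  # a specific exception type, not a bare `except Exception`
        raise RuntimeError(f"Failed to read run config at {path}") from e

    if not raw.strip():
        return {}
    try:
        return json.loads(raw)
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse run config at {path}: {e}") from e
```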
 ## Common Tasks
@@ -137,14 +167,14 @@ LLAMA_STACK_DIR=$(pwd) LLAMA_STACK_CLIENT_DIR=../llama-stack-client-python llama
 ### Updating Provider Configurations

-If you have made changes to a provider's configuration in any form (introducing a new config key, or changing models, etc.), you should run `python llama_stack/scripts/distro_codegen.py` to re-generate various YAML files as well as the documentation. You should not change `docs/source/.../distributions/` files manually as they are auto-generated.
+If you have made changes to a provider's configuration in any form (introducing a new config key, or changing models, etc.), you should run `./scripts/distro_codegen.py` to re-generate various YAML files as well as the documentation. You should not change `docs/source/.../distributions/` files manually as they are auto-generated.

 ### Building the Documentation

 If you are making changes to the documentation at [https://llama-stack.readthedocs.io/en/latest/](https://llama-stack.readthedocs.io/en/latest/), you can use the following command to build the documentation and preview your changes. You will need [Sphinx](https://www.sphinx-doc.org/en/master/) and the readthedocs theme.

 ```bash
-cd llama-stack/docs
+cd docs
 uv sync --extra docs

 # This rebuilds the documentation pages.
@@ -159,8 +189,7 @@ uv run sphinx-autobuild source build/html --write-all
 If you modify or add new API endpoints, update the API documentation accordingly. You can do this by running the following command:

 ```bash
-uv sync --extra dev
-uv run ./docs/openapi_generator/run_openapi_generator.sh
+uv run --with ".[dev]" ./docs/openapi_generator/run_openapi_generator.sh
 ```

 The generated API documentation will be available in `docs/_static/`. Make sure to review the changes before committing.
MANIFEST.in (1 changed line)
@@ -1,5 +1,5 @@
 include pyproject.toml
-include distributions/dependencies.json
+include llama_stack/templates/dependencies.json
 include llama_stack/models/llama/llama3/tokenizer.model
 include llama_stack/distribution/*.sh
 include llama_stack/cli/scripts/*.sh
README.md (26 changed lines)
@@ -4,6 +4,8 @@
 [](https://pypi.org/project/llama-stack/)
 [](https://github.com/meta-llama/llama-stack/blob/main/LICENSE)
 [](https://discord.gg/llama-stack)
+[](https://github.com/meta-llama/llama-stack/actions/workflows/unit-tests.yml?query=branch%3Amain)
+[](https://github.com/meta-llama/llama-stack/actions/workflows/integration-tests.yml?query=branch%3Amain)

 [**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Colab Notebook**](./docs/getting_started.ipynb)

@@ -50,6 +52,10 @@ Here is a list of the various API providers and available distributions that can
 | PG Vector | Single Node | | | ✅ | | |
 | PyTorch ExecuTorch | On-device iOS | ✅ | ✅ | | | |
 | vLLM | Hosted and Single Node | | ✅ | | | |
+| OpenAI | Hosted | | ✅ | | | |
+| Anthropic | Hosted | | ✅ | | | |
+| Gemini | Hosted | | ✅ | | | |

 ### Distributions

@@ -67,26 +73,6 @@ A Llama Stack Distribution (or "distro") is a pre-configured bundle of provider
 | Fireworks | [llamastack/distribution-fireworks](https://hub.docker.com/repository/docker/llamastack/distribution-fireworks/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/fireworks.html) |
 | vLLM | [llamastack/distribution-remote-vllm](https://hub.docker.com/repository/docker/llamastack/distribution-remote-vllm/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/remote-vllm.html) |

-### Installation
-
-You have two ways to install this repository:
-
-* **Install as a package**:
-   You can install the repository directly from [PyPI](https://pypi.org/project/llama-stack/) by running the following command:
-   ```bash
-   pip install llama-stack
-   ```
-
-* **Install from source**:
-   If you prefer to install from the source code, we recommend using [uv](https://github.com/astral-sh/uv).
-   Then, run the following commands:
-   ```bash
-   git clone git@github.com:meta-llama/llama-stack.git
-   cd llama-stack
-
-   uv sync
-   uv pip install -e .
-   ```
-
 ### Documentation
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/bedrock/build.yaml
|
|
|
@ -1,15 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: distribution-bedrock
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run.yaml:/root/llamastack-run-bedrock.yaml
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-bedrock.yaml"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/bedrock/run.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/cerebras/build.yaml
|
|
|
@ -1,16 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: llamastack/distribution-cerebras
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run.yaml:/root/llamastack-run-cerebras.yaml
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-cerebras.yaml"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/cerebras/run.yaml
|
|
|
@ -1,50 +0,0 @@
|
||||||
services:
|
|
||||||
text-generation-inference:
|
|
||||||
image: registry.dell.huggingface.co/enterprise-dell-inference-meta-llama-meta-llama-3.1-8b-instruct
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- $HOME/.cache/huggingface:/data
|
|
||||||
ports:
|
|
||||||
- "5009:5009"
|
|
||||||
devices:
|
|
||||||
- nvidia.com/gpu=all
|
|
||||||
environment:
|
|
||||||
- CUDA_VISIBLE_DEVICES=0,1,2,3,4
|
|
||||||
- NUM_SHARD=4
|
|
||||||
- MAX_BATCH_PREFILL_TOKENS=32768
|
|
||||||
- MAX_INPUT_TOKENS=8000
|
|
||||||
- MAX_TOTAL_TOKENS=8192
|
|
||||||
command: []
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
reservations:
|
|
||||||
devices:
|
|
||||||
- driver: nvidia
|
|
||||||
# that's the closest analogue to --gpus; provide
|
|
||||||
# an integer amount of devices or 'all'
|
|
||||||
count: all
|
|
||||||
# Devices are reserved using a list of capabilities, making
|
|
||||||
# capabilities the only required field. A device MUST
|
|
||||||
# satisfy all the requested capabilities for a successful
|
|
||||||
# reservation.
|
|
||||||
capabilities: [gpu]
|
|
||||||
runtime: nvidia
|
|
||||||
llamastack:
|
|
||||||
depends_on:
|
|
||||||
text-generation-inference:
|
|
||||||
condition: service_healthy
|
|
||||||
image: llamastack/distribution-tgi
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
# Link to TGI run.yaml file
|
|
||||||
- ./run.yaml:/root/my-run.yaml
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
# Hack: wait for TGI server to start before starting docker
|
|
||||||
entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1,44 +0,0 @@
|
||||||
version: '2'
|
|
||||||
image_name: local
|
|
||||||
container_image: null
|
|
||||||
conda_env: local
|
|
||||||
apis:
|
|
||||||
- shields
|
|
||||||
- agents
|
|
||||||
- models
|
|
||||||
- memory
|
|
||||||
- memory_banks
|
|
||||||
- inference
|
|
||||||
- safety
|
|
||||||
providers:
|
|
||||||
inference:
|
|
||||||
- provider_id: tgi0
|
|
||||||
provider_type: remote::tgi
|
|
||||||
config:
|
|
||||||
url: http://127.0.0.1:80
|
|
||||||
safety:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::llama-guard
|
|
||||||
config:
|
|
||||||
model: Llama-Guard-3-1B
|
|
||||||
excluded_categories: []
|
|
||||||
- provider_id: meta1
|
|
||||||
provider_type: inline::prompt-guard
|
|
||||||
config:
|
|
||||||
model: Prompt-Guard-86M
|
|
||||||
memory:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::faiss
|
|
||||||
config: {}
|
|
||||||
agents:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::meta-reference
|
|
||||||
config:
|
|
||||||
persistence_store:
|
|
||||||
namespace: null
|
|
||||||
type: sqlite
|
|
||||||
db_path: ~/.llama/runtime/kvstore.db
|
|
||||||
telemetry:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::meta-reference
|
|
||||||
config: {}
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/fireworks/build.yaml
|
|
|
@ -1,14 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: llamastack/distribution-fireworks
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
environment:
|
|
||||||
- FIREWORKS_API_KEY=${FIREWORKS_API_KEY}
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --template fireworks"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/fireworks/run.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/meta-reference-gpu/build.yaml
|
|
|
@ -1,34 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: llamastack/distribution-meta-reference-gpu
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run.yaml:/root/my-run.yaml
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
devices:
|
|
||||||
- nvidia.com/gpu=all
|
|
||||||
environment:
|
|
||||||
- CUDA_VISIBLE_DEVICES=0
|
|
||||||
command: []
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
reservations:
|
|
||||||
devices:
|
|
||||||
- driver: nvidia
|
|
||||||
# that's the closest analogue to --gpus; provide
|
|
||||||
# an integer amount of devices or 'all'
|
|
||||||
count: 1
|
|
||||||
# Devices are reserved using a list of capabilities, making
|
|
||||||
# capabilities the only required field. A device MUST
|
|
||||||
# satisfy all the requested capabilities for a successful
|
|
||||||
# reservation.
|
|
||||||
capabilities: [gpu]
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
||||||
runtime: nvidia
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/meta-reference-gpu/run-with-safety.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/meta-reference-gpu/run.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/meta-reference-quantized-gpu/build.yaml
|
|
|
@ -1,35 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: llamastack/distribution-meta-reference-quantized-gpu
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run.yaml:/root/my-run.yaml
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
devices:
|
|
||||||
- nvidia.com/gpu=all
|
|
||||||
environment:
|
|
||||||
- CUDA_VISIBLE_DEVICES=0
|
|
||||||
command: []
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
reservations:
|
|
||||||
devices:
|
|
||||||
- driver: nvidia
|
|
||||||
# that's the closest analogue to --gpus; provide
|
|
||||||
# an integer amount of devices or 'all'
|
|
||||||
count: 1
|
|
||||||
# Devices are reserved using a list of capabilities, making
|
|
||||||
# capabilities the only required field. A device MUST
|
|
||||||
# satisfy all the requested capabilities for a successful
|
|
||||||
# reservation.
|
|
||||||
capabilities: [gpu]
|
|
||||||
runtime: nvidia
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1,58 +0,0 @@
|
||||||
version: '2'
|
|
||||||
image_name: local
|
|
||||||
container_image: null
|
|
||||||
conda_env: local
|
|
||||||
apis:
|
|
||||||
- shields
|
|
||||||
- agents
|
|
||||||
- models
|
|
||||||
- memory
|
|
||||||
- memory_banks
|
|
||||||
- inference
|
|
||||||
- safety
|
|
||||||
providers:
|
|
||||||
inference:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::meta-reference-quantized
|
|
||||||
config:
|
|
||||||
model: Llama3.2-3B-Instruct:int4-qlora-eo8
|
|
||||||
quantization:
|
|
||||||
type: int4
|
|
||||||
torch_seed: null
|
|
||||||
max_seq_len: 2048
|
|
||||||
max_batch_size: 1
|
|
||||||
- provider_id: meta1
|
|
||||||
provider_type: inline::meta-reference-quantized
|
|
||||||
config:
|
|
||||||
# not a quantized model !
|
|
||||||
model: Llama-Guard-3-1B
|
|
||||||
quantization: null
|
|
||||||
torch_seed: null
|
|
||||||
max_seq_len: 2048
|
|
||||||
max_batch_size: 1
|
|
||||||
safety:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::llama-guard
|
|
||||||
config:
|
|
||||||
model: Llama-Guard-3-1B
|
|
||||||
excluded_categories: []
|
|
||||||
- provider_id: meta1
|
|
||||||
provider_type: inline::prompt-guard
|
|
||||||
config:
|
|
||||||
model: Prompt-Guard-86M
|
|
||||||
memory:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::meta-reference
|
|
||||||
config: {}
|
|
||||||
agents:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::meta-reference
|
|
||||||
config:
|
|
||||||
persistence_store:
|
|
||||||
namespace: null
|
|
||||||
type: sqlite
|
|
||||||
db_path: ~/.llama/runtime/kvstore.db
|
|
||||||
telemetry:
|
|
||||||
- provider_id: meta0
|
|
||||||
provider_type: inline::meta-reference
|
|
||||||
config: {}
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/ollama/build.yaml
|
|
|
@ -1,71 +0,0 @@
|
||||||
services:
|
|
||||||
ollama:
|
|
||||||
image: ollama/ollama:latest
|
|
||||||
network_mode: ${NETWORK_MODE:-bridge}
|
|
||||||
volumes:
|
|
||||||
- ~/.ollama:/root/.ollama
|
|
||||||
ports:
|
|
||||||
- "11434:11434"
|
|
||||||
environment:
|
|
||||||
OLLAMA_DEBUG: 1
|
|
||||||
command: []
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
limits:
|
|
||||||
memory: 8G # Set maximum memory
|
|
||||||
reservations:
|
|
||||||
memory: 8G # Set minimum memory reservation
|
|
||||||
# healthcheck:
|
|
||||||
# # ugh, no CURL in ollama image
|
|
||||||
# test: ["CMD", "curl", "-f", "http://ollama:11434"]
|
|
||||||
# interval: 10s
|
|
||||||
# timeout: 5s
|
|
||||||
# retries: 5
|
|
||||||
|
|
||||||
ollama-init:
|
|
||||||
image: ollama/ollama:latest
|
|
||||||
depends_on:
|
|
||||||
- ollama
|
|
||||||
# condition: service_healthy
|
|
||||||
network_mode: ${NETWORK_MODE:-bridge}
|
|
||||||
environment:
|
|
||||||
- OLLAMA_HOST=ollama
|
|
||||||
- INFERENCE_MODEL=${INFERENCE_MODEL}
|
|
||||||
- SAFETY_MODEL=${SAFETY_MODEL:-}
|
|
||||||
volumes:
|
|
||||||
- ~/.ollama:/root/.ollama
|
|
||||||
- ./pull-models.sh:/pull-models.sh
|
|
||||||
entrypoint: ["/pull-models.sh"]
|
|
||||||
|
|
||||||
llamastack:
|
|
||||||
depends_on:
|
|
||||||
ollama:
|
|
||||||
condition: service_started
|
|
||||||
ollama-init:
|
|
||||||
condition: service_started
|
|
||||||
image: ${LLAMA_STACK_IMAGE:-llamastack/distribution-ollama}
|
|
||||||
network_mode: ${NETWORK_MODE:-bridge}
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
# Link to ollama run.yaml file
|
|
||||||
- ~/local/llama-stack/:/app/llama-stack-source
|
|
||||||
- ./run${SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml
|
|
||||||
ports:
|
|
||||||
- "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}"
|
|
||||||
environment:
|
|
||||||
- INFERENCE_MODEL=${INFERENCE_MODEL}
|
|
||||||
- SAFETY_MODEL=${SAFETY_MODEL:-}
|
|
||||||
- OLLAMA_URL=http://ollama:11434
|
|
||||||
entrypoint: >
|
|
||||||
python -m llama_stack.distribution.server.server /root/my-run.yaml \
|
|
||||||
--port ${LLAMA_STACK_PORT:-5001}
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 10s
|
|
||||||
max_attempts: 3
|
|
||||||
window: 60s
|
|
||||||
volumes:
|
|
||||||
ollama:
|
|
||||||
ollama-init:
|
|
||||||
llamastack:
|
|
|
@ -1,18 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# This source code is licensed under the terms described in the LICENSE file in
|
|
||||||
# the root directory of this source tree.
|
|
||||||
|
|
||||||
echo "Preloading (${INFERENCE_MODEL}, ${SAFETY_MODEL})..."
|
|
||||||
for model in ${INFERENCE_MODEL} ${SAFETY_MODEL}; do
|
|
||||||
echo "Preloading $model..."
|
|
||||||
if ! ollama run "$model"; then
|
|
||||||
echo "Failed to pull and run $model"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "All models pulled successfully"
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/ollama/run-with-safety.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/ollama/run.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/nvidia/build.yaml
|
|
|
@ -1,19 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: distribution-nvidia:dev
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run.yaml:/root/llamastack-run-nvidia.yaml
|
|
||||||
ports:
|
|
||||||
- "8321:8321"
|
|
||||||
environment:
|
|
||||||
- INFERENCE_MODEL=${INFERENCE_MODEL:-Llama3.1-8B-Instruct}
|
|
||||||
- NVIDIA_API_KEY=${NVIDIA_API_KEY:-}
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml-config /root/llamastack-run-nvidia.yaml"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/nvidia/run.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/remote-vllm/build.yaml
|
|
|
@ -1,100 +0,0 @@
|
||||||
services:
|
|
||||||
vllm-inference:
|
|
||||||
image: vllm/vllm-openai:latest
|
|
||||||
volumes:
|
|
||||||
- $HOME/.cache/huggingface:/root/.cache/huggingface
|
|
||||||
network_mode: ${NETWORK_MODE:-bridged}
|
|
||||||
ports:
|
|
||||||
- "${VLLM_INFERENCE_PORT:-5100}:${VLLM_INFERENCE_PORT:-5100}"
|
|
||||||
devices:
|
|
||||||
- nvidia.com/gpu=all
|
|
||||||
environment:
|
|
||||||
- CUDA_VISIBLE_DEVICES=${VLLM_INFERENCE_GPU:-0}
|
|
||||||
- HUGGING_FACE_HUB_TOKEN=$HF_TOKEN
|
|
||||||
command: >
|
|
||||||
--gpu-memory-utilization 0.75
|
|
||||||
--model ${VLLM_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct}
|
|
||||||
--enforce-eager
|
|
||||||
--max-model-len 8192
|
|
||||||
--max-num-seqs 16
|
|
||||||
--port ${VLLM_INFERENCE_PORT:-5100}
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:${VLLM_INFERENCE_PORT:-5100}/v1/health"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 10s
|
|
||||||
retries: 5
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
reservations:
|
|
||||||
devices:
|
|
||||||
- driver: nvidia
|
|
||||||
capabilities: [gpu]
|
|
||||||
runtime: nvidia
|
|
||||||
|
|
||||||
# A little trick:
|
|
||||||
# if VLLM_SAFETY_MODEL is set, we will create a service for the safety model
|
|
||||||
# otherwise, the entry will end in a hyphen which gets ignored by docker compose
|
|
||||||
vllm-${VLLM_SAFETY_MODEL:+safety}:
|
|
||||||
image: vllm/vllm-openai:latest
|
|
||||||
volumes:
|
|
||||||
- $HOME/.cache/huggingface:/root/.cache/huggingface
|
|
||||||
network_mode: ${NETWORK_MODE:-bridged}
|
|
||||||
ports:
|
|
||||||
- "${VLLM_SAFETY_PORT:-5101}:${VLLM_SAFETY_PORT:-5101}"
|
|
||||||
devices:
|
|
||||||
- nvidia.com/gpu=all
|
|
||||||
environment:
|
|
||||||
- CUDA_VISIBLE_DEVICES=${VLLM_SAFETY_GPU:-1}
|
|
||||||
- HUGGING_FACE_HUB_TOKEN=$HF_TOKEN
|
|
||||||
command: >
|
|
||||||
--gpu-memory-utilization 0.75
|
|
||||||
--model ${VLLM_SAFETY_MODEL}
|
|
||||||
--enforce-eager
|
|
||||||
--max-model-len 8192
|
|
||||||
--max-num-seqs 16
|
|
||||||
--port ${VLLM_SAFETY_PORT:-5101}
|
|
||||||
healthcheck:
|
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:${VLLM_SAFETY_PORT:-5101}/v1/health"]
|
|
||||||
interval: 30s
|
|
||||||
timeout: 10s
|
|
||||||
retries: 5
|
|
||||||
deploy:
|
|
||||||
resources:
|
|
||||||
reservations:
|
|
||||||
devices:
|
|
||||||
- driver: nvidia
|
|
||||||
capabilities: [gpu]
|
|
||||||
runtime: nvidia
|
|
||||||
llamastack:
|
|
||||||
depends_on:
|
|
||||||
- vllm-inference:
|
|
||||||
condition: service_healthy
|
|
||||||
- vllm-${VLLM_SAFETY_MODEL:+safety}:
|
|
||||||
condition: service_healthy
|
|
||||||
# image: llamastack/distribution-remote-vllm
|
|
||||||
image: llamastack/distribution-remote-vllm:test-0.0.52rc3
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run${VLLM_SAFETY_MODEL:+-with-safety}.yaml:/root/llamastack-run-remote-vllm.yaml
|
|
||||||
network_mode: ${NETWORK_MODE:-bridged}
|
|
||||||
environment:
|
|
||||||
- VLLM_URL=http://vllm-inference:${VLLM_INFERENCE_PORT:-5100}/v1
|
|
||||||
- VLLM_SAFETY_URL=http://vllm-safety:${VLLM_SAFETY_PORT:-5101}/v1
|
|
||||||
- INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct}
|
|
||||||
- MAX_TOKENS=${MAX_TOKENS:-4096}
|
|
||||||
- SQLITE_STORE_DIR=${SQLITE_STORE_DIR:-$HOME/.llama/distributions/remote-vllm}
|
|
||||||
- SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B}
|
|
||||||
ports:
|
|
||||||
- "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}"
|
|
||||||
# Hack: wait for vLLM server to start before starting docker
|
|
||||||
entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml --port 5001"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
||||||
volumes:
|
|
||||||
vllm-inference:
|
|
||||||
vllm-safety:
|
|
||||||
llamastack:
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/remote-vllm/run-with-safety.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/remote-vllm/run.yaml
|
|
|
@ -1,9 +0,0 @@
|
||||||
name: runpod
|
|
||||||
distribution_spec:
|
|
||||||
description: Use Runpod for running LLM inference
|
|
||||||
providers:
|
|
||||||
inference: remote::runpod
|
|
||||||
memory: meta-reference
|
|
||||||
safety: meta-reference
|
|
||||||
agents: meta-reference
|
|
||||||
telemetry: meta-reference
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/sambanova/build.yaml
|
|
|
@ -1,16 +0,0 @@
|
||||||
services:
|
|
||||||
llamastack:
|
|
||||||
image: llamastack/distribution-sambanova
|
|
||||||
network_mode: "host"
|
|
||||||
volumes:
|
|
||||||
- ~/.llama:/root/.llama
|
|
||||||
- ./run.yaml:/root/llamastack-run-sambanova.yaml
|
|
||||||
ports:
|
|
||||||
- "5000:5000"
|
|
||||||
entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-sambanova.yaml"
|
|
||||||
deploy:
|
|
||||||
restart_policy:
|
|
||||||
condition: on-failure
|
|
||||||
delay: 3s
|
|
||||||
max_attempts: 5
|
|
||||||
window: 60s
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/sambanova/run.yaml
|
|
|
@ -1 +0,0 @@
|
||||||
../../llama_stack/templates/tgi/build.yaml
|
|
|
@@ -1,103 +0,0 @@
services:
  tgi-inference:
    image: ghcr.io/huggingface/text-generation-inference:latest
    volumes:
      - $HOME/.cache/huggingface:/data
    network_mode: ${NETWORK_MODE:-bridged}
    ports:
      - "${TGI_INFERENCE_PORT:-8080}:${TGI_INFERENCE_PORT:-8080}"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=${TGI_INFERENCE_GPU:-0}
      - HF_TOKEN=$HF_TOKEN
      - HF_HOME=/data
      - HF_DATASETS_CACHE=/data
      - HF_MODULES_CACHE=/data
      - HF_HUB_CACHE=/data
    command: >
      --dtype bfloat16
      --usage-stats off
      --sharded false
      --model-id ${TGI_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct}
      --port ${TGI_INFERENCE_PORT:-8080}
      --cuda-memory-fraction 0.75
    healthcheck:
      test: ["CMD", "curl", "-f", "http://tgi-inference:${TGI_INFERENCE_PORT:-8080}/health"]
      interval: 5s
      timeout: 5s
      retries: 30
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
    runtime: nvidia

  tgi-${TGI_SAFETY_MODEL:+safety}:
    image: ghcr.io/huggingface/text-generation-inference:latest
    volumes:
      - $HOME/.cache/huggingface:/data
    network_mode: ${NETWORK_MODE:-bridged}
    ports:
      - "${TGI_SAFETY_PORT:-8081}:${TGI_SAFETY_PORT:-8081}"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=${TGI_SAFETY_GPU:-1}
      - HF_TOKEN=$HF_TOKEN
      - HF_HOME=/data
      - HF_DATASETS_CACHE=/data
      - HF_MODULES_CACHE=/data
      - HF_HUB_CACHE=/data
    command: >
      --dtype bfloat16
      --usage-stats off
      --sharded false
      --model-id ${TGI_SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B}
      --port ${TGI_SAFETY_PORT:-8081}
      --cuda-memory-fraction 0.75
    healthcheck:
      test: ["CMD", "curl", "-f", "http://tgi-safety:${TGI_SAFETY_PORT:-8081}/health"]
      interval: 5s
      timeout: 5s
      retries: 30
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              capabilities: [gpu]
    runtime: nvidia

  llamastack:
    depends_on:
      tgi-inference:
        condition: service_healthy
      tgi-${TGI_SAFETY_MODEL:+safety}:
        condition: service_healthy
    image: llamastack/distribution-tgi:test-0.0.52rc3
    network_mode: ${NETWORK_MODE:-bridged}
    volumes:
      - ~/.llama:/root/.llama
      - ./run${TGI_SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml
    ports:
      - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}"
    # Hack: wait for TGI server to start before starting docker
    entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
    restart_policy:
      condition: on-failure
      delay: 3s
      max_attempts: 5
      window: 60s
    environment:
      - TGI_URL=http://tgi-inference:${TGI_INFERENCE_PORT:-8080}
      - SAFETY_TGI_URL=http://tgi-safety:${TGI_SAFETY_PORT:-8081}
      - INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct}
      - SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B}

volumes:
  tgi-inference:
  tgi-safety:
  llamastack:
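As a usage note, not part of the diff: once this compose file is up, the TGI health endpoint it gates on and the Llama Stack endpoint it publishes can be exercised from Python. The localhost addresses, the default ports (TGI on 8080, the stack on 5001) and the models.list call below are assumptions based on the defaults above and the standard llama_stack_client API; adjust them if you override the env vars.

import time

import requests
from llama_stack_client import LlamaStackClient

# Poll the TGI inference server's /health endpoint, mirroring the compose healthcheck.
for _ in range(30):
    try:
        if requests.get("http://localhost:8080/health", timeout=5).ok:
            break
    except requests.RequestException:
        pass
    time.sleep(5)

# Then talk to the Llama Stack server that fronts it (default LLAMA_STACK_PORT is 5001).
client = LlamaStackClient(base_url="http://localhost:5001")
for model in client.models.list():
    print(model.identifier)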
@@ -1 +0,0 @@
../../llama_stack/templates/tgi/run-with-safety.yaml

@@ -1 +0,0 @@
../../llama_stack/templates/tgi/run.yaml

@@ -1 +0,0 @@
../../llama_stack/templates/together/build.yaml
@@ -1,14 +0,0 @@
services:
  llamastack:
    image: llamastack/distribution-together
    ports:
      - "8321:8321"
    environment:
      - TOGETHER_API_KEY=${TOGETHER_API_KEY}
    entrypoint: bash -c "python -m llama_stack.distribution.server.server --template together"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
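For illustration only: a minimal client call against this distribution, assuming the server is reachable on the published port and the standard llama_stack_client chat API. The model id is one the together template registers (see the notebook output later in this diff).

from llama_stack_client import LlamaStackClient

# The compose file above publishes the server on 8321 and forwards TOGETHER_API_KEY to it.
client = LlamaStackClient(base_url="http://localhost:8321")

response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(response.completion_message.content)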
@@ -1 +0,0 @@
../../llama_stack/templates/together/run.yaml

@@ -1 +0,0 @@
../../llama_stack/templates/inline-vllm/build.yaml
@@ -1,35 +0,0 @@
services:
  llamastack:
    image: llamastack/distribution-inline-vllm
    network_mode: "host"
    volumes:
      - ~/.llama:/root/.llama
      - ./run.yaml:/root/my-run.yaml
    ports:
      - "8321:8321"
    devices:
      - nvidia.com/gpu=all
    environment:
      - CUDA_VISIBLE_DEVICES=0
    command: []
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # that's the closest analogue to --gpus; provide
              # an integer amount of devices or 'all'
              count: 1
              # Devices are reserved using a list of capabilities, making
              # capabilities the only required field. A device MUST
              # satisfy all the requested capabilities for a successful
              # reservation.
              capabilities: [gpu]
    runtime: nvidia
    entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml"
    deploy:
      restart_policy:
        condition: on-failure
        delay: 3s
        max_attempts: 5
        window: 60s
@@ -1,66 +0,0 @@
version: '2'
image_name: local
container_image: null
conda_env: local
apis:
- shields
- agents
- models
- memory
- memory_banks
- inference
- safety
providers:
  inference:
    - provider_id: vllm-inference
      provider_type: inline::vllm
      config:
        model: Llama3.2-3B-Instruct
        tensor_parallel_size: 1
        gpu_memory_utilization: 0.4
        enforce_eager: true
        max_tokens: 4096
    - provider_id: vllm-inference-safety
      provider_type: inline::vllm
      config:
        model: Llama-Guard-3-1B
        tensor_parallel_size: 1
        gpu_memory_utilization: 0.2
        enforce_eager: true
        max_tokens: 4096
  safety:
    - provider_id: meta0
      provider_type: inline::llama-guard
      config:
        model: Llama-Guard-3-1B
        excluded_categories: []
    # Uncomment to use prompt guard
    # - provider_id: meta1
    #   provider_type: inline::prompt-guard
    #   config:
    #     model: Prompt-Guard-86M
  memory:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config: {}
    # Uncomment to use pgvector
    # - provider_id: pgvector
    #   provider_type: remote::pgvector
    #   config:
    #     host: 127.0.0.1
    #     port: 5432
    #     db: postgres
    #     user: postgres
    #     password: mysecretpassword
  agents:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config:
        persistence_store:
          namespace: null
          type: sqlite
          db_path: ~/.llama/runtime/agents_store.db
  telemetry:
    - provider_id: meta0
      provider_type: inline::meta-reference
      config: {}
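A local equivalent of the compose entrypoint above, run directly against this config; the ./run.yaml path is illustrative.

import subprocess

# Same command the container entrypoint uses, pointed at a local copy of the config.
subprocess.run(
    [
        "python", "-m", "llama_stack.distribution.server.server",
        "--yaml_config", "./run.yaml",
    ],
    check=True,
)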
docs/_static/llama-stack-spec.html (vendored): 1587 lines changed; diff suppressed because it is too large.
docs/_static/llama-stack-spec.yaml (vendored): 1048 lines changed; diff suppressed because it is too large.
docs/_static/providers/vector_io/read_time_comparison_sqlite-vec-faiss.png (vendored): new binary file, 33 KiB.
docs/_static/providers/vector_io/write_time_comparison_sqlite-vec-faiss.png (vendored): new binary file, 37 KiB.
docs/_static/providers/vector_io/write_time_sequence_sqlite-vec-faiss.png (vendored): new binary file, 56 KiB.
@@ -4,6 +4,21 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import os
+import time
+
+
 def pytest_collection_modifyitems(items):
     for item in items:
         item.name = item.name.replace(' ', '_')
+
+
+def pytest_runtest_teardown(item):
+    interval_seconds = os.getenv("LLAMA_STACK_TEST_INTERVAL_SECONDS")
+    if interval_seconds:
+        time.sleep(float(interval_seconds))
+
+
+def pytest_configure(config):
+    config.option.tbstyle = "short"
+    config.option.disable_warnings = True
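The new pytest_runtest_teardown hook sleeps between tests whenever LLAMA_STACK_TEST_INTERVAL_SECONDS is set, which can pace calls to rate-limited providers. A minimal sketch of driving it; the test path and interval value are illustrative, not from the diff.

import os

import pytest

# Equivalent to: LLAMA_STACK_TEST_INTERVAL_SECONDS=0.5 pytest tests/ -v
os.environ["LLAMA_STACK_TEST_INTERVAL_SECONDS"] = "0.5"  # sleep 0.5s after every test
raise SystemExit(pytest.main(["tests/", "-v"]))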
File diff suppressed because one or more lines are too long
@@ -47,9 +47,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from llama_stack_client import LlamaStackClient\n",
+    "from llama_stack_client import LlamaStackClient, Agent\n",
     "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
-    "from llama_stack_client.lib.agents.agent import Agent\n",
     "from rich.pretty import pprint\n",
     "import json\n",
     "import uuid\n",
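A minimal sketch of the setup these imports enable; the "together" template name and the initialize() call follow the usual library-client pattern and are assumptions here, not shown in this hunk.

# Agent is now re-exported at the package top level, matching the updated notebook line.
from llama_stack_client import LlamaStackClient, Agent  # noqa: F401
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient

# Run the together distribution in-process rather than against a remote server.
client = LlamaStackAsLibraryClient("together")
client.initialize()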
@@ -84,16 +84,14 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "Not in Google Colab environment\n",
-    "\u001b[33mWarning: `bwrap` is not available. Code interpreter tool will not work correctly.\u001b[0m\n"
+    "Not in Google Colab environment\n"
    ]
   },
   {
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "/opt/anaconda3/envs/master/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-    " from .autonotebook import tqdm as notebook_tqdm\n"
+    "Warning: `bwrap` is not available. Code interpreter tool will not work correctly.\n"
    ]
   },
   {

@@ -117,76 +115,146 @@
(HTML rendering of the printed run config.) The new output replaces the memory API with vector_io, drops memory_banks: [], adds logging: null, and reorders the benchmarks, container_image, and datasets keys. The models list additionally registers the Together provider-side aliases as their own model_ids (Meta-Llama-3.1-8B/70B/405B-Instruct-Turbo, Llama-3.2-3B-Instruct-Turbo, Llama-3.2-11B/90B-Vision-Instruct-Turbo, Llama-3.3-70B-Instruct-Turbo, Meta-Llama-Guard-3-8B, Llama-Guard-3-11B-Vision-Turbo) plus the togethercomputer/m2-bert-80M-8k-retrieval and m2-bert-80M-32k-retrieval embedding models (embedding dimension 768, context lengths 8192 and 32768).
@@ -203,14 +271,26 @@
In the same output, the huggingface and localfs datasetio provider configs and the meta-reference eval provider config change from an empty config to a sqlite kvstore (namespace null, db_path under /Users/xiyan/.llama/distributions/together/: huggingface_datasetio.db, localfs_datasetio.db, meta_reference_eval.db).
@@ -222,16 +302,9 @@
The memory provider section (faiss with a sqlite kvstore at faiss_store.db) is removed, and the llama-guard safety config changes from an empty config to one with excluded_categories: [].
@@ -269,7 +342,26 @@
tool_runtime gains a remote::model-context-protocol provider and a remote::wolfram-alpha provider (api_key redacted), a new vector_io section registers the faiss provider with a sqlite kvstore at faiss_store.db, and a server section (port 8321, tls_certfile and tls_keyfile null) is added.
@@ -288,6 +380,11 @@
Alongside the existing builtin::code_interpreter toolgroup, a builtin::wolfram_alpha toolgroup (provider wolfram-alpha) and an empty vector_dbs: [] list are added.
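For illustration, an agent could opt into the newly registered toolgroup. The Agent constructor arguments shown here are assumptions based on the current client API (not taken from this diff), and the wolfram-alpha provider needs its API key configured server-side for the tool to work.

from llama_stack_client import LlamaStackClient, Agent

client = LlamaStackClient(base_url="http://localhost:8321")

# builtin::wolfram_alpha is the toolgroup added in this hunk; the model id is one
# registered by the together template earlier in the output.
agent = Agent(
    client,
    model="meta-llama/Llama-3.3-70B-Instruct",
    instructions="You are a helpful assistant. Use Wolfram Alpha for math questions.",
    tools=["builtin::wolfram_alpha"],
)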
@@ -298,76 +395,146 @@  @@ -384,14 +551,26 @@  @@ -403,16 +582,9 @@  @@ -450,7 +622,26 @@  @@ -469,6 +660,11 @@
These hunks update the plain-text (ANSI) rendering of the same output cell; the changes mirror the HTML rendering summarized above.
@@ -532,7 +728,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 3,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
@@ -643,17 +839,7 @@
    "id": "DJkmoG2kq1_P",
    "outputId": "8493ee59-c6ff-4bb6-d787-f295944db1cf"
   },
-  "outputs": [
-   {
-    "name": "stderr",
-    "output_type": "stream",
-    "text": [
-     "Generating dev split: 100%|██████████| 5/5 [00:00<00:00, 139.81 examples/s]\n",
-     "Generating validation split: 100%|██████████| 30/30 [00:00<00:00, 258.29 examples/s]\n",
-     "Generating test split: 100%|██████████| 287/287 [00:01<00:00, 197.69 examples/s]\n"
-    ]
-   }
-  ],
+  "outputs": [],
   "source": [
    "import datasets\n",
    "\n",
@@ -676,7 +862,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 4,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/",
@ -691,7 +877,7 @@
|
||||||
"name": "stderr",
|
"name": "stderr",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"100%|██████████| 5/5 [00:42<00:00, 8.60s/it]\n"
|
"100%|██████████| 5/5 [00:33<00:00, 6.71s/it]\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@@ -699,16 +885,18 @@
In the EvaluateResponse shown in the HTML output, the example generations change: a standalone {'generated_answer': 'Answer: D'} entry is dropped, the first long generation now identifies the Colorado potato beetle and the false potato beetle rather than giving the sunflower-leaf "two pathogens" answer, the sunflower powdery-mildew answer (still concluding "Answer: B) Two pathogens") and the grapefruit fungal-gummosis answer (still concluding "Answer: E") are reworded, and the remaining short 'Answer: D' entries are unchanged.
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'**Causes of Splitting Petioles in Rhubarb**\\n\\nThe following factors can cause the petioles of rhubarb to split:\\n\\n* **Physiological Problems**: Issues such as water stress, nutrient deficiencies, or extreme temperatures can lead to splitting.\\n* **Phytoplasma Infection**: A bacterial infection caused by phytoplasma can lead to splitting of the petioles.\\n* **Animal Damage**: Pests like slugs, snails, or rodents can damage the plant and cause splitting.\\n* **Bacterial Infection**: Bacterial infections can also cause splitting.\\n\\nAs a result, the correct answer is:\\n\\n*Answer*: A) Physiological problems'</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">\"**Analysis of the Image**\\n\\nThe image provided shows a rhubarb plant with split petioles. To determine the cause of this issue, we need to consider various factors that could lead to such damage.\\n\\n**Possible Causes of Petiole Splitting**\\n\\n* **Physiological Problems**: Rhubarb plants can experience physiological stress due to environmental factors like extreme temperatures, waterlogging, or nutrient deficiencies. This stress can cause the petioles to split.\\n* **Phytoplasma Infection**: Phytoplasma is a type of bacteria that can infect plants, including rhubarb. It can cause symptoms such as yellowing leaves, stunted growth, and splitting of petioles.\\n* **Animal Damage**: Animals like rabbits, deer, or insects can damage rhubarb plants by eating the leaves or stems, which can lead to splitting of the petioles.\\n* **Bacteria**: Bacterial infections can also cause damage to rhubarb plants, including splitting of the petioles.\\n\\n**Conclusion**\\n\\nBased on the analysis, it is clear that all the options listed (A) Physiological problems, B) Phytoplasma infection, D) Animal damage, and E) Bacteria) could potentially cause the petioles of the rhubarb plant to split. Therefore, there is no single option that would not be a cause for the petioles splitting.\\n\\n**Answer**: C) I don't know and don't want to guess.\"</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"font-weight: bold\">]</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"font-weight: bold\">]</span>,\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"color: #808000; text-decoration-color: #808000\">scores</span>=<span style=\"font-weight: bold\">{</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"color: #808000; text-decoration-color: #808000\">scores</span>=<span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
@@ -723,16 +911,18 @@
|
||||||
"text/plain": [
|
"text/plain": [
|
||||||
"\u001b[1;35mEvaluateResponse\u001b[0m\u001b[1m(\u001b[0m\n",
|
"\u001b[1;35mEvaluateResponse\u001b[0m\u001b[1m(\u001b[0m\n",
|
||||||
"\u001b[2;32m│ \u001b[0m\u001b[33mgenerations\u001b[0m=\u001b[1m[\u001b[0m\n",
|
"\u001b[2;32m│ \u001b[0m\u001b[33mgenerations\u001b[0m=\u001b[1m[\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'Answer: D'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'The image shows a sunflower leaf with small, dark spots and white powdery patches. The dark spots are likely caused by a fungal pathogen, such as rust or septoria leaf spot, while the white powdery patches are likely caused by a fungal pathogen, such as powdery mildew.\\n\\nSince there are two distinct types of lesions on the leaf, it is likely that there are two different pathogens infecting the leaf.\\n\\n**Answer:** B\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Two pathogens'\u001b[0m\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'**Potato Pests**\\n\\nThe two insects depicted are:\\n\\n* **Colorado Potato Beetle \u001b[0m\u001b[32m(\u001b[0m\u001b[32mLeptinotarsa decemlineata\u001b[0m\u001b[32m)\u001b[0m\u001b[32m**: Characterized by black and yellow stripes, this beetle is a significant pest of potatoes. It feeds on the leaves and can cause substantial damage to the crop.\\n* **False Potato Beetle \u001b[0m\u001b[32m(\u001b[0m\u001b[32mLeptinotarsa juncta\u001b[0m\u001b[32m)\u001b[0m\u001b[32m**: Also known as the false Colorado beetle, this species has similar coloring but is not as harmful to potatoes as the Colorado potato beetle.'\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"The question requires the identification of the reason behind the massive gum production on the trunks of grapefruit trees in Cyprus, despite appearing healthy from a distance. The correct answer can be deduced by analyzing the symptoms and considering the possible causes.\\n\\nTo determine the correct answer, let's evaluate each option:\\n\\nA\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Don't know or not sure: This option is incorrect because it does not provide a specific reason for the gum production.\\n\\nB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Physiological stress: This option is also incorrect because it is too broad and does not specifically explain the gum production.\\n\\nC\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Bacterial disease: This option is incorrect because bacterial diseases typically cause different symptoms such as leaf spots, blights, or wilting.\\n\\nD\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Harvesting damage when cutting with knives: This option is incorrect because harvesting damage would likely cause wounds or scars on the tree, but it would not lead to massive gum production.\\n\\nE\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Fungal gummosis: This option is the most likely cause of the gum production. Fungal gummosis is a common disease in citrus trees, including grapefruit, that causes the production of gum or sap on the trunks and branches. The disease is typically caused by fungi such as Phytophthora or Diplodia, which infect the tree through wounds or natural openings. The gum production is a defense mechanism by the tree to try to seal off the infection and prevent further damage.\\n\\nTherefore, the correct answer is:\\n\\nAnswer: E\"\u001b[0m\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"The image shows a sunflower leaf with a powdery mildew, which is a fungal disease caused by various species of fungi. The white powdery coating on the leaves is a characteristic symptom of this disease. The leaf also has some black spots, which could be indicative of a secondary infection or another type of disease. However, without more information or a closer examination, it's difficult to determine the exact cause of the black spots.\\n\\nBased on the image alone, we can see at least two types of symptoms: the powdery mildew and the black spots. This suggests that there may be more than one pathogen involved, but it's also possible that the black spots are a result of the same fungal infection causing the powdery mildew.\\n\\nAnswer: B\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Two pathogens\"\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'The symptoms observed, characterized by the massive gum production on the trunks of the grapefruit trees in Cyprus, suggest a physiological or pathological response. Given the absence of visible signs of damage or pests from a higher point on a hillside, and considering the specific nature of the symptom \u001b[0m\u001b[32m(\u001b[0m\u001b[32mgum production\u001b[0m\u001b[32m)\u001b[0m\u001b[32m, we can infer that the cause is more likely related to an internal process within the tree rather than external damage from harvesting. While physiological stress \u001b[0m\u001b[32m(\u001b[0m\u001b[32mB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m could lead to such symptoms, the primary reason for gum production in trees, especially in citrus species, is typically linked to disease. Among the options provided, fungal gummosis \u001b[0m\u001b[32m(\u001b[0m\u001b[32mE\u001b[0m\u001b[32m)\u001b[0m\u001b[32m is a condition known to cause gumming in citrus trees, which aligns with the observed symptoms. Therefore, without direct evidence of external damage \u001b[0m\u001b[32m(\u001b[0m\u001b[32mharvesting\u001b[0m\u001b[32m)\u001b[0m\u001b[32m or confirmation of physiological stress being the primary cause, the most appropriate answer based on the information given is:\\n\\nAnswer: E'\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'Answer: D'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'Answer: D'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'**Causes of Splitting Petioles in Rhubarb**\\n\\nThe following factors can cause the petioles of rhubarb to split:\\n\\n* **Physiological Problems**: Issues such as water stress, nutrient deficiencies, or extreme temperatures can lead to splitting.\\n* **Phytoplasma Infection**: A bacterial infection caused by phytoplasma can lead to splitting of the petioles.\\n* **Animal Damage**: Pests like slugs, snails, or rodents can damage the plant and cause splitting.\\n* **Bacterial Infection**: Bacterial infections can also cause splitting.\\n\\nAs a result, the correct answer is:\\n\\n*Answer*: A\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Physiological problems'\u001b[0m\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"**Analysis of the Image**\\n\\nThe image provided shows a rhubarb plant with split petioles. To determine the cause of this issue, we need to consider various factors that could lead to such damage.\\n\\n**Possible Causes of Petiole Splitting**\\n\\n* **Physiological Problems**: Rhubarb plants can experience physiological stress due to environmental factors like extreme temperatures, waterlogging, or nutrient deficiencies. This stress can cause the petioles to split.\\n* **Phytoplasma Infection**: Phytoplasma is a type of bacteria that can infect plants, including rhubarb. It can cause symptoms such as yellowing leaves, stunted growth, and splitting of petioles.\\n* **Animal Damage**: Animals like rabbits, deer, or insects can damage rhubarb plants by eating the leaves or stems, which can lead to splitting of the petioles.\\n* **Bacteria**: Bacterial infections can also cause damage to rhubarb plants, including splitting of the petioles.\\n\\n**Conclusion**\\n\\nBased on the analysis, it is clear that all the options listed \u001b[0m\u001b[32m(\u001b[0m\u001b[32mA\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Physiological problems, B\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Phytoplasma infection, D\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Animal damage, and E\u001b[0m\u001b[32m)\u001b[0m\u001b[32m Bacteria\u001b[0m\u001b[32m)\u001b[0m\u001b[32m could potentially cause the petioles of the rhubarb plant to split. Therefore, there is no single option that would not be a cause for the petioles splitting.\\n\\n**Answer**: C\u001b[0m\u001b[32m)\u001b[0m\u001b[32m I don't know and don't want to guess.\"\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
"\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n",
|
"\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ \u001b[0m\u001b[33mscores\u001b[0m=\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ \u001b[0m\u001b[33mscores\u001b[0m=\u001b[1m{\u001b[0m\n",
|
||||||
|
@@ -773,16 +963,19 @@
|
||||||
"\n",
|
"\n",
|
||||||
"client.benchmarks.register(\n",
|
"client.benchmarks.register(\n",
|
||||||
" benchmark_id=\"meta-reference::mmmu\",\n",
|
" benchmark_id=\"meta-reference::mmmu\",\n",
|
||||||
|
" # Note: we can use any value as `dataset_id` because we'll be using the `evaluate_rows` API which accepts the \n",
|
||||||
|
" # `input_rows` argument and does not fetch data from the dataset.\n",
|
||||||
" dataset_id=f\"mmmu-{subset}-{split}\",\n",
|
" dataset_id=f\"mmmu-{subset}-{split}\",\n",
|
||||||
" scoring_functions=[\"basic::regex_parser_multiple_choice_answer\"],\n",
|
" # Note: for the same reason as above, we can use any value as `scoring_functions`.\n",
|
||||||
|
" scoring_functions=[],\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"response = client.eval.evaluate_rows_alpha(\n",
|
"response = client.eval.evaluate_rows(\n",
|
||||||
" benchmark_id=\"meta-reference::mmmu\",\n",
|
" benchmark_id=\"meta-reference::mmmu\",\n",
|
||||||
" input_rows=eval_rows,\n",
|
" input_rows=eval_rows,\n",
|
||||||
|
" # Note: Here we define the actual scoring functions.\n",
|
||||||
" scoring_functions=[\"basic::regex_parser_multiple_choice_answer\"],\n",
|
" scoring_functions=[\"basic::regex_parser_multiple_choice_answer\"],\n",
|
||||||
" benchmark_config={\n",
|
" benchmark_config={\n",
|
||||||
" \"type\": \"benchmark\",\n",
|
|
||||||
" \"eval_candidate\": {\n",
|
" \"eval_candidate\": {\n",
|
||||||
" \"type\": \"model\",\n",
|
" \"type\": \"model\",\n",
|
||||||
" \"model\": \"meta-llama/Llama-3.2-90B-Vision-Instruct\",\n",
|
" \"model\": \"meta-llama/Llama-3.2-90B-Vision-Instruct\",\n",
|
||||||
|
@@ -815,7 +1008,7 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 8,
|
"execution_count": 5,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "HXmZf3Ymw-aX"
|
"id": "HXmZf3Ymw-aX"
|
||||||
},
|
},
|
||||||
|
@@ -823,39 +1016,33 @@
|
||||||
"source": [
|
"source": [
|
||||||
"simpleqa_dataset_id = \"huggingface::simpleqa\"\n",
|
"simpleqa_dataset_id = \"huggingface::simpleqa\"\n",
|
||||||
"\n",
|
"\n",
|
||||||
"_ = client.datasets.register(\n",
|
"register_dataset_response = client.datasets.register(\n",
|
||||||
|
" purpose=\"eval/messages-answer\",\n",
|
||||||
|
" source={\n",
|
||||||
|
" \"type\": \"uri\",\n",
|
||||||
|
" \"uri\": \"huggingface://datasets/llamastack/simpleqa?split=train\",\n",
|
||||||
|
" },\n",
|
||||||
" dataset_id=simpleqa_dataset_id,\n",
|
" dataset_id=simpleqa_dataset_id,\n",
|
||||||
" provider_id=\"huggingface\",\n",
|
")"
|
||||||
" url={\"uri\": \"https://huggingface.co/datasets/llamastack/simpleqa\"},\n",
|
|
||||||
" metadata={\n",
|
|
||||||
" \"path\": \"llamastack/simpleqa\",\n",
|
|
||||||
" \"split\": \"train\",\n",
|
|
||||||
" },\n",
|
|
||||||
" dataset_schema={\n",
|
|
||||||
" \"input_query\": {\"type\": \"string\"},\n",
|
|
||||||
" \"expected_answer\": {\"type\": \"string\"},\n",
|
|
||||||
" \"chat_completion_input\": {\"type\": \"chat_completion_input\"},\n",
|
|
||||||
" },\n",
|
|
||||||
")\n"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 9,
|
"execution_count": 6,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"id": "Gc8azb4Rxr5J"
|
"id": "Gc8azb4Rxr5J"
|
||||||
},
|
},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"eval_rows = client.datasetio.get_rows_paginated(\n",
|
"eval_rows = client.datasets.iterrows(\n",
|
||||||
" dataset_id=simpleqa_dataset_id,\n",
|
" dataset_id=simpleqa_dataset_id,\n",
|
||||||
" rows_in_page=5,\n",
|
" limit=5,\n",
|
||||||
")\n"
|
")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 12,
|
"execution_count": 7,
|
||||||
"metadata": {
|
"metadata": {
|
||||||
"colab": {
|
"colab": {
|
||||||
"base_uri": "https://localhost:8080/",
|
"base_uri": "https://localhost:8080/",
|
||||||
|
@@ -876,7 +1063,7 @@
|
||||||
"name": "stderr",
|
"name": "stderr",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"100%|██████████| 5/5 [00:31<00:00, 6.38s/it]\n"
|
"100%|██████████| 5/5 [00:13<00:00, 2.71s/it]\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@@ -889,14 +1076,14 @@
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">\"Radcliffe College was a women's liberal arts college in Cambridge, Massachusetts. However, it merged with Harvard University in 1977 and is now known as the Radcliffe Institute for Advanced Study at Harvard University.\"</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">\"Radcliffe College was a women's liberal arts college in Cambridge, Massachusetts. However, it merged with Harvard University in 1977 and is now known as the Radcliffe Institute for Advanced Study at Harvard University.\"</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">}</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'I do not have information on the Leipzig 1877 tournament.'</span><span style=\"font-weight: bold\">}</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'I am unable to verify in whose honor the Leipzig 1877 tournament was organized.'</span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">\"I am unable to verify what Empress Elizabeth of Austria's favorite sculpture depicted at her villa Achilleion at Corfu, according to Karl Küchler.\"</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'generated_answer'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">\"I am unable to verify what Empress Elizabeth of Austria's favorite sculpture depicted at her villa Achilleion at Corfu, according to Karl Küchler.\"</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"font-weight: bold\">]</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"font-weight: bold\">]</span>,\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"color: #808000; text-decoration-color: #808000\">scores</span>=<span style=\"font-weight: bold\">{</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"color: #808000; text-decoration-color: #808000\">scores</span>=<span style=\"font-weight: bold\">{</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'llm-as-judge::405b-simpleqa'</span>: <span style=\"color: #800080; text-decoration-color: #800080; font-weight: bold\">ScoringResult</span><span style=\"font-weight: bold\">(</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'llm-as-judge::405b-simpleqa'</span>: <span style=\"color: #800080; text-decoration-color: #800080; font-weight: bold\">ScoringResult</span><span style=\"font-weight: bold\">(</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">aggregated_results</span>=<span style=\"font-weight: bold\">{}</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">aggregated_results</span>=<span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'categorical_count'</span>: <span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'categorical_count'</span>: <span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'A'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">1</span>, <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">4</span><span style=\"font-weight: bold\">}}}</span>,\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">score_rows</span>=<span style=\"font-weight: bold\">[</span>\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">score_rows</span>=<span style=\"font-weight: bold\">[</span>\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span>, <span style=\"color: #008000; text-decoration-color: #008000\">'judge_feedback'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span><span style=\"font-weight: bold\">}</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span>, <span style=\"color: #008000; text-decoration-color: #008000\">'judge_feedback'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span>, <span style=\"color: #008000; text-decoration-color: #008000\">'judge_feedback'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span><span style=\"font-weight: bold\">}</span>,\n",
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span>, <span style=\"color: #008000; text-decoration-color: #008000\">'judge_feedback'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'C'</span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
|
@@ -917,14 +1104,14 @@
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"Radcliffe College was a women's liberal arts college in Cambridge, Massachusetts. However, it merged with Harvard University in 1977 and is now known as the Radcliffe Institute for Advanced Study at Harvard University.\"\u001b[0m\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"Radcliffe College was a women's liberal arts college in Cambridge, Massachusetts. However, it merged with Harvard University in 1977 and is now known as the Radcliffe Institute for Advanced Study at Harvard University.\"\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'I do not have information on the Leipzig 1877 tournament.'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m'I am unable to verify in whose honor the Leipzig 1877 tournament was organized.'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"I am unable to verify what Empress Elizabeth of Austria's favorite sculpture depicted at her villa Achilleion at Corfu, according to Karl Küchler.\"\u001b[0m\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[32m'generated_answer'\u001b[0m: \u001b[32m\"I am unable to verify what Empress Elizabeth of Austria's favorite sculpture depicted at her villa Achilleion at Corfu, according to Karl Küchler.\"\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
"\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n",
|
"\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ \u001b[0m\u001b[33mscores\u001b[0m=\u001b[1m{\u001b[0m\n",
|
"\u001b[2;32m│ \u001b[0m\u001b[33mscores\u001b[0m=\u001b[1m{\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ \u001b[0m\u001b[32m'llm-as-judge::405b-simpleqa'\u001b[0m: \u001b[1;35mScoringResult\u001b[0m\u001b[1m(\u001b[0m\n",
|
"\u001b[2;32m│ │ \u001b[0m\u001b[32m'llm-as-judge::405b-simpleqa'\u001b[0m: \u001b[1;35mScoringResult\u001b[0m\u001b[1m(\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[33maggregated_results\u001b[0m=\u001b[1m{\u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[33maggregated_results\u001b[0m=\u001b[1m{\u001b[0m\u001b[32m'categorical_count'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'categorical_count'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'A'\u001b[0m: \u001b[1;36m1\u001b[0m, \u001b[32m'C'\u001b[0m: \u001b[1;36m4\u001b[0m\u001b[1m}\u001b[0m\u001b[1m}\u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ │ \u001b[0m\u001b[33mscore_rows\u001b[0m=\u001b[1m[\u001b[0m\n",
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[33mscore_rows\u001b[0m=\u001b[1m[\u001b[0m\n",
|
||||||
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'score'\u001b[0m: \u001b[32m'C'\u001b[0m, \u001b[32m'judge_feedback'\u001b[0m: \u001b[32m'C'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'score'\u001b[0m: \u001b[32m'C'\u001b[0m, \u001b[32m'judge_feedback'\u001b[0m: \u001b[32m'C'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'score'\u001b[0m: \u001b[32m'C'\u001b[0m, \u001b[32m'judge_feedback'\u001b[0m: \u001b[32m'C'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\u001b[32m'score'\u001b[0m: \u001b[32m'C'\u001b[0m, \u001b[32m'judge_feedback'\u001b[0m: \u001b[32m'C'\u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
|
@@ -955,12 +1142,11 @@
|
||||||
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
|
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
|
||||||
")\n",
|
")\n",
|
||||||
"\n",
|
"\n",
|
||||||
"response = client.eval.evaluate_rows_alpha(\n",
|
"response = client.eval.evaluate_rows(\n",
|
||||||
" benchmark_id=\"meta-reference::simpleqa\",\n",
|
" benchmark_id=\"meta-reference::simpleqa\",\n",
|
||||||
" input_rows=eval_rows.rows,\n",
|
" input_rows=eval_rows.data,\n",
|
||||||
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
|
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
|
||||||
" benchmark_config={\n",
|
" benchmark_config={\n",
|
||||||
" \"type\": \"benchmark\",\n",
|
|
||||||
" \"eval_candidate\": {\n",
|
" \"eval_candidate\": {\n",
|
||||||
" \"type\": \"model\",\n",
|
" \"type\": \"model\",\n",
|
||||||
" \"model\": \"meta-llama/Llama-3.2-90B-Vision-Instruct\",\n",
|
" \"model\": \"meta-llama/Llama-3.2-90B-Vision-Instruct\",\n",
|
||||||
|
@@ -1104,12 +1290,11 @@
|
||||||
" \"enable_session_persistence\": False,\n",
|
" \"enable_session_persistence\": False,\n",
|
||||||
"}\n",
|
"}\n",
|
||||||
"\n",
|
"\n",
|
||||||
"response = client.eval.evaluate_rows_alpha(\n",
|
"response = client.eval.evaluate_rows(\n",
|
||||||
" benchmark_id=\"meta-reference::simpleqa\",\n",
|
" benchmark_id=\"meta-reference::simpleqa\",\n",
|
||||||
" input_rows=eval_rows.rows,\n",
|
" input_rows=eval_rows.data,\n",
|
||||||
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
|
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
|
||||||
" benchmark_config={\n",
|
" benchmark_config={\n",
|
||||||
" \"type\": \"benchmark\",\n",
|
|
||||||
" \"eval_candidate\": {\n",
|
" \"eval_candidate\": {\n",
|
||||||
" \"type\": \"agent\",\n",
|
" \"type\": \"agent\",\n",
|
||||||
" \"config\": agent_config,\n",
|
" \"config\": agent_config,\n",
|
||||||
|
|
|
@@ -22,7 +22,7 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 2,
|
"execution_count": 13,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
{
|
{
|
||||||
|
@@ -34,10 +34,8 @@
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
"from llama_stack_client import LlamaStackClient\n",
|
"from llama_stack_client import LlamaStackClient, Agent\n",
|
||||||
"from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
|
"from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n",
|
||||||
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
|
|
||||||
"from llama_stack_client.lib.agents.agent import Agent\n",
|
|
||||||
"from rich.pretty import pprint\n",
|
"from rich.pretty import pprint\n",
|
||||||
"import json\n",
|
"import json\n",
|
||||||
"import uuid\n",
|
"import uuid\n",
|
||||||
|
@@ -70,7 +68,7 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 12,
|
"execution_count": 14,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
|
@@ -1397,6 +1395,349 @@
|
||||||
"pprint(session_response.turns)"
|
"pprint(session_response.turns)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### 3.1 Improved RAG with Long Context\n",
|
||||||
|
"\n",
|
||||||
|
"- Instead of performing reteival tool, we send documents as attachments to the agent and let it use the entire document context. \n",
|
||||||
|
"- Note how that the model is able to understand the entire context from documentation and answers the question with better factuality with improved retrieval. "
|
||||||
|
]
|
||||||
|
},
|
||||||
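As a quick orientation before the full cell below, here is a minimal sketch of this attachment-based flow. It assumes the `client` and `MODEL_ID` already set up earlier in the notebook, and attaches only one of the torchtune tutorial pages that the full cell registers.

```python
# Minimal sketch of attachment-based RAG; `client` and `MODEL_ID` are assumed
# to be configured earlier in the notebook.
import uuid

from llama_stack_client import Agent

# One torchtune tutorial page attached as plain text (the full cell below
# attaches several).
attachments = [
    {
        "content": {
            "uri": "https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/memory_optimizations.rst",
        },
        "mime_type": "text/plain",
    }
]

agent = Agent(
    client,
    model=MODEL_ID,
    instructions="Answer questions about torchtune using the attached documentation.",
)
session_id = agent.create_session(session_name=f"rag_attachment_sketch_{uuid.uuid4()}")

# Documents are passed per turn, so the model reads their full text instead of
# relying on retrieved chunks.
response = agent.create_turn(
    messages=[{"role": "user", "content": "What precision formats does torchtune support?"}],
    session_id=session_id,
    documents=attachments,
    stream=False,
)
print(response.output_message.content)
```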
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 19,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">Question:</span> What precision formats does torchtune support?\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;36mQuestion:\u001b[0m What precision formats does torchtune support?\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">Agent Answer:</span> Torchtune supports two precision formats: `fp32` <span style=\"font-weight: bold\">(</span>full-precision<span style=\"font-weight: bold\">)</span> and `bfloat16` <span style=\"font-weight: bold\">(</span>half-precision<span style=\"font-weight: bold\">)</span>. \n",
|
||||||
|
"The `bfloat16` format uses <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">2</span> bytes per model parameter, which is half the memory of `fp32`, and also improves \n",
|
||||||
|
"training speed.\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;33mAgent Answer:\u001b[0m Torchtune supports two precision formats: `fp32` \u001b[1m(\u001b[0mfull-precision\u001b[1m)\u001b[0m and `bfloat16` \u001b[1m(\u001b[0mhalf-precision\u001b[1m)\u001b[0m. \n",
|
||||||
|
"The `bfloat16` format uses \u001b[1;36m2\u001b[0m bytes per model parameter, which is half the memory of `fp32`, and also improves \n",
|
||||||
|
"training speed.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">Question:</span> What does DoRA stand for in torchtune?\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;36mQuestion:\u001b[0m What does DoRA stand for in torchtune?\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">Agent Answer:</span> DoRA stands for Weight-Decomposed Low-Rank Adaptation. It is a variant of LoRA <span style=\"font-weight: bold\">(</span>Low-Rank Adaptation<span style=\"font-weight: bold\">)</span> \n",
|
||||||
|
"that further decomposes the pre-trained weights into two components: magnitude and direction. The magnitude \n",
|
||||||
|
"component is a scalar vector that adjusts the scale, while the direction component corresponds to the original LoRA\n",
|
||||||
|
"decomposition and updates the orientation of weights. DoRA adds a small overhead to LoRA training due to the \n",
|
||||||
|
"addition of the magnitude parameter, but it has been shown to improve the performance of LoRA, particularly at low \n",
|
||||||
|
"ranks.\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;33mAgent Answer:\u001b[0m DoRA stands for Weight-Decomposed Low-Rank Adaptation. It is a variant of LoRA \u001b[1m(\u001b[0mLow-Rank Adaptation\u001b[1m)\u001b[0m \n",
|
||||||
|
"that further decomposes the pre-trained weights into two components: magnitude and direction. The magnitude \n",
|
||||||
|
"component is a scalar vector that adjusts the scale, while the direction component corresponds to the original LoRA\n",
|
||||||
|
"decomposition and updates the orientation of weights. DoRA adds a small overhead to LoRA training due to the \n",
|
||||||
|
"addition of the magnitude parameter, but it has been shown to improve the performance of LoRA, particularly at low \n",
|
||||||
|
"ranks.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">Question:</span> How does the CPUOffloadOptimizer reduce GPU memory usage?\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;36mQuestion:\u001b[0m How does the CPUOffloadOptimizer reduce GPU memory usage?\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">Agent Answer:</span> The CPUOffloadOptimizer reduces GPU memory usage by offloading optimizer states and gradients to the \n",
|
||||||
|
"CPU, and performing optimizer steps on the CPU. This can significantly reduce GPU memory usage at the cost of CPU \n",
|
||||||
|
"RAM and training speed.\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;33mAgent Answer:\u001b[0m The CPUOffloadOptimizer reduces GPU memory usage by offloading optimizer states and gradients to the \n",
|
||||||
|
"CPU, and performing optimizer steps on the CPU. This can significantly reduce GPU memory usage at the cost of CPU \n",
|
||||||
|
"RAM and training speed.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">Question:</span> How do I ensure only LoRA parameters are trainable when fine-tuning?\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;36mQuestion:\u001b[0m How do I ensure only LoRA parameters are trainable when fine-tuning?\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #808000; text-decoration-color: #808000; font-weight: bold\">Agent Answer:</span> To ensure only LoRA parameters are trainable when fine-tuning, you can use the `set_trainable_params`\n",
|
||||||
|
"function from `torchtune.modules.peft.peft_utils` to set the `requires_grad` attribute of the LoRA parameters to \n",
|
||||||
|
"`<span style=\"color: #00ff00; text-decoration-color: #00ff00; font-style: italic\">True</span>` and the `requires_grad` attribute of the other parameters to `<span style=\"color: #ff0000; text-decoration-color: #ff0000; font-style: italic\">False</span>`.\n",
|
||||||
|
"\n",
|
||||||
|
"Here is an example:\n",
|
||||||
|
"```python\n",
|
||||||
|
"from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\n",
|
||||||
|
"\n",
|
||||||
|
"# Get the LoRA parameters\n",
|
||||||
|
"lora_params = <span style=\"color: #800080; text-decoration-color: #800080; font-weight: bold\">get_adapter_params</span><span style=\"font-weight: bold\">(</span>model<span style=\"font-weight: bold\">)</span>\n",
|
||||||
|
"\n",
|
||||||
|
"# Set the LoRA parameters to trainable and the other parameters to non-trainable\n",
|
||||||
|
"<span style=\"color: #800080; text-decoration-color: #800080; font-weight: bold\">set_trainable_params</span><span style=\"font-weight: bold\">(</span>model, lora_params<span style=\"font-weight: bold\">)</span>\n",
|
||||||
|
"```\n",
|
||||||
|
"This will ensure that only the LoRA parameters are updated during fine-tuning, while the other parameters remain \n",
|
||||||
|
"frozen.\n",
|
||||||
|
"\n",
|
||||||
|
"Alternatively, you can also use the `lora_finetune` recipe in torchtune, which automatically sets the LoRA \n",
|
||||||
|
"parameters to trainable and the other parameters to non-trainable. You can run the recipe using the following \n",
|
||||||
|
"command:\n",
|
||||||
|
"```bash\n",
|
||||||
|
"tune run lora_finetune --config llama2/7B_lora\n",
|
||||||
|
"```\n",
|
||||||
|
"This will fine-tune the LoRA parameters of the Llama2 model using the default settings. You can modify the config \n",
|
||||||
|
"file to change the hyperparameters or the model architecture.\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;33mAgent Answer:\u001b[0m To ensure only LoRA parameters are trainable when fine-tuning, you can use the `set_trainable_params`\n",
|
||||||
|
"function from `torchtune.modules.peft.peft_utils` to set the `requires_grad` attribute of the LoRA parameters to \n",
|
||||||
|
"`\u001b[3;92mTrue\u001b[0m` and the `requires_grad` attribute of the other parameters to `\u001b[3;91mFalse\u001b[0m`.\n",
|
||||||
|
"\n",
|
||||||
|
"Here is an example:\n",
|
||||||
|
"```python\n",
|
||||||
|
"from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\n",
|
||||||
|
"\n",
|
||||||
|
"# Get the LoRA parameters\n",
|
||||||
|
"lora_params = \u001b[1;35mget_adapter_params\u001b[0m\u001b[1m(\u001b[0mmodel\u001b[1m)\u001b[0m\n",
|
||||||
|
"\n",
|
||||||
|
"# Set the LoRA parameters to trainable and the other parameters to non-trainable\n",
|
||||||
|
"\u001b[1;35mset_trainable_params\u001b[0m\u001b[1m(\u001b[0mmodel, lora_params\u001b[1m)\u001b[0m\n",
|
||||||
|
"```\n",
|
||||||
|
"This will ensure that only the LoRA parameters are updated during fine-tuning, while the other parameters remain \n",
|
||||||
|
"frozen.\n",
|
||||||
|
"\n",
|
||||||
|
"Alternatively, you can also use the `lora_finetune` recipe in torchtune, which automatically sets the LoRA \n",
|
||||||
|
"parameters to trainable and the other parameters to non-trainable. You can run the recipe using the following \n",
|
||||||
|
"command:\n",
|
||||||
|
"```bash\n",
|
||||||
|
"tune run lora_finetune --config llama2/7B_lora\n",
|
||||||
|
"```\n",
|
||||||
|
"This will fine-tune the LoRA parameters of the Llama2 model using the default settings. You can modify the config \n",
|
||||||
|
"file to change the hyperparameters or the model architecture.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"urls = [\n",
|
||||||
|
" \"memory_optimizations.rst\",\n",
|
||||||
|
" \"chat.rst\",\n",
|
||||||
|
" \"llama3.rst\",\n",
|
||||||
|
" \"datasets.rst\",\n",
|
||||||
|
" \"qat_finetune.rst\",\n",
|
||||||
|
" \"lora_finetune.rst\",\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"attachments = [\n",
|
||||||
|
" {\n",
|
||||||
|
" \"content\": {\n",
|
||||||
|
" \"uri\": f\"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}\",\n",
|
||||||
|
" },\n",
|
||||||
|
" \"mime_type\": \"text/plain\",\n",
|
||||||
|
" }\n",
|
||||||
|
"\n",
|
||||||
|
" for i, url in enumerate(urls)\n",
|
||||||
|
"]\n",
|
||||||
|
"\n",
|
||||||
|
"rag_attachment_agent = Agent(\n",
|
||||||
|
" client,\n",
|
||||||
|
" model=MODEL_ID,\n",
|
||||||
|
" instructions=\"You are a helpful assistant that can answer questions about the Torchtune project. Use context from attached documentation for Torchtune to answer questions.\",\n",
|
||||||
|
")\n",
|
||||||
|
"\n",
|
||||||
|
"for example in examples:\n",
|
||||||
|
" session_id = rag_attachment_agent.create_session(session_name=f\"rag_attachment_session_{uuid.uuid4()}\")\n",
|
||||||
|
" response = rag_attachment_agent.create_turn(\n",
|
||||||
|
" messages=[\n",
|
||||||
|
" {\n",
|
||||||
|
" \"role\": \"user\",\n",
|
||||||
|
" \"content\": example[\"input_query\"]\n",
|
||||||
|
" }\n",
|
||||||
|
" ],\n",
|
||||||
|
" session_id=session_id,\n",
|
||||||
|
" documents=attachments,\n",
|
||||||
|
" stream=False\n",
|
||||||
|
" )\n",
|
||||||
|
" rich.print(f\"[bold cyan]Question:[/bold cyan] {example['input_query']}\")\n",
|
||||||
|
" rich.print(f\"[bold yellow]Agent Answer:[/bold yellow] {response.output_message.content}\")\n",
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 16,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"><span style=\"color: #800080; text-decoration-color: #800080; font-weight: bold\">ScoringScoreResponse</span><span style=\"font-weight: bold\">(</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"color: #808000; text-decoration-color: #808000\">results</span>=<span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'braintrust::factuality'</span>: <span style=\"color: #800080; text-decoration-color: #800080; font-weight: bold\">ScoringResult</span><span style=\"font-weight: bold\">(</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">aggregated_results</span>=<span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'average'</span>: <span style=\"font-weight: bold\">{</span><span style=\"color: #008000; text-decoration-color: #008000\">'average'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.6</span><span style=\"font-weight: bold\">}}</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"color: #808000; text-decoration-color: #808000\">score_rows</span>=<span style=\"font-weight: bold\">[</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.6</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'metadata'</span>: <span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'choice'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'B'</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'rationale'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'1. Both the expert and the submitted answers mention that Torchtune supports two precision formats: `fp32` (full-precision) and `bfloat16` (half-precision).\\n2. The expert answer specifies that `fp32` uses 4 bytes per model and optimizer parameter, while `bfloat16` uses 2 bytes per model and optimizer parameter.\\n3. The submitted answer also mentions that `bfloat16` uses 2 bytes per model parameter, which is consistent with the expert answer.\\n4. The submitted answer adds that `bfloat16` improves training speed, which is additional information not present in the expert answer.\\n5. There is no conflict between the submitted answer and the expert answer; the submitted answer simply provides more information.\\n\\nBased on this analysis, the submitted answer is a superset of the expert answer and is fully consistent with it.'</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.6</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'metadata'</span>: <span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'choice'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'B'</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'rationale'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'1. The expert answer provides the definition of DoRA as \"Weight-Decomposed Low-Rank Adaptation.\"\\n2. The submitted answer also states that DoRA stands for \"Weight-Decomposed Low-Rank Adaptation,\" which matches the expert answer.\\n3. The submitted answer includes additional information about DoRA, explaining that it is a variant of LoRA and describing how it decomposes pre-trained weights into magnitude and direction components.\\n4. The submitted answer further explains the role of the magnitude component and the direction component, and mentions the performance improvement and overhead associated with DoRA.\\n5. The additional details in the submitted answer do not contradict the expert answer; instead, they expand upon it.\\n6. Therefore, the submitted answer is a superset of the expert answer and is fully consistent with it.'</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.6</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'metadata'</span>: <span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'choice'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'B'</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'rationale'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'1. The expert answer states that the CPUOffloadOptimizer reduces GPU memory usage by keeping optimizer states on CPU and performing optimizer steps on CPU. It also mentions the optional offloading of gradients to CPU with the parameter offload_gradients=True.\\n\\n2. The submitted answer states that the CPUOffloadOptimizer reduces GPU memory usage by offloading optimizer states and gradients to the CPU, and performing optimizer steps on the CPU. It adds that this can significantly reduce GPU memory usage at the cost of CPU RAM and training speed.\\n\\n3. Comparing both answers:\\n - Both answers agree on offloading optimizer states to the CPU and performing optimizer steps on the CPU.\\n - Both mention the offloading of gradients to the CPU, but the expert answer specifies it as optional with a parameter, while the submission does not specify this detail.\\n - The submission adds additional information about the trade-off involving CPU RAM and training speed, which is not mentioned in the expert answer.\\n\\n4. The submitted answer includes all the details from the expert answer and adds more information about the trade-offs, making it a superset of the expert answer.\\n\\nTherefore, the correct choice is (B) The submitted answer is a superset of the expert answer and is fully consistent with it.'</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">}</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'score'</span>: <span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">0.6</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'metadata'</span>: <span style=\"font-weight: bold\">{</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'choice'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">'B'</span>,\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ │ </span><span style=\"color: #008000; text-decoration-color: #008000\">'rationale'</span>: <span style=\"color: #008000; text-decoration-color: #008000\">\"1. **Expert Answer Analysis**: The expert answer provides a method to ensure only LoRA parameters are trainable by using torchtune's utility functions. It mentions fetching LoRA parameters with `get_adapter_params(lora_model)` and setting them as trainable with `set_trainable_params(lora_model, lora_params)`. It also notes that the LoRA recipe handles this automatically.\\n\\n2. **Submitted Answer Analysis**: The submitted answer provides a similar method using `set_trainable_params` to set the `requires_grad` attribute of LoRA parameters to `True` and other parameters to `False`. It includes a code example demonstrating this process. Additionally, it mentions using the `lora_finetune` recipe in torchtune, which automatically sets the LoRA parameters to trainable.\\n\\n3. **Comparison**: The submitted answer includes all the details from the expert answer regarding the use of `get_adapter_params` and `set_trainable_params`. It also provides additional information about setting the `requires_grad` attribute and using the `lora_finetune` recipe, which is not mentioned in the expert answer.\\n\\n4. **Conclusion**: The submitted answer is a superset of the expert answer as it contains all the information from the expert answer and additional details. There is no conflict between the two answers, and the additional information in the submission is consistent with the expert's explanation.\\n\\nTherefore, the correct choice is (B) The submitted answer is a superset of the expert answer and is fully consistent with it.\"</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ │ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ │ </span><span style=\"font-weight: bold\">]</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ │ </span><span style=\"font-weight: bold\">)</span>\n",
|
||||||
|
"<span style=\"color: #7fbf7f; text-decoration-color: #7fbf7f\">│ </span><span style=\"font-weight: bold\">}</span>\n",
|
||||||
|
"<span style=\"font-weight: bold\">)</span>\n",
|
||||||
|
"</pre>\n"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
"\u001b[1;35mScoringScoreResponse\u001b[0m\u001b[1m(\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ \u001b[0m\u001b[33mresults\u001b[0m=\u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ \u001b[0m\u001b[32m'braintrust::factuality'\u001b[0m: \u001b[1;35mScoringResult\u001b[0m\u001b[1m(\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[33maggregated_results\u001b[0m=\u001b[1m{\u001b[0m\u001b[32m'average'\u001b[0m: \u001b[1m{\u001b[0m\u001b[32m'average'\u001b[0m: \u001b[1;36m0.6\u001b[0m\u001b[1m}\u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[33mscore_rows\u001b[0m=\u001b[1m[\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'score'\u001b[0m: \u001b[1;36m0.6\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'metadata'\u001b[0m: \u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'choice'\u001b[0m: \u001b[32m'B'\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'rationale'\u001b[0m: \u001b[32m'1. Both the expert and the submitted answers mention that Torchtune supports two precision formats: `fp32` \u001b[0m\u001b[32m(\u001b[0m\u001b[32mfull-precision\u001b[0m\u001b[32m)\u001b[0m\u001b[32m and `bfloat16` \u001b[0m\u001b[32m(\u001b[0m\u001b[32mhalf-precision\u001b[0m\u001b[32m)\u001b[0m\u001b[32m.\\n2. The expert answer specifies that `fp32` uses 4 bytes per model and optimizer parameter, while `bfloat16` uses 2 bytes per model and optimizer parameter.\\n3. The submitted answer also mentions that `bfloat16` uses 2 bytes per model parameter, which is consistent with the expert answer.\\n4. The submitted answer adds that `bfloat16` improves training speed, which is additional information not present in the expert answer.\\n5. There is no conflict between the submitted answer and the expert answer; the submitted answer simply provides more information.\\n\\nBased on this analysis, the submitted answer is a superset of the expert answer and is fully consistent with it.'\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'score'\u001b[0m: \u001b[1;36m0.6\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'metadata'\u001b[0m: \u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'choice'\u001b[0m: \u001b[32m'B'\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'rationale'\u001b[0m: \u001b[32m'1. The expert answer provides the definition of DoRA as \"Weight-Decomposed Low-Rank Adaptation.\"\\n2. The submitted answer also states that DoRA stands for \"Weight-Decomposed Low-Rank Adaptation,\" which matches the expert answer.\\n3. The submitted answer includes additional information about DoRA, explaining that it is a variant of LoRA and describing how it decomposes pre-trained weights into magnitude and direction components.\\n4. The submitted answer further explains the role of the magnitude component and the direction component, and mentions the performance improvement and overhead associated with DoRA.\\n5. The additional details in the submitted answer do not contradict the expert answer; instead, they expand upon it.\\n6. Therefore, the submitted answer is a superset of the expert answer and is fully consistent with it.'\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'score'\u001b[0m: \u001b[1;36m0.6\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'metadata'\u001b[0m: \u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'choice'\u001b[0m: \u001b[32m'B'\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'rationale'\u001b[0m: \u001b[32m'1. The expert answer states that the CPUOffloadOptimizer reduces GPU memory usage by keeping optimizer states on CPU and performing optimizer steps on CPU. It also mentions the optional offloading of gradients to CPU with the parameter \u001b[0m\u001b[32moffload_gradients\u001b[0m\u001b[32m=\u001b[0m\u001b[32mTrue\u001b[0m\u001b[32m.\\n\\n2. The submitted answer states that the CPUOffloadOptimizer reduces GPU memory usage by offloading optimizer states and gradients to the CPU, and performing optimizer steps on the CPU. It adds that this can significantly reduce GPU memory usage at the cost of CPU RAM and training speed.\\n\\n3. Comparing both answers:\\n - Both answers agree on offloading optimizer states to the CPU and performing optimizer steps on the CPU.\\n - Both mention the offloading of gradients to the CPU, but the expert answer specifies it as optional with a parameter, while the submission does not specify this detail.\\n - The submission adds additional information about the trade-off involving CPU RAM and training speed, which is not mentioned in the expert answer.\\n\\n4. The submitted answer includes all the details from the expert answer and adds more information about the trade-offs, making it a superset of the expert answer.\\n\\nTherefore, the correct choice is \u001b[0m\u001b[32m(\u001b[0m\u001b[32mB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m The submitted answer is a superset of the expert answer and is fully consistent with it.'\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m}\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'score'\u001b[0m: \u001b[1;36m0.6\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[32m'metadata'\u001b[0m: \u001b[1m{\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'choice'\u001b[0m: \u001b[32m'B'\u001b[0m,\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ │ \u001b[0m\u001b[32m'rationale'\u001b[0m: \u001b[32m\"1. **Expert Answer Analysis**: The expert answer provides a method to ensure only LoRA parameters are trainable by using torchtune's utility functions. It mentions fetching LoRA parameters with `get_adapter_params\u001b[0m\u001b[32m(\u001b[0m\u001b[32mlora_model\u001b[0m\u001b[32m)\u001b[0m\u001b[32m` and setting them as trainable with `set_trainable_params\u001b[0m\u001b[32m(\u001b[0m\u001b[32mlora_model, lora_params\u001b[0m\u001b[32m)\u001b[0m\u001b[32m`. It also notes that the LoRA recipe handles this automatically.\\n\\n2. **Submitted Answer Analysis**: The submitted answer provides a similar method using `set_trainable_params` to set the `requires_grad` attribute of LoRA parameters to `True` and other parameters to `False`. It includes a code example demonstrating this process. Additionally, it mentions using the `lora_finetune` recipe in torchtune, which automatically sets the LoRA parameters to trainable.\\n\\n3. **Comparison**: The submitted answer includes all the details from the expert answer regarding the use of `get_adapter_params` and `set_trainable_params`. It also provides additional information about setting the `requires_grad` attribute and using the `lora_finetune` recipe, which is not mentioned in the expert answer.\\n\\n4. **Conclusion**: The submitted answer is a superset of the expert answer as it contains all the information from the expert answer and additional details. There is no conflict between the two answers, and the additional information in the submission is consistent with the expert's explanation.\\n\\nTherefore, the correct choice is \u001b[0m\u001b[32m(\u001b[0m\u001b[32mB\u001b[0m\u001b[32m)\u001b[0m\u001b[32m The submitted answer is a superset of the expert answer and is fully consistent with it.\"\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ │ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ │ \u001b[0m\u001b[1m]\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ │ \u001b[0m\u001b[1m)\u001b[0m\n",
|
||||||
|
"\u001b[2;32m│ \u001b[0m\u001b[1m}\u001b[0m\n",
|
||||||
|
"\u001b[1m)\u001b[0m\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "display_data"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"eval_rows = []\n",
|
||||||
|
"for i, session_id in enumerate(rag_attachment_agent.sessions):\n",
|
||||||
|
" session_response = client.agents.session.retrieve(agent_id=rag_attachment_agent.agent_id, session_id=session_id)\n",
|
||||||
|
" for turn in session_response.turns:\n",
|
||||||
|
" eval_rows.append({\n",
|
||||||
|
" \"input_query\": examples[i][\"input_query\"],\n",
|
||||||
|
" \"expected_answer\": examples[i][\"expected_answer\"],\n",
|
||||||
|
" \"generated_answer\": turn.output_message.content,\n",
|
||||||
|
" })\n",
|
||||||
|
"\n",
|
||||||
|
"scoring_params = {\n",
|
||||||
|
" \"braintrust::factuality\": None,\n",
|
||||||
|
"}\n",
|
||||||
|
"scoring_response = client.scoring.score(\n",
|
||||||
|
" input_rows=eval_rows,\n",
|
||||||
|
" scoring_functions=scoring_params,\n",
|
||||||
|
")\n",
|
||||||
|
"pprint(scoring_response)"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "markdown",
|
"cell_type": "markdown",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
|
|
|
@ -1,9 +1 @@
|
||||||
The RFC Specification (OpenAPI format) is generated from the set of API endpoints located in `llama_stack/distribution/server/endpoints.py` using the `generate.py` utility.
|
The RFC Specification (OpenAPI format) is generated from the set of API endpoints located in `llama_stack/distribution/server/endpoints.py` using the `generate.py` utility.
|
||||||
|
|
||||||
Please install the following packages before running the script:
|
|
||||||
|
|
||||||
```
|
|
||||||
pip install fire PyYAML
|
|
||||||
```
|
|
||||||
|
|
||||||
Then simply run `sh run_openapi_generator.sh`
|
|
||||||
|
|
|
@ -12,7 +12,7 @@
|
||||||
|
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
import sys
|
||||||
import fire
|
import fire
|
||||||
import ruamel.yaml as yaml
|
import ruamel.yaml as yaml
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ from llama_stack.distribution.stack import LlamaStack # noqa: E402
|
||||||
|
|
||||||
from .pyopenapi.options import Options # noqa: E402
|
from .pyopenapi.options import Options # noqa: E402
|
||||||
from .pyopenapi.specification import Info, Server # noqa: E402
|
from .pyopenapi.specification import Info, Server # noqa: E402
|
||||||
from .pyopenapi.utility import Specification # noqa: E402
|
from .pyopenapi.utility import Specification, validate_api # noqa: E402
|
||||||
|
|
||||||
|
|
||||||
def str_presenter(dumper, data):
|
def str_presenter(dumper, data):
|
||||||
|
@ -39,6 +39,13 @@ def main(output_dir: str):
|
||||||
if not output_dir.exists():
|
if not output_dir.exists():
|
||||||
raise ValueError(f"Directory {output_dir} does not exist")
|
raise ValueError(f"Directory {output_dir} does not exist")
|
||||||
|
|
||||||
|
# Validate API protocols before generating spec
|
||||||
|
return_type_errors = validate_api()
|
||||||
|
if return_type_errors:
|
||||||
|
print("\nAPI Method Return Type Validation Errors:\n")
|
||||||
|
for error in return_type_errors:
|
||||||
|
print(error)
|
||||||
|
sys.exit(1)
|
||||||
now = str(datetime.now())
|
now = str(datetime.now())
|
||||||
print(
|
print(
|
||||||
"Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at " + now
|
"Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at " + now
|
||||||
|
|
|
@ -457,9 +457,9 @@ class Generator:
|
||||||
"status": 400,
|
"status": 400,
|
||||||
"title": "Bad Request",
|
"title": "Bad Request",
|
||||||
"detail": "The request was invalid or malformed",
|
"detail": "The request was invalid or malformed",
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
self.responses["TooManyRequests429"] = Response(
|
self.responses["TooManyRequests429"] = Response(
|
||||||
|
@ -471,9 +471,9 @@ class Generator:
|
||||||
"status": 429,
|
"status": 429,
|
||||||
"title": "Too Many Requests",
|
"title": "Too Many Requests",
|
||||||
"detail": "You have exceeded the rate limit. Please try again later.",
|
"detail": "You have exceeded the rate limit. Please try again later.",
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
self.responses["InternalServerError500"] = Response(
|
self.responses["InternalServerError500"] = Response(
|
||||||
|
@ -485,9 +485,9 @@ class Generator:
|
||||||
"status": 500,
|
"status": 500,
|
||||||
"title": "Internal Server Error",
|
"title": "Internal Server Error",
|
||||||
"detail": "An unexpected error occurred. Our team has been notified.",
|
"detail": "An unexpected error occurred. Our team has been notified.",
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
# Add a default error response for any unhandled error cases
|
# Add a default error response for any unhandled error cases
|
||||||
|
@ -500,9 +500,9 @@ class Generator:
|
||||||
"status": 0,
|
"status": 0,
|
||||||
"title": "Error",
|
"title": "Error",
|
||||||
"detail": "An unexpected error occurred",
|
"detail": "An unexpected error occurred",
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
}
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def _build_type_tag(self, ref: str, schema: Schema) -> Tag:
|
def _build_type_tag(self, ref: str, schema: Schema) -> Tag:
|
||||||
|
@ -547,11 +547,14 @@ class Generator:
|
||||||
"SyntheticDataGeneration",
|
"SyntheticDataGeneration",
|
||||||
"PostTraining",
|
"PostTraining",
|
||||||
"BatchInference",
|
"BatchInference",
|
||||||
"Files",
|
|
||||||
]:
|
]:
|
||||||
op.defining_class.__name__ = f"{op.defining_class.__name__} (Coming Soon)"
|
op.defining_class.__name__ = f"{op.defining_class.__name__} (Coming Soon)"
|
||||||
print(op.defining_class.__name__)
|
print(op.defining_class.__name__)
|
||||||
|
|
||||||
|
# TODO (xiyan): temporary fix for datasetio inner impl + datasets api
|
||||||
|
# if op.defining_class.__name__ in ["DatasetIO"]:
|
||||||
|
# op.defining_class.__name__ = "Datasets"
|
||||||
|
|
||||||
doc_string = parse_type(op.func_ref)
|
doc_string = parse_type(op.func_ref)
|
||||||
doc_params = dict(
|
doc_params = dict(
|
||||||
(param.name, param.description) for param in doc_string.params.values()
|
(param.name, param.description) for param in doc_string.params.values()
|
||||||
|
@ -598,7 +601,9 @@ class Generator:
|
||||||
|
|
||||||
# data passed in request body as raw bytes cannot have request parameters
|
# data passed in request body as raw bytes cannot have request parameters
|
||||||
if raw_bytes_request_body and op.request_params:
|
if raw_bytes_request_body and op.request_params:
|
||||||
raise ValueError("Cannot have both raw bytes request body and request parameters")
|
raise ValueError(
|
||||||
|
"Cannot have both raw bytes request body and request parameters"
|
||||||
|
)
|
||||||
|
|
||||||
# data passed in request body as raw bytes
|
# data passed in request body as raw bytes
|
||||||
if raw_bytes_request_body:
|
if raw_bytes_request_body:
|
||||||
|
|
|
@ -6,8 +6,8 @@
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||||
<title>OpenAPI specification</title>
|
<title>OpenAPI specification</title>
|
||||||
<link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
|
<link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
|
||||||
<script type="module" src="https://unpkg.com/@stoplight/elements/web-components.min.js"></script>
|
<script type="module" src="https://cdn.jsdelivr.net/npm/@stoplight/elements/web-components.min.js"></script>
|
||||||
<link rel="stylesheet" href="https://unpkg.com/@stoplight/elements/styles.min.css">
|
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@stoplight/elements/styles.min.css">
|
||||||
<style>
|
<style>
|
||||||
body {
|
body {
|
||||||
margin: 0;
|
margin: 0;
|
||||||
|
|
|
@ -6,16 +6,18 @@
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import typing
|
import typing
|
||||||
|
import inspect
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import TextIO
|
from typing import TextIO
|
||||||
|
from typing import Any, List, Optional, Union, get_type_hints, get_origin, get_args
|
||||||
|
|
||||||
from llama_stack.strong_typing.schema import object_to_json, StrictJsonType
|
from llama_stack.strong_typing.schema import object_to_json, StrictJsonType
|
||||||
|
from llama_stack.distribution.resolver import api_protocol_map
|
||||||
|
|
||||||
from .generator import Generator
|
from .generator import Generator
|
||||||
from .options import Options
|
from .options import Options
|
||||||
from .specification import Document
|
from .specification import Document
|
||||||
|
|
||||||
|
|
||||||
THIS_DIR = Path(__file__).parent
|
THIS_DIR = Path(__file__).parent
|
||||||
|
|
||||||
|
|
||||||
|
@ -114,3 +116,85 @@ class Specification:
|
||||||
)
|
)
|
||||||
|
|
||||||
f.write(html)
|
f.write(html)
|
||||||
|
|
||||||
|
def is_optional_type(type_: Any) -> bool:
|
||||||
|
"""Check if a type is Optional."""
|
||||||
|
origin = get_origin(type_)
|
||||||
|
args = get_args(type_)
|
||||||
|
return origin is Optional or (origin is Union and type(None) in args)
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_api_method_return_type(method) -> str | None:
|
||||||
|
hints = get_type_hints(method)
|
||||||
|
|
||||||
|
if 'return' not in hints:
|
||||||
|
return "has no return type annotation"
|
||||||
|
|
||||||
|
return_type = hints['return']
|
||||||
|
if is_optional_type(return_type):
|
||||||
|
return "returns Optional type"
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_api_delete_method_returns_none(method) -> str | None:
|
||||||
|
hints = get_type_hints(method)
|
||||||
|
|
||||||
|
if 'return' not in hints:
|
||||||
|
return "has no return type annotation"
|
||||||
|
|
||||||
|
return_type = hints['return']
|
||||||
|
if return_type is not None and return_type is not type(None):
|
||||||
|
return "does not return None"
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_list_parameters_contain_data(method) -> str | None:
|
||||||
|
hints = get_type_hints(method)
|
||||||
|
|
||||||
|
if 'return' not in hints:
|
||||||
|
return "has no return type annotation"
|
||||||
|
|
||||||
|
return_type = hints['return']
|
||||||
|
if not inspect.isclass(return_type):
|
||||||
|
return
|
||||||
|
|
||||||
|
if not return_type.__name__.startswith('List'):
|
||||||
|
return
|
||||||
|
|
||||||
|
if 'data' not in return_type.model_fields:
|
||||||
|
return "does not have data attribute"
|
||||||
|
|
||||||
|
|
||||||
|
_VALIDATORS = {
|
||||||
|
"GET": [
|
||||||
|
_validate_api_method_return_type,
|
||||||
|
_validate_list_parameters_contain_data,
|
||||||
|
],
|
||||||
|
"DELETE": [
|
||||||
|
_validate_api_delete_method_returns_none,
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _get_methods_by_type(protocol, method_type: str):
|
||||||
|
members = inspect.getmembers(protocol, predicate=inspect.isfunction)
|
||||||
|
return {
|
||||||
|
method_name: method
|
||||||
|
for method_name, method in members
|
||||||
|
if (webmethod := getattr(method, '__webmethod__', None))
|
||||||
|
if webmethod and webmethod.method == method_type
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def validate_api() -> List[str]:
|
||||||
|
"""Validate the API protocols."""
|
||||||
|
errors = []
|
||||||
|
protocols = api_protocol_map()
|
||||||
|
|
||||||
|
for target, validators in _VALIDATORS.items():
|
||||||
|
for protocol_name, protocol in protocols.items():
|
||||||
|
for validator in validators:
|
||||||
|
for method_name, method in _get_methods_by_type(protocol, target).items():
|
||||||
|
err = validator(method)
|
||||||
|
if err:
|
||||||
|
errors.append(f"Method {protocol_name}.{method_name} {err}")
|
||||||
|
|
||||||
|
return errors
|
||||||
|
|
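For context on what these new validators catch, here is a minimal, self-contained sketch; the `ExampleAPI` protocol and its method names are hypothetical and only illustrate the return-type checks that `_validate_api_method_return_type` performs with `get_type_hints`.

```python
# Hypothetical protocol used only to illustrate the validation rules above.
from typing import Optional, Protocol, get_type_hints


class ExampleAPI(Protocol):
    def get_widget(self, widget_id: str) -> dict: ...  # passes: concrete return type
    def get_maybe_widget(self, widget_id: str) -> Optional[dict]: ...  # flagged: Optional return
    def get_untyped_widget(self, widget_id: str): ...  # flagged: no return annotation


for name in ("get_widget", "get_maybe_widget", "get_untyped_widget"):
    hints = get_type_hints(getattr(ExampleAPI, name))
    print(name, "->", hints.get("return", "no return annotation"))
```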
|
@ -14,7 +14,7 @@ Agents are configured using the `AgentConfig` class, which includes:
|
||||||
- **Safety Shields**: Guardrails to ensure responsible AI behavior
|
- **Safety Shields**: Guardrails to ensure responsible AI behavior
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_stack_client.lib.agents.agent import Agent
|
from llama_stack_client import Agent
|
||||||
|
|
||||||
|
|
||||||
# Create the agent
|
# Create the agent
|
||||||
|
@ -44,14 +44,14 @@ Each interaction with an agent is called a "turn" and consists of:
|
||||||
- **Output Message**: The agent's response
|
- **Output Message**: The agent's response
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
from llama_stack_client import AgentEventLogger
|
||||||
|
|
||||||
# Create a turn with streaming response
|
# Create a turn with streaming response
|
||||||
turn_response = agent.create_turn(
|
turn_response = agent.create_turn(
|
||||||
session_id=session_id,
|
session_id=session_id,
|
||||||
messages=[{"role": "user", "content": "Tell me about Llama models"}],
|
messages=[{"role": "user", "content": "Tell me about Llama models"}],
|
||||||
)
|
)
|
||||||
for log in EventLogger().log(turn_response):
|
for log in AgentEventLogger().log(turn_response):
|
||||||
log.print()
|
log.print()
|
||||||
```
|
```
|
||||||
### Non-Streaming
|
### Non-Streaming
|
||||||
|
|
|
@ -67,9 +67,7 @@ sequenceDiagram
|
||||||
Each step in this process can be monitored and controlled through configurations. Here's an example that demonstrates monitoring the agent's execution:
|
Each step in this process can be monitored and controlled through configurations. Here's an example that demonstrates monitoring the agent's execution:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_stack_client import LlamaStackClient
|
from llama_stack_client import LlamaStackClient, Agent, AgentEventLogger
|
||||||
from llama_stack_client.lib.agents.agent import Agent
|
|
||||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
|
||||||
from rich.pretty import pprint
|
from rich.pretty import pprint
|
||||||
|
|
||||||
# Replace host and port
|
# Replace host and port
|
||||||
|
@ -113,7 +111,7 @@ response = agent.create_turn(
|
||||||
)
|
)
|
||||||
|
|
||||||
# Monitor each step of execution
|
# Monitor each step of execution
|
||||||
for log in EventLogger().log(response):
|
for log in AgentEventLogger().log(response):
|
||||||
log.print()
|
log.print()
|
||||||
|
|
||||||
# Using non-streaming API, the response contains input, steps, and output.
|
# Using non-streaming API, the response contains input, steps, and output.
|
||||||
|
|
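# A minimal sketch of the non-streaming variant, assuming create_turn() accepts
# stream=False and returns the completed turn described above; the attribute
# names below follow that description and are assumptions, not text from this file.
response = agent.create_turn(
    session_id=session_id,
    messages=[{"role": "user", "content": "Tell me about Llama models"}],
    stream=False,
)
pprint(response.steps)  # inference / tool-execution steps taken during the turn
pprint(response.output_message)  # the final answer returned to the user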
|
@ -23,9 +23,7 @@ In this example, we will show you how to:
|
||||||
|
|
||||||
##### Building a Search Agent
|
##### Building a Search Agent
|
||||||
```python
|
```python
|
||||||
from llama_stack_client import LlamaStackClient
|
from llama_stack_client import LlamaStackClient, Agent, AgentEventLogger
|
||||||
from llama_stack_client.lib.agents.agent import Agent
|
|
||||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
|
||||||
|
|
||||||
client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}")
|
client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}")
|
||||||
|
|
||||||
|
@ -54,7 +52,7 @@ for prompt in user_prompts:
|
||||||
session_id=session_id,
|
session_id=session_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
for log in EventLogger().log(response):
|
for log in AgentEventLogger().log(response):
|
||||||
log.print()
|
log.print()
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Building AI Applications
|
# Building AI Applications (Examples)
|
||||||
|
|
||||||
Llama Stack provides all the building blocks needed to create sophisticated AI applications.
|
Llama Stack provides all the building blocks needed to create sophisticated AI applications.
|
||||||
|
|
||||||
|
|
|
@ -55,11 +55,11 @@ chunks_response = client.vector_io.query(
|
||||||
A better way to ingest documents is to use the RAG Tool. This tool allows you to ingest documents from URLs, files, etc. and automatically chunks them into smaller pieces.
|
A better way to ingest documents is to use the RAG Tool. This tool allows you to ingest documents from URLs, files, etc. and automatically chunks them into smaller pieces.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_stack_client.types import Document
|
from llama_stack_client import RAGDocument
|
||||||
|
|
||||||
urls = ["memory_optimizations.rst", "chat.rst", "llama3.rst"]
|
urls = ["memory_optimizations.rst", "chat.rst", "llama3.rst"]
|
||||||
documents = [
|
documents = [
|
||||||
Document(
|
RAGDocument(
|
||||||
document_id=f"num-{i}",
|
document_id=f"num-{i}",
|
||||||
content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}",
|
content=f"https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/{url}",
|
||||||
mime_type="text/plain",
|
mime_type="text/plain",
|
||||||
|
@ -86,7 +86,7 @@ results = client.tool_runtime.rag_tool.query(
|
||||||
One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example:
|
One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_stack_client.lib.agents.agent import Agent
|
from llama_stack_client import Agent
|
||||||
|
|
||||||
# Create agent with memory
|
# Create agent with memory
|
||||||
agent = Agent(
|
agent = Agent(
|
||||||
|
@ -140,9 +140,9 @@ response = agent.create_turn(
|
||||||
|
|
||||||
You can print the response with the code below.
|
You can print the response with the code below.
|
||||||
```python
|
```python
|
||||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
from llama_stack_client import AgentEventLogger
|
||||||
|
|
||||||
for log in EventLogger().log(response):
|
for log in AgentEventLogger().log(response):
|
||||||
log.print()
|
log.print()
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
|
@ -45,19 +45,21 @@ Here's an example that sends telemetry signals to all three sink types. Your con
|
||||||
- provider_id: meta-reference
|
- provider_id: meta-reference
|
||||||
provider_type: inline::meta-reference
|
provider_type: inline::meta-reference
|
||||||
config:
|
config:
|
||||||
sinks: ['console', 'sqlite', 'otel']
|
sinks: ['console', 'sqlite', 'otel_trace', 'otel_metric']
|
||||||
otel_endpoint: "http://localhost:4318/v1/traces"
|
otel_trace_endpoint: "http://localhost:4318/v1/traces"
|
||||||
|
otel_metric_endpoint: "http://localhost:4318/v1/metrics"
|
||||||
sqlite_db_path: "/path/to/telemetry.db"
|
sqlite_db_path: "/path/to/telemetry.db"
|
||||||
```
|
```
|
||||||
|
|
||||||
### Jaeger to visualize traces
|
### Jaeger to visualize traces
|
||||||
|
|
||||||
The `otel` sink works with any service compatible with the OpenTelemetry collector. Let's use Jaeger to visualize this data.
|
The `otel_trace` and `otel_metric` sinks work with any service compatible with the OpenTelemetry collector; traces and metrics are sent to two separate endpoints.
|
||||||
|
Let's use Jaeger to visualize this data.
|
||||||
|
|
||||||
Start a Jaeger instance with the OTLP HTTP endpoint at 4318 and the Jaeger UI at 16686 using the following command:
|
Start a Jaeger instance with the OTLP HTTP endpoint at 4318 and the Jaeger UI at 16686 using the following command:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
$ docker run --rm --name jaeger \
|
$ docker run --pull always --rm --name jaeger \
|
||||||
-p 16686:16686 -p 4318:4318 \
|
-p 16686:16686 -p 4318:4318 \
|
||||||
jaegertracing/jaeger:2.1.0
|
jaegertracing/jaeger:2.1.0
|
||||||
```
|
```
|
||||||
|
|
|
@ -110,10 +110,18 @@ MCP tools are special tools that can interact with llama stack over model contex
|
||||||
|
|
||||||
Refer to [https://github.com/modelcontextprotocol/servers](https://github.com/modelcontextprotocol/servers) for available MCP servers.
|
Refer to [https://github.com/modelcontextprotocol/servers](https://github.com/modelcontextprotocol/servers) for available MCP servers.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
# start your MCP server
|
||||||
|
mkdir /tmp/content
|
||||||
|
touch /tmp/content/foo
|
||||||
|
touch /tmp/content/bar
|
||||||
|
npx -y supergateway --port 8000 --stdio 'npx -y @modelcontextprotocol/server-filesystem /tmp/content'
|
||||||
|
```
|
||||||
|
|
||||||
|
Then register the MCP server as a tool group:
|
||||||
```python
|
```python
|
||||||
# Register MCP tools
|
|
||||||
client.toolgroups.register(
|
client.toolgroups.register(
|
||||||
toolgroup_id="builtin::filesystem",
|
toolgroup_id="mcp::filesystem",
|
||||||
provider_id="model-context-protocol",
|
provider_id="model-context-protocol",
|
||||||
mcp_endpoint=URL(uri="http://localhost:8000/sse"),
|
mcp_endpoint=URL(uri="http://localhost:8000/sse"),
|
||||||
)
|
)
|
||||||
|
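# A minimal sketch of using the registered tool group from an agent, assuming the
# Agent constructor shown elsewhere in these docs (client, model, instructions,
# tools); the model id here is only a placeholder.
agent = Agent(
    client,
    model="meta-llama/Llama-3.2-3B-Instruct",
    instructions="You are a helpful assistant that can read files via MCP tools.",
    tools=["mcp::filesystem"],
)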
@ -181,7 +189,7 @@ group_tools = client.tools.list_tools(toolgroup_id="search_tools")
|
||||||
## Simple Example: Using an Agent with the Code-Interpreter Tool
|
## Simple Example: Using an Agent with the Code-Interpreter Tool
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from llama_stack_client.lib.agents.agent import Agent
|
from llama_stack_client import Agent
|
||||||
|
|
||||||
# Instantiate the AI agent with the given configuration
|
# Instantiate the AI agent with the given configuration
|
||||||
agent = Agent(
|
agent = Agent(
|
||||||
|
|
|
@ -55,7 +55,7 @@ llama stack run llama_stack/templates/open-benchmark/run.yaml
|
||||||
There are 3 necessary inputs to run a benchmark eval
|
There are 3 necessary inputs to run a benchmark eval
|
||||||
- `list of benchmark_ids`: The list of benchmark ids to run evaluation on
|
- `list of benchmark_ids`: The list of benchmark ids to run evaluation on
|
||||||
- `model-id`: The model id to evaluate on
|
- `model-id`: The model id to evaluate on
|
||||||
- `utput_dir`: Path to store the evaluate results
|
- `output_dir`: Path to store the evaluation results
|
||||||
```
|
```
|
||||||
llama-stack-client eval run-benchmark <benchmark_id_1> <benchmark_id_2> ... \
|
llama-stack-client eval run-benchmark <benchmark_id_1> <benchmark_id_2> ... \
|
||||||
--model_id <model id to evaluate on> \
|
--model_id <model id to evaluate on> \
|
||||||
|
@ -69,7 +69,7 @@ llama-stack-client eval run-benchmark help
|
||||||
to see the description of all the flags that eval run-benchmark has
|
to see the description of all the flags that eval run-benchmark has
|
||||||
|
|
||||||
|
|
||||||
In the output log, you can find the file path that has your evaluation results. Open that file and you can see you aggrgate
|
In the output log, you can find the file path that has your evaluation results. Open that file and you can see your aggregate
|
||||||
evaluation results over there.
|
evaluation results over there.
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -71,4 +71,4 @@ While there is a lot of flexibility to mix-and-match providers, often users will
|
||||||
**Locally Hosted Distro**: You may want to run Llama Stack on your own hardware. Typically though, you still need to use Inference via an external service. You can use providers like HuggingFace TGI, Fireworks, Together, etc. for this purpose. Or you may have access to GPUs and can run a [vLLM](https://github.com/vllm-project/vllm) or [NVIDIA NIM](https://build.nvidia.com/nim?filters=nimType%3Anim_type_run_anywhere&q=llama) instance. If you "just" have a regular desktop machine, you can use [Ollama](https://ollama.com/) for inference. To provide convenient quick access to these options, we provide a number of such pre-configured locally-hosted Distros.
|
**Locally Hosted Distro**: You may want to run Llama Stack on your own hardware. Typically though, you still need to use Inference via an external service. You can use providers like HuggingFace TGI, Fireworks, Together, etc. for this purpose. Or you may have access to GPUs and can run a [vLLM](https://github.com/vllm-project/vllm) or [NVIDIA NIM](https://build.nvidia.com/nim?filters=nimType%3Anim_type_run_anywhere&q=llama) instance. If you "just" have a regular desktop machine, you can use [Ollama](https://ollama.com/) for inference. To provide convenient quick access to these options, we provide a number of such pre-configured locally-hosted Distros.
|
||||||
|
|
||||||
|
|
||||||
**On-device Distro**: Finally, you may want to run Llama Stack directly on an edge device (mobile phone or a tablet.) We provide Distros for iOS and Android (coming soon.)
|
**On-device Distro**: To run Llama Stack directly on an edge device (mobile phone or a tablet), we provide Distros for [iOS](https://llama-stack.readthedocs.io/en/latest/distributions/ondevice_distro/ios_sdk.html) and [Android](https://llama-stack.readthedocs.io/en/latest/distributions/ondevice_distro/android_sdk.html).
|
||||||
|
|
|
@ -16,6 +16,7 @@ from docutils import nodes
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
import requests
|
import requests
|
||||||
import json
|
import json
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
# Read version from pyproject.toml
|
# Read version from pyproject.toml
|
||||||
with Path(__file__).parent.parent.parent.joinpath("pyproject.toml").open("rb") as f:
|
with Path(__file__).parent.parent.parent.joinpath("pyproject.toml").open("rb") as f:
|
||||||
|
@ -28,7 +29,7 @@ with Path(__file__).parent.parent.parent.joinpath("pyproject.toml").open("rb") a
|
||||||
llama_stack_version_link = f"<a href='{llama_stack_version_url}'>release notes</a>"
|
llama_stack_version_link = f"<a href='{llama_stack_version_url}'>release notes</a>"
|
||||||
|
|
||||||
project = "llama-stack"
|
project = "llama-stack"
|
||||||
copyright = "2025, Meta"
|
copyright = f"{datetime.now().year}, Meta"
|
||||||
author = "Meta"
|
author = "Meta"
|
||||||
|
|
||||||
# -- General configuration ---------------------------------------------------
|
# -- General configuration ---------------------------------------------------
|
||||||
|
@ -37,6 +38,7 @@ author = "Meta"
|
||||||
extensions = [
|
extensions = [
|
||||||
"myst_parser",
|
"myst_parser",
|
||||||
"sphinx_rtd_theme",
|
"sphinx_rtd_theme",
|
||||||
|
"sphinx_rtd_dark_mode",
|
||||||
"sphinx_copybutton",
|
"sphinx_copybutton",
|
||||||
"sphinx_tabs.tabs",
|
"sphinx_tabs.tabs",
|
||||||
"sphinx_design",
|
"sphinx_design",
|
||||||
|
@ -103,6 +105,8 @@ source_suffix = {
|
||||||
# html_theme = "alabaster"
|
# html_theme = "alabaster"
|
||||||
html_theme_options = {
|
html_theme_options = {
|
||||||
"canonical_url": "https://github.com/meta-llama/llama-stack",
|
"canonical_url": "https://github.com/meta-llama/llama-stack",
|
||||||
|
'collapse_navigation': False,
|
||||||
|
|
||||||
# "style_nav_header_background": "#c3c9d4",
|
# "style_nav_header_background": "#c3c9d4",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -6,7 +6,7 @@ This guide will walk you through the process of adding a new API provider to Lla
|
||||||
- Begin by reviewing the [core concepts](../concepts/index.md) of Llama Stack and choose the API your provider belongs to (Inference, Safety, VectorIO, etc.)
|
- Begin by reviewing the [core concepts](../concepts/index.md) of Llama Stack and choose the API your provider belongs to (Inference, Safety, VectorIO, etc.)
|
||||||
- Determine the provider type ({repopath}`Remote::llama_stack/providers/remote` or {repopath}`Inline::llama_stack/providers/inline`). Remote providers make requests to external services, while inline providers execute their implementation locally.
|
- Determine the provider type ({repopath}`Remote::llama_stack/providers/remote` or {repopath}`Inline::llama_stack/providers/inline`). Remote providers make requests to external services, while inline providers execute their implementation locally.
|
||||||
- Add your provider to the appropriate {repopath}`Registry::llama_stack/providers/registry/`. Specify any necessary pip dependencies.
|
- Add your provider to the appropriate {repopath}`Registry::llama_stack/providers/registry/`. Specify any necessary pip dependencies.
|
||||||
- Update any distribution {repopath}`Templates::llama_stack/templates/` build.yaml and run.yaml files if they should include your provider by default. Run {repopath}`llama_stack/scripts/distro_codegen.py` if necessary. Note that `distro_codegen.py` will fail if the new provider causes any distribution template to attempt to import provider-specific dependencies. This usually means the distribution's `get_distribution_template()` code path should only import any necessary Config or model alias definitions from each provider and not the provider's actual implementation.
|
- Update any distribution {repopath}`Templates::llama_stack/templates/` build.yaml and run.yaml files if they should include your provider by default. Run {repopath}`./scripts/distro_codegen.py` if necessary. Note that `distro_codegen.py` will fail if the new provider causes any distribution template to attempt to import provider-specific dependencies. This usually means the distribution's `get_distribution_template()` code path should only import any necessary Config or model alias definitions from each provider and not the provider's actual implementation.
|
||||||
|
|
||||||
|
|
||||||
Here are some example PRs to help you get started:
|
Here are some example PRs to help you get started:
|
||||||
|
|
|
@ -33,6 +33,8 @@ Can be set to any of the following log levels:
|
||||||
|
|
||||||
The default global log level is `info`. `all` sets the log level for all components.
|
The default global log level is `info`. `all` sets the log level for all components.
|
||||||
|
|
||||||
|
A user can also set `LLAMA_STACK_LOG_FILE` which will pipe the logs to the specified path as well as to the terminal. An example would be: `export LLAMA_STACK_LOG_FILE=server.log`
|
||||||
|
|
||||||
### Llama Stack Build
|
### Llama Stack Build
|
||||||
|
|
||||||
In order to build your own distribution, we recommend you clone the `llama-stack` repository.
|
In order to build your own distribution, we recommend you clone the `llama-stack` repository.
|
||||||
|
@ -65,7 +67,7 @@ options:
|
||||||
Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config. (default:
|
Image Type to use for the build. This can be either conda or container or venv. If not specified, will use the image type from the template config. (default:
|
||||||
conda)
|
conda)
|
||||||
--image-name IMAGE_NAME
|
--image-name IMAGE_NAME
|
||||||
[for image-type=conda|venv] Name of the conda or virtual environment to use for the build. If not specified, currently active Conda environment will be used if
|
[for image-type=conda|container|venv] Name of the conda or virtual environment to use for the build. If not specified, currently active Conda environment will be used if
|
||||||
found. (default: None)
|
found. (default: None)
|
||||||
--print-deps-only Print the dependencies for the stack only, without building the stack (default: False)
|
--print-deps-only Print the dependencies for the stack only, without building the stack (default: False)
|
||||||
--run Run the stack after building using the same image type, name, and other applicable arguments (default: False)
|
--run Run the stack after building using the same image type, name, and other applicable arguments (default: False)
|
||||||
|
@ -183,8 +185,12 @@ llama stack build --config llama_stack/templates/ollama/build.yaml
|
||||||
:::
|
:::
|
||||||
|
|
||||||
:::{tab-item} Building Container
|
:::{tab-item} Building Container
|
||||||
> [!TIP]
|
|
||||||
> Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
|
```{admonition} Podman Alternative
|
||||||
|
:class: tip
|
||||||
|
|
||||||
|
Podman is supported as an alternative to Docker. Set `CONTAINER_BINARY` to `podman` in your environment to use Podman.
|
||||||
|
```
|
||||||
|
|
||||||
To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.
|
To build a container image, you may start off from a template and use the `--image-type container` flag to specify `container` as the build image type.
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# Configuring a Stack
|
# Configuring a "Stack"
|
||||||
|
|
||||||
The Llama Stack runtime configuration is specified as a YAML file. Here is a simplified version of an example configuration file for the Ollama distribution:
|
The Llama Stack runtime configuration is specified as a YAML file. Here is a simplified version of an example configuration file for the Ollama distribution:
|
||||||
|
|
||||||
|
|
|
@ -1,10 +1,12 @@
|
||||||
# Using Llama Stack as a Library
|
# Using Llama Stack as a Library
|
||||||
|
|
||||||
If you are planning to use an external service for Inference (even Ollama or TGI counts as external), it is often easier to use Llama Stack as a library. This avoids the overhead of setting up a server.
|
## Set up Llama Stack without a Server
|
||||||
|
If you are planning to use an external service for Inference (even Ollama or TGI counts as external), it is often easier to use Llama Stack as a library.
|
||||||
|
This avoids the overhead of setting up a server.
|
||||||
```bash
|
```bash
|
||||||
# setup
|
# setup
|
||||||
uv pip install llama-stack
|
uv pip install llama-stack
|
||||||
llama stack build --template together --image-type venv
|
llama stack build --template ollama --image-type venv
|
||||||
```
|
```
|
||||||
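A minimal sketch of what in-process (library) usage looks like; the `LlamaStackAsLibraryClient` import path and the `initialize()` call are assumptions based on the library client rather than text from this page, and the template name matches the build command above.

```python
# Library mode: no separate server process; providers are loaded in this Python process.
# The import path and initialize() call are assumptions, not text from this page.
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient

client = LlamaStackAsLibraryClient("ollama")  # same template as `llama stack build` above
client.initialize()

print(client.models.list())
```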
|
|
||||||
```python
|
```python
|
||||||
|
|
|
@ -1,34 +1,18 @@
|
||||||
# Starting a Llama Stack Server
|
# Distributions Overview
|
||||||
|
|
||||||
You can run a Llama Stack server in one of the following ways:
|
A distribution is a pre-packaged set of Llama Stack components that can be deployed together.
|
||||||
|
|
||||||
**As a Library**:
|
|
||||||
|
|
||||||
This is the simplest way to get started. Using Llama Stack as a library means you do not need to start a server. This is especially useful when you are not running inference locally and relying on an external inference service (eg. fireworks, together, groq, etc.) See [Using Llama Stack as a Library](importing_as_library)
|
|
||||||
|
|
||||||
|
|
||||||
**Container**:
|
|
||||||
|
|
||||||
Another simple way to start interacting with Llama Stack is to just spin up a container (via Docker or Podman) which is pre-built with all the providers you need. We provide a number of pre-built images so you can start a Llama Stack server instantly. You can also build your own custom container. Which distribution to choose depends on the hardware you have. See [Selection of a Distribution](selection) for more details.
|
|
||||||
|
|
||||||
|
|
||||||
**Conda**:
|
|
||||||
|
|
||||||
If you have a custom or an advanced setup or you are developing on Llama Stack you can also build a custom Llama Stack server. Using `llama stack build` and `llama stack run` you can build/run a custom Llama Stack server containing the exact combination of providers you wish. We have also provided various templates to make getting started easier. See [Building a Custom Distribution](building_distro) for more details.
|
|
||||||
|
|
||||||
|
|
||||||
**Kubernetes**:
|
|
||||||
|
|
||||||
If you have built a container image and want to deploy it in a Kubernetes cluster instead of starting the Llama Stack server locally. See [Kubernetes Deployment Guide](kubernetes_deployment) for more details.
|
|
||||||
|
|
||||||
|
This section provides an overview of the distributions available in Llama Stack.
|
||||||
|
|
||||||
```{toctree}
|
```{toctree}
|
||||||
:maxdepth: 1
|
:maxdepth: 3
|
||||||
:hidden:
|
|
||||||
|
|
||||||
importing_as_library
|
importing_as_library
|
||||||
building_distro
|
|
||||||
configuration
|
configuration
|
||||||
selection
|
list_of_distributions
|
||||||
kubernetes_deployment
|
kubernetes_deployment
|
||||||
|
building_distro
|
||||||
|
on_device_distro
|
||||||
|
remote_hosted_distro
|
||||||
|
self_hosted_distro
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,6 +1,9 @@
|
||||||
# Kubernetes Deployment Guide
|
# Kubernetes Deployment Guide
|
||||||
|
|
||||||
Instead of starting the Llama Stack and vLLM servers locally. We can deploy them in a Kubernetes cluster. In this guide, we'll use a local [Kind](https://kind.sigs.k8s.io/) cluster and a vLLM inference service in the same cluster for demonstration purposes.
|
Instead of starting the Llama Stack and vLLM servers locally, we can deploy them in a Kubernetes cluster.
|
||||||
|
|
||||||
|
### Prerequisites
|
||||||
|
In this guide, we'll use a local [Kind](https://kind.sigs.k8s.io/) cluster and a vLLM inference service in the same cluster for demonstration purposes.
|
||||||
|
|
||||||
First, create a local Kubernetes cluster via Kind:
|
First, create a local Kubernetes cluster via Kind:
|
||||||
|
|
||||||
|
@ -8,7 +11,7 @@ First, create a local Kubernetes cluster via Kind:
|
||||||
kind create cluster --image kindest/node:v1.32.0 --name llama-stack-test
|
kind create cluster --image kindest/node:v1.32.0 --name llama-stack-test
|
||||||
```
|
```
|
||||||
|
|
||||||
Start vLLM server as a Kubernetes Pod and Service:
|
First, create a Kubernetes PVC and Secret for downloading and storing the Hugging Face model:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cat <<EOF |kubectl apply -f -
|
cat <<EOF |kubectl apply -f -
|
||||||
|
@ -31,7 +34,13 @@ metadata:
|
||||||
type: Opaque
|
type: Opaque
|
||||||
data:
|
data:
|
||||||
token: $(HF_TOKEN)
|
token: $(HF_TOKEN)
|
||||||
---
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Next, start the vLLM server as a Kubernetes Deployment and Service:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cat <<EOF |kubectl apply -f -
|
||||||
apiVersion: apps/v1
|
apiVersion: apps/v1
|
||||||
kind: Deployment
|
kind: Deployment
|
||||||
metadata:
|
metadata:
|
||||||
|
@ -47,28 +56,23 @@ spec:
|
||||||
app.kubernetes.io/name: vllm
|
app.kubernetes.io/name: vllm
|
||||||
spec:
|
spec:
|
||||||
containers:
|
containers:
|
||||||
- name: llama-stack
|
- name: vllm
|
||||||
image: $(VLLM_IMAGE)
|
image: vllm/vllm-openai:latest
|
||||||
command:
|
command: ["/bin/sh", "-c"]
|
||||||
- bash
|
args: [
|
||||||
- -c
|
"vllm serve meta-llama/Llama-3.2-1B-Instruct"
|
||||||
- |
|
]
|
||||||
MODEL="meta-llama/Llama-3.2-1B-Instruct"
|
env:
|
||||||
MODEL_PATH=/app/model/$(basename $MODEL)
|
- name: HUGGING_FACE_HUB_TOKEN
|
||||||
huggingface-cli login --token $HUGGING_FACE_HUB_TOKEN
|
valueFrom:
|
||||||
huggingface-cli download $MODEL --local-dir $MODEL_PATH --cache-dir $MODEL_PATH
|
secretKeyRef:
|
||||||
python3 -m vllm.entrypoints.openai.api_server --model $MODEL_PATH --served-model-name $MODEL --port 8000
|
name: hf-token-secret
|
||||||
|
key: token
|
||||||
ports:
|
ports:
|
||||||
- containerPort: 8000
|
- containerPort: 8000
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: llama-storage
|
- name: llama-storage
|
||||||
mountPath: /app/model
|
mountPath: /root/.cache/huggingface
|
||||||
env:
|
|
||||||
- name: HUGGING_FACE_HUB_TOKEN
|
|
||||||
valueFrom:
|
|
||||||
secretKeyRef:
|
|
||||||
name: hf-token-secret
|
|
||||||
key: token
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: llama-storage
|
- name: llama-storage
|
||||||
persistentVolumeClaim:
|
persistentVolumeClaim:
|
||||||
|
@ -127,6 +131,7 @@ EOF
|
||||||
podman build -f /tmp/test-vllm-llama-stack/Containerfile.llama-stack-run-k8s -t llama-stack-run-k8s /tmp/test-vllm-llama-stack
|
podman build -f /tmp/test-vllm-llama-stack/Containerfile.llama-stack-run-k8s -t llama-stack-run-k8s /tmp/test-vllm-llama-stack
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Deploying Llama Stack Server in Kubernetes
|
||||||
|
|
||||||
We can then start the Llama Stack server by deploying a Kubernetes Pod and Service:
|
We can then start the Llama Stack server by deploying a Kubernetes Pod and Service:
|
||||||
|
|
||||||
|
@ -187,6 +192,7 @@ spec:
|
||||||
EOF
|
EOF
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Verifying the Deployment
|
||||||
We can check that the LlamaStack server has started:
|
We can check that the LlamaStack server has started:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# List of Distributions
|
# List of Available Distributions
|
||||||
|
|
||||||
Here is a list of the distributions provided out of the box that you can use to start a Llama Stack server.
|
Here is a list of the distributions provided out of the box that you can use to start a Llama Stack server.
|
||||||
|
|
|
@@ -8,12 +8,12 @@ Features:
 - Remote Inferencing: Perform inferencing tasks remotely with Llama models hosted on a remote connection (or serverless localhost).
 - Simple Integration: With easy-to-use APIs, a developer can quickly integrate Llama Stack in their Android app. The difference with local vs remote inferencing is also minimal.
 
-Latest Release Notes: [v0.0.58](https://github.com/meta-llama/llama-stack-client-kotlin/releases/tag/v0.0.58)
+Latest Release Notes: [link](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release)
 
 *Tagged releases are stable versions of the project. While we strive to maintain a stable main branch, it's not guaranteed to be free of bugs or issues.*
 
 ## Android Demo App
-Check out our demo app to see how to integrate Llama Stack into your Android app: [Android Demo App](https://github.com/meta-llama/llama-stack-apps/tree/android-kotlin-app-latest/examples/android_app)
+Check out our demo app to see how to integrate Llama Stack into your Android app: [Android Demo App](https://github.com/meta-llama/llama-stack-client-kotlin/tree/examples/android_app)
 
 The key files in the app are `ExampleLlamaStackLocalInference.kt`, `ExampleLlamaStackRemoteInference.kts`, and `MainActivity.java`. With encompassed business logic, the app shows how to use Llama Stack for both the environments.
 
@@ -24,7 +24,7 @@ The key files in the app are `ExampleLlamaStackLocalInference.kt`, `ExampleLlama
 Add the following dependency in your `build.gradle.kts` file:
 ```
 dependencies {
-  implementation("com.llama.llamastack:llama-stack-client-kotlin:0.0.58")
+  implementation("com.llama.llamastack:llama-stack-client-kotlin:0.1.4.2")
 }
 ```
 This will download jar files in your gradle cache in a directory like `~/.gradle/caches/modules-2/files-2.1/com.llama.llamastack/`
@@ -36,13 +36,13 @@ If you plan on doing remote inferencing this is sufficient to get started.
 For local inferencing, it is required to include the ExecuTorch library into your app.
 
 Include the ExecuTorch library by:
-1. Download the `download-prebuilt-et-lib.sh` script file from the [llama-stack-client-kotlin-client-local](https://github.com/meta-llama/llama-stack-client-kotlin/blob/release/0.0.58/llama-stack-client-kotlin-client-local/download-prebuilt-et-lib.sh) directory to your local machine.
+1. Download the `download-prebuilt-et-lib.sh` script file from the [llama-stack-client-kotlin-client-local](https://github.com/meta-llama/llama-stack-client-kotlin/tree/latest-release/llama-stack-client-kotlin-client-local/download-prebuilt-et-lib.sh) directory to your local machine.
 2. Move the script to the top level of your Android app where the app directory resides:
 <p align="center">
-<img src="https://raw.githubusercontent.com/meta-llama/llama-stack-client-kotlin/refs/heads/release/0.0.58/doc/img/example_android_app_directory.png" style="width:300px">
+<img src="https://github.com/meta-llama/llama-stack-client-kotlin/blob/latest-release/doc/img/example_android_app_directory.png" style="width:300px">
 </p>
 
-3. Run `sh download-prebuilt-et-lib.sh` to create an `app/libs` directory and download the `executorch.aar` in that path. This generates an ExecuTorch library for the XNNPACK delegate with commit: [0a12e33](https://github.com/pytorch/executorch/commit/0a12e33d22a3d44d1aa2af5f0d0673d45b962553).
+3. Run `sh download-prebuilt-et-lib.sh` to create an `app/libs` directory and download the `executorch.aar` in that path. This generates an ExecuTorch library for the XNNPACK delegate.
 4. Add the `executorch.aar` dependency in your `build.gradle.kts` file:
 ```
 dependencies {
@@ -60,10 +60,10 @@ Start a Llama Stack server on localhost. Here is an example of how you can do th
 ```
 conda create -n stack-fireworks python=3.10
 conda activate stack-fireworks
-pip install llama-stack=0.0.58
+pip install --no-cache llama-stack==0.1.4
 llama stack build --template fireworks --image-type conda
 export FIREWORKS_API_KEY=<SOME_KEY>
-llama stack run /Users/<your_username>/.llama/distributions/llamastack-fireworks/fireworks-run.yaml --port=5050
+llama stack run fireworks --port 5050
 ```
 
 Ensure the Llama Stack server version is the same as the Kotlin SDK Library for maximum compatibility.
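Before pointing the Android app at this server, it can help to confirm from the host machine that the endpoint responds. A minimal sketch using the Python client, assuming the `llama-stack-client` package is installed and that the fireworks distribution has registered a Llama model under the identifier shown (which is illustrative):

```python
# Quick end-to-end check of the local server the Android app will call.
# Assumes the server above is listening on port 5050; the model id is illustrative.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5050")

response = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.1-8B-Instruct",  # replace with a model registered on your server
    messages=[{"role": "user", "content": "Reply with a one-line greeting."}],
)
print(response.completion_message.content)
```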
@@ -146,7 +146,7 @@ The purpose of this section is to share more details with users that would like
 ### Prerequisite
 
 You must complete the following steps:
-1. Clone the repo (`git clone https://github.com/meta-llama/llama-stack-client-kotlin.git -b release/0.0.58`)
+1. Clone the repo (`git clone https://github.com/meta-llama/llama-stack-client-kotlin.git -b latest-release`)
 2. Port the appropriate ExecuTorch libraries over into your Llama Stack Kotlin library environment.
 ```
 cd llama-stack-client-kotlin-client-local
@@ -1,9 +1,8 @@
 # iOS SDK
 
-We offer both remote and on-device use of Llama Stack in Swift via two components:
-
-1. [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift/)
-2. [LocalInferenceImpl](https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/inline/ios/inference)
+We offer both remote and on-device use of Llama Stack in Swift via a single SDK [llama-stack-client-swift](https://github.com/meta-llama/llama-stack-client-swift/) that contains two components:
+1. LlamaStackClient for remote
+2. Local Inference for on-device
 
 ```{image} ../../../_static/remote_or_local.gif
 :alt: Seamlessly switching between local, on-device inference and remote hosted inference
@@ -42,7 +41,7 @@ let request = Components.Schemas.CreateAgentTurnRequest(
 // ...
 ```
 
-Check out [iOSCalendarAssistant](https://github.com/meta-llama/llama-stack-apps/tree/main/examples/ios_calendar_assistant) for a complete app demo.
+Check out [iOSCalendarAssistant](https://github.com/meta-llama/llama-stack-client-swift/tree/main/examples/ios_calendar_assistant) for a complete app demo.
 
 ## LocalInference
 
@@ -58,7 +57,7 @@ let inference = LocalInference(queue: runnerQueue)
 let agents = LocalAgents(inference: self.inference)
 ```
 
-Check out [iOSCalendarAssistantWithLocalInf](https://github.com/meta-llama/llama-stack-apps/tree/main/examples/ios_calendar_assistant) for a complete app demo.
+Check out [iOSCalendarAssistantWithLocalInf](https://github.com/meta-llama/llama-stack-client-swift/tree/main/examples/ios_calendar_assistant) for a complete app demo.
 
 ### Installation
 
@@ -68,47 +67,6 @@ We're working on making LocalInference easier to set up. For now, you'll need t
 1. Install [Cmake](https://cmake.org/) for the executorch build`
 1. Drag `LocalInference.xcodeproj` into your project
 1. Add `LocalInference` as a framework in your app target
-1. Add a package dependency on https://github.com/pytorch/executorch (branch latest)
-1. Add all the kernels / backends from executorch (but not exectuorch itself!) as frameworks in your app target:
-    - backend_coreml
-    - backend_mps
-    - backend_xnnpack
-    - kernels_custom
-    - kernels_optimized
-    - kernels_portable
-    - kernels_quantized
-1. In "Build Settings" > "Other Linker Flags" > "Any iOS Simulator SDK", add:
-   ```
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libkernels_optimized-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libkernels_custom-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libkernels_quantized-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libbackend_xnnpack-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libbackend_coreml-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libbackend_mps-simulator-release.a
-   ```
-
-1. In "Build Settings" > "Other Linker Flags" > "Any iOS SDK", add:
-
-   ```
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libkernels_optimized-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libkernels_custom-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libkernels_quantized-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libbackend_xnnpack-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libbackend_coreml-simulator-release.a
-   -force_load
-   $(BUILT_PRODUCTS_DIR)/libbackend_mps-simulator-release.a
-   ```
 
 ### Preparing a model
 
@@ -6,13 +6,14 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
 | API | Provider(s) |
 |-----|-------------|
 | agents | `inline::meta-reference` |
-| datasetio | `remote::huggingface`, `inline::localfs` |
+| datasetio | `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::nvidia` |
-| safety | `inline::llama-guard` |
-| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
+| post_training | `remote::nvidia` |
+| safety | `remote::nvidia` |
+| scoring | `inline::basic` |
 | telemetry | `inline::meta-reference` |
-| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
+| tool_runtime | `inline::rag-runtime` |
 | vector_io | `inline::faiss` |
 
 
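To see which of these providers a running instance actually exposes, the Python client can be asked directly. A sketch, assuming the `llama-stack-client` package, a server on the default port, and that this SDK version exposes a `providers.list()` call (the field names below may differ between releases):

```python
# List the providers reported by a running distribution-nvidia server.
# Assumes a server at localhost:8321; providers.list() availability depends on the SDK version.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

for provider in client.providers.list():
    print(provider.api, "->", provider.provider_id)
```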
@@ -20,8 +21,16 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
 
 The following environment variables can be configured:
 
-- `LLAMASTACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
 - `NVIDIA_API_KEY`: NVIDIA API Key (default: ``)
+- `NVIDIA_USER_ID`: NVIDIA User ID (default: `llama-stack-user`)
+- `NVIDIA_DATASET_NAMESPACE`: NVIDIA Dataset Namespace (default: `default`)
+- `NVIDIA_ACCESS_POLICIES`: NVIDIA Access Policies (default: `{}`)
+- `NVIDIA_PROJECT_ID`: NVIDIA Project ID (default: `test-project`)
+- `NVIDIA_CUSTOMIZER_URL`: NVIDIA Customizer URL (default: `https://customizer.api.nvidia.com`)
+- `NVIDIA_OUTPUT_MODEL_DIR`: NVIDIA Output Model Directory (default: `test-example-model@v1`)
+- `GUARDRAILS_SERVICE_URL`: URL for the NeMo Guardrails Service (default: `http://0.0.0.0:7331`)
+- `INFERENCE_MODEL`: Inference model (default: `Llama3.1-8B-Instruct`)
+- `SAFETY_MODEL`: Name of the model to use for safety (default: `meta/llama-3.1-8b-instruct`)
 
 ### Models
 
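When driving `llama stack run` from automation, it can be convenient to resolve these variables in one place. The sketch below simply mirrors the defaults listed above and lets the process environment override them:

```python
# Resolve the documented NVIDIA distribution settings, falling back to the defaults above.
import os

NVIDIA_DEFAULTS = {
    "NVIDIA_API_KEY": "",
    "NVIDIA_USER_ID": "llama-stack-user",
    "NVIDIA_DATASET_NAMESPACE": "default",
    "NVIDIA_ACCESS_POLICIES": "{}",
    "NVIDIA_PROJECT_ID": "test-project",
    "NVIDIA_CUSTOMIZER_URL": "https://customizer.api.nvidia.com",
    "NVIDIA_OUTPUT_MODEL_DIR": "test-example-model@v1",
    "GUARDRAILS_SERVICE_URL": "http://0.0.0.0:7331",
    "INFERENCE_MODEL": "Llama3.1-8B-Instruct",
    "SAFETY_MODEL": "meta/llama-3.1-8b-instruct",
}

settings = {name: os.environ.get(name, default) for name, default in NVIDIA_DEFAULTS.items()}
for name, value in settings.items():
    print(f"{name}={value}")
```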
@@ -56,9 +65,10 @@ You can do this via Conda (build code) or Docker which has a pre-built image.
 This method allows you to get started quickly without having to build the distribution code.
 
 ```bash
-LLAMA_STACK_PORT=5001
+LLAMA_STACK_PORT=8321
 docker run \
   -it \
+  --pull always \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v ./run.yaml:/root/my-run.yaml \
   llamastack/distribution-nvidia \
@@ -72,7 +82,7 @@ docker run \
 ```bash
 llama stack build --template nvidia --image-type conda
 llama stack run ./run.yaml \
-  --port 5001 \
+  --port 8321 \
   --env NVIDIA_API_KEY=$NVIDIA_API_KEY
   --env INFERENCE_MODEL=$INFERENCE_MODEL
 ```
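After either launch path, a short Python check can confirm the server came up on port 8321 and that the model passed via `INFERENCE_MODEL` was registered. A sketch, assuming the `llama-stack-client` package (the fallback identifier is illustrative):

```python
# Verify the NVIDIA-backed stack is up and the expected model is registered.
import os
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

expected = os.environ.get("INFERENCE_MODEL", "Llama3.1-8B-Instruct")
registered = [m.identifier for m in client.models.list()]
print("registered models:", registered)
print("expected model registered:", expected in registered)
```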
@@ -28,7 +28,7 @@ The `llamastack/distribution-bedrock` distribution consists of the following pro
 
 The following environment variables can be configured:
 
-- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
+- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `8321`)
 
 ### Models
 
@@ -53,9 +53,10 @@ You can do this via Conda (build code) or Docker which has a pre-built image.
 This method allows you to get started quickly without having to build the distribution code.
 
 ```bash
-LLAMA_STACK_PORT=5001
+LLAMA_STACK_PORT=8321
 docker run \
   -it \
+  --pull always \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   llamastack/distribution-bedrock \
   --port $LLAMA_STACK_PORT \
Some files were not shown because too many files have changed in this diff.