diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index e16c2e461..000000000 --- a/.coveragerc +++ /dev/null @@ -1,6 +0,0 @@ -[run] -omit = - */tests/* - */llama_stack/providers/* - */llama_stack/templates/* - .venv/* diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5884f2582..9c5c5486f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -2,4 +2,4 @@ # These owners will be the default owners for everything in # the repo. Unless a later match takes precedence, -* @ashwinb @yanxi0830 @hardikjshah @raghotham @ehhuang @terrytangyuan @leseb @bbrowning +* @ashwinb @yanxi0830 @hardikjshah @dltn @raghotham @dineshyv @vladimirivic @sixianyi0721 @ehhuang @terrytangyuan diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 263828e1c..af2058b9a 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,8 +1,10 @@ # What does this PR do? - +[Provide a short summary of what this PR does and why. Link to relevant issues if applicable.] - - +[//]: # (If resolving an issue, uncomment and update the line below) +[//]: # (Closes #[issue-number]) ## Test Plan - +[Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.*] + +[//]: # (## Documentation) diff --git a/.github/TRIAGERS.md b/.github/TRIAGERS.md deleted file mode 100644 index 586a5a506..000000000 --- a/.github/TRIAGERS.md +++ /dev/null @@ -1,2 +0,0 @@ -# This file documents Triage members in the Llama Stack community - @bbrowning @booxter @franciscojavierarceo @leseb diff --git a/.github/actions/setup-ollama/action.yml b/.github/actions/setup-ollama/action.yml deleted file mode 100644 index 3dd6c940c..000000000 --- a/.github/actions/setup-ollama/action.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Setup Ollama -description: Start Ollama and cache model -inputs: - models: - description: Comma-separated list of models to pull - default: "llama3.2:3b-instruct-fp16,all-minilm:latest" -runs: - using: "composite" - steps: - - name: Install and start Ollama - shell: bash - run: | - # the ollama installer also starts the ollama service - curl -fsSL https://ollama.com/install.sh | sh - - # Do NOT cache models - pulling the cache is actually slower than just pulling the model. - # It takes ~45 seconds to pull the models from the cache and unpack it, but only 30 seconds to - # pull them directly. - # Maybe this is because the cache is being pulled at the same time by all the matrix jobs? - - name: Pull requested models - if: inputs.models != '' - shell: bash - run: | - for model in $(echo "${{ inputs.models }}" | tr ',' ' '); do - ollama pull "$model" - done diff --git a/.github/actions/setup-runner/action.yml b/.github/actions/setup-runner/action.yml deleted file mode 100644 index 6cba4fdc3..000000000 --- a/.github/actions/setup-runner/action.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Setup runner -description: Prepare a runner for the tests (install uv, python, project dependencies, etc.) -runs: - using: "composite" - steps: - - name: Install uv - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1 - with: - python-version: "3.10" - activate-environment: true - version: 0.7.6 - - - name: Install dependencies - shell: bash - run: | - uv sync --all-groups - uv pip install ollama faiss-cpu - # always test against the latest version of the client - # TODO: this is not necessarily a good idea. 
we need to test against both published and latest - # to find out backwards compatibility issues. - uv pip install git+https://github.com/meta-llama/llama-stack-client-python.git@main - uv pip install -e . diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index d68af5615..000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,23 +0,0 @@ -# GitHub Dependabot configuration -version: 2 -updates: - # Enable version updates for GitHub Actions - - package-ecosystem: "github-actions" - directory: "/" # Will use the default workflow location of `.github/workflows` - schedule: - interval: "weekly" - day: "saturday" - commit-message: - prefix: chore(github-deps) - - package-ecosystem: "uv" - directory: "/" - schedule: - interval: "weekly" - day: "saturday" - # ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit - open-pull-requests-limit: 0 - labels: - - type/dependencies - - python - commit-message: - prefix: chore(python-deps) diff --git a/.github/workflows/Dockerfile b/.github/workflows/Dockerfile deleted file mode 100644 index 9261bd174..000000000 --- a/.github/workflows/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM localhost:5000/distribution-kvant:dev \ No newline at end of file diff --git a/.github/workflows/ci-playground.yaml b/.github/workflows/ci-playground.yaml deleted file mode 100644 index 251782855..000000000 --- a/.github/workflows/ci-playground.yaml +++ /dev/null @@ -1,73 +0,0 @@ -name: Build and Push playground container -run-name: Build and Push playground container -on: - workflow_dispatch: - #schedule: - # - cron: "0 10 * * *" - push: - branches: - - main - - kvant - tags: - - 'v*' - pull_request: - branches: - - main - - kvant -env: - IMAGE: git.kvant.cloud/${{github.repository}}-playground -jobs: - build-playground: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set current time - uses: https://github.com/gerred/actions/current-time@master - id: current_time - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to git.kvant.cloud registry - uses: docker/login-action@v3 - with: - registry: git.kvant.cloud - username: ${{ vars.ORG_PACKAGE_WRITER_USERNAME }} - password: ${{ secrets.ORG_PACKAGE_WRITER_TOKEN }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - # list of Docker images to use as base name for tags - images: | - ${{env.IMAGE}} - # generate Docker tags based on the following events/attributes - tags: | - type=schedule - type=ref,event=branch - type=ref,event=pr - type=ref,event=tag - type=semver,pattern={{version}} - - - name: Build and push to gitea registry - uses: docker/build-push-action@v6 - with: - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - context: . 
- file: llama_stack/distribution/ui/Containerfile - provenance: mode=max - sbom: true - build-args: | - BUILD_DATE=${{ steps.current_time.outputs.time }} - cache-from: | - type=registry,ref=${{ env.IMAGE }}:buildcache - type=registry,ref=${{ env.IMAGE }}:${{ github.ref_name }} - type=registry,ref=${{ env.IMAGE }}:main - cache-to: type=registry,ref=${{ env.IMAGE }}:buildcache,mode=max,image-manifest=true diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml deleted file mode 100644 index 87f196cc2..000000000 --- a/.github/workflows/ci.yaml +++ /dev/null @@ -1,98 +0,0 @@ -name: Build and Push container -run-name: Build and Push container -on: - workflow_dispatch: - #schedule: - # - cron: "0 10 * * *" - push: - branches: - - main - - kvant - tags: - - 'v*' - pull_request: - branches: - - main - - kvant -env: - IMAGE: git.kvant.cloud/${{github.repository}} -jobs: - build: - runs-on: ubuntu-latest - services: - registry: - image: registry:2 - ports: - - 5000:5000 - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set current time - uses: https://github.com/gerred/actions/current-time@master - id: current_time - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver-opts: network=host - - - name: Login to git.kvant.cloud registry - uses: docker/login-action@v3 - with: - registry: git.kvant.cloud - username: ${{ vars.ORG_PACKAGE_WRITER_USERNAME }} - password: ${{ secrets.ORG_PACKAGE_WRITER_TOKEN }} - - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - # list of Docker images to use as base name for tags - images: | - ${{env.IMAGE}} - # generate Docker tags based on the following events/attributes - tags: | - type=schedule - type=ref,event=branch - type=ref,event=pr - type=ref,event=tag - type=semver,pattern={{version}} - - - name: Install uv - uses: https://github.com/astral-sh/setup-uv@v5 - with: - # Install a specific version of uv. - version: "0.7.8" - - - name: Build - env: - USE_COPY_NOT_MOUNT: true - LLAMA_STACK_DIR: . - run: | - uvx --from . 
llama stack build --template kvant --image-type container - - # docker tag distribution-kvant:dev ${{env.IMAGE}}:kvant - # docker push ${{env.IMAGE}}:kvant - - docker tag distribution-kvant:dev localhost:5000/distribution-kvant:dev - docker push localhost:5000/distribution-kvant:dev - - - name: Build and push to gitea registry - uses: docker/build-push-action@v6 - with: - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - context: .github/workflows - provenance: mode=max - sbom: true - build-args: | - BUILD_DATE=${{ steps.current_time.outputs.time }} - cache-from: | - type=registry,ref=${{ env.IMAGE }}:buildcache - type=registry,ref=${{ env.IMAGE }}:${{ github.ref_name }} - type=registry,ref=${{ env.IMAGE }}:main - cache-to: type=registry,ref=${{ env.IMAGE }}:buildcache,mode=max,image-manifest=true diff --git a/.github/workflows_upstream/gha_workflow_llama_stack_tests.yml b/.github/workflows/gha_workflow_llama_stack_tests.yml similarity index 96% rename from .github/workflows_upstream/gha_workflow_llama_stack_tests.yml rename to .github/workflows/gha_workflow_llama_stack_tests.yml index 9eae291e9..89e5edf71 100644 --- a/.github/workflows_upstream/gha_workflow_llama_stack_tests.yml +++ b/.github/workflows/gha_workflow_llama_stack_tests.yml @@ -140,7 +140,7 @@ jobs: ####################### - name: "Checkout 'meta-llama/llama-stack' repository" id: checkout_repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@v4 with: ref: ${{ inputs.branch }} @@ -302,7 +302,7 @@ jobs: - name: "PR - Test Summary" id: pr_test_summary_create if: github.event_name == 'pull_request_target' - uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2.4 + uses: test-summary/action@v2 with: paths: "${{ github.workspace }}/merged-test-results.xml" output: test-summary.md @@ -310,7 +310,7 @@ jobs: - name: "PR - Upload Test Summary" id: pr_test_summary_upload if: github.event_name == 'pull_request_target' - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@v3 with: name: test-summary path: test-summary.md @@ -320,7 +320,7 @@ jobs: - name: "PR - Update comment" id: pr_update_comment if: github.event_name == 'pull_request_target' - uses: thollander/actions-comment-pull-request@24bffb9b452ba05a4f3f77933840a6a841d1b32b # v3.0.1 + uses: thollander/actions-comment-pull-request@v2 with: filePath: test-summary.md @@ -350,6 +350,6 @@ jobs: - name: "Manual - Test Summary" id: manual_test_summary if: always() && github.event_name == 'workflow_dispatch' - uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2.4 + uses: test-summary/action@v2 with: paths: "${{ github.workspace }}/merged-test-results.xml" diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 000000000..046387ab9 --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,29 @@ +name: Pre-commit + +on: + pull_request: + push: + branches: [main] + +jobs: + pre-commit: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: pip + cache-dependency-path: | + **/requirements*.txt + .pre-commit-config.yaml + + - uses: pre-commit/action@v3.0.1 + + - name: Verify if there are any diff files after pre-commit + run: | + git diff --exit-code || (echo "There are 
uncommitted changes, run pre-commit locally and commit again" && exit 1) diff --git a/.github/workflows_upstream/semantic-pr.yml b/.github/workflows/semantic-pr.yml similarity index 64% rename from .github/workflows_upstream/semantic-pr.yml rename to .github/workflows/semantic-pr.yml index 2dc1ed473..460acf237 100644 --- a/.github/workflows_upstream/semantic-pr.yml +++ b/.github/workflows/semantic-pr.yml @@ -8,10 +8,6 @@ on: - reopened - synchronize -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - permissions: contents: read @@ -20,6 +16,6 @@ jobs: runs-on: ubuntu-latest steps: - name: Check PR Title's semantic conformance - uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5.5.3 + uses: amannn/action-semantic-pull-request@v5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows_upstream/tests.yml b/.github/workflows/tests.yml similarity index 96% rename from .github/workflows_upstream/tests.yml rename to .github/workflows/tests.yml index 79c935005..cfc26000b 100644 --- a/.github/workflows_upstream/tests.yml +++ b/.github/workflows/tests.yml @@ -20,7 +20,7 @@ jobs: matrix: provider: [fireworks, together] steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.commit_sha }} diff --git a/.github/workflows_upstream/update-readthedocs.yml b/.github/workflows/update-readthedocs.yml similarity index 73% rename from .github/workflows_upstream/update-readthedocs.yml rename to .github/workflows/update-readthedocs.yml index 981332a77..23bafa1e5 100644 --- a/.github/workflows_upstream/update-readthedocs.yml +++ b/.github/workflows/update-readthedocs.yml @@ -12,22 +12,14 @@ on: - main paths: - 'docs/**' - - 'pyproject.toml' - '.github/workflows/update-readthedocs.yml' - tags: - - '*' pull_request: branches: - main paths: - 'docs/**' - - 'pyproject.toml' - '.github/workflows/update-readthedocs.yml' -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - jobs: update-readthedocs: runs-on: ubuntu-latest @@ -35,10 +27,18 @@ jobs: TOKEN: ${{ secrets.READTHEDOCS_TOKEN }} steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@v4 - - name: Install dependencies - uses: ./.github/actions/setup-runner + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install the latest version of uv + uses: astral-sh/setup-uv@v5 + + - name: Sync with uv + run: uv sync --extra docs - name: Build HTML run: | @@ -55,10 +55,7 @@ jobs: response=$(curl -X POST \ -H "Content-Type: application/json" \ - -d "{ - \"token\": \"$TOKEN\", - \"version\": \"$GITHUB_REF_NAME\" - }" \ + -d "{\"token\": \"$TOKEN\"}" \ https://readthedocs.org/api/v2/webhook/llama-stack/289768/) echo "Response: $response" diff --git a/.github/workflows_upstream/changelog.yml b/.github/workflows_upstream/changelog.yml deleted file mode 100644 index c497348b0..000000000 --- a/.github/workflows_upstream/changelog.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: Update Changelog - -on: - release: - types: [published, unpublished, created, edited, deleted, released] - -permissions: - contents: read - -jobs: - generate_changelog: - name: Generate changelog - permissions: - contents: write # for peter-evans/create-pull-request to create branch - pull-requests: write # for peter-evans/create-pull-request to create a PR - 
runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - ref: main - fetch-depth: 0 - - run: | - python ./scripts/gen-changelog.py - - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 - with: - title: 'docs: update CHANGELOG.md for ${{ github.ref_name }}' - commit-message: 'docs: update CHANGELOG.md for ${{ github.ref_name }}' - branch: create-pull-request/changelog - signoff: true diff --git a/.github/workflows_upstream/install-script-ci.yml b/.github/workflows_upstream/install-script-ci.yml deleted file mode 100644 index 2eb234c77..000000000 --- a/.github/workflows_upstream/install-script-ci.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Installer CI - -on: - pull_request: - paths: - - 'install.sh' - push: - paths: - - 'install.sh' - schedule: - - cron: '0 2 * * *' # every day at 02:00 UTC - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2 - - name: Run ShellCheck on install.sh - run: shellcheck install.sh - smoke-test: - needs: lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 4.2.2 - - name: Run installer end-to-end - run: ./install.sh diff --git a/.github/workflows_upstream/integration-auth-tests.yml b/.github/workflows_upstream/integration-auth-tests.yml deleted file mode 100644 index a3a746246..000000000 --- a/.github/workflows_upstream/integration-auth-tests.yml +++ /dev/null @@ -1,132 +0,0 @@ -name: Integration Auth Tests - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - paths: - - 'distributions/**' - - 'llama_stack/**' - - 'tests/integration/**' - - 'uv.lock' - - 'pyproject.toml' - - 'requirements.txt' - - '.github/workflows/integration-auth-tests.yml' # This workflow - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - test-matrix: - runs-on: ubuntu-latest - strategy: - matrix: - auth-provider: [oauth2_token] - fail-fast: false # we want to run all tests regardless of failure - - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Build Llama Stack - run: | - llama stack build --template ollama --image-type venv - - - name: Install minikube - if: ${{ matrix.auth-provider == 'kubernetes' }} - uses: medyagh/setup-minikube@cea33675329b799adccc9526aa5daccc26cd5052 # v0.0.19 - - - name: Start minikube - if: ${{ matrix.auth-provider == 'oauth2_token' }} - run: | - minikube start - kubectl get pods -A - - - name: Configure Kube Auth - if: ${{ matrix.auth-provider == 'oauth2_token' }} - run: | - kubectl create namespace llama-stack - kubectl create serviceaccount llama-stack-auth -n llama-stack - kubectl create rolebinding llama-stack-auth-rolebinding --clusterrole=admin --serviceaccount=llama-stack:llama-stack-auth -n llama-stack - kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token - cat <> $GITHUB_ENV - echo "KUBERNETES_CA_CERT_PATH=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}')" >> $GITHUB_ENV - echo "KUBERNETES_ISSUER=$(kubectl get --raw /.well-known/openid-configuration| jq -r .issuer)" >> $GITHUB_ENV - echo "KUBERNETES_AUDIENCE=$(kubectl create token llama-stack-auth -n llama-stack --duration=1h | cut -d. 
-f2 | base64 -d | jq -r '.aud[0]')" >> $GITHUB_ENV - - - name: Set Kube Auth Config and run server - env: - INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" - if: ${{ matrix.auth-provider == 'oauth2_token' }} - run: | - run_dir=$(mktemp -d) - cat <<'EOF' > $run_dir/run.yaml - version: '2' - image_name: kube - apis: [] - providers: {} - server: - port: 8321 - EOF - yq eval '.server.auth = {"provider_type": "${{ matrix.auth-provider }}"}' -i $run_dir/run.yaml - yq eval '.server.auth.config = {"tls_cafile": "${{ env.KUBERNETES_CA_CERT_PATH }}", "issuer": "${{ env.KUBERNETES_ISSUER }}", "audience": "${{ env.KUBERNETES_AUDIENCE }}"}' -i $run_dir/run.yaml - yq eval '.server.auth.config.jwks = {"uri": "${{ env.KUBERNETES_API_SERVER_URL }}"}' -i $run_dir/run.yaml - cat $run_dir/run.yaml - - nohup uv run llama stack run $run_dir/run.yaml --image-type venv > server.log 2>&1 & - - - name: Wait for Llama Stack server to be ready - run: | - echo "Waiting for Llama Stack server..." - for i in {1..30}; do - if curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://localhost:8321/v1/health | grep -q "OK"; then - echo "Llama Stack server is up!" - if grep -q "Enabling authentication with provider: ${{ matrix.auth-provider }}" server.log; then - echo "Llama Stack server is configured to use ${{ matrix.auth-provider }} auth" - exit 0 - else - echo "Llama Stack server is not configured to use ${{ matrix.auth-provider }} auth" - cat server.log - exit 1 - fi - fi - sleep 1 - done - echo "Llama Stack server failed to start" - cat server.log - exit 1 - - - name: Test auth - run: | - curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers|jq diff --git a/.github/workflows_upstream/integration-tests.yml b/.github/workflows_upstream/integration-tests.yml deleted file mode 100644 index d78e82c9d..000000000 --- a/.github/workflows_upstream/integration-tests.yml +++ /dev/null @@ -1,116 +0,0 @@ -name: Integration Tests - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - paths: - - 'llama_stack/**' - - 'tests/integration/**' - - 'uv.lock' - - 'pyproject.toml' - - 'requirements.txt' - - '.github/workflows/integration-tests.yml' # This workflow - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - test-matrix: - runs-on: ubuntu-latest - strategy: - matrix: - # Listing tests manually since some of them currently fail - # TODO: generate matrix list from tests/integration when fixed - test-type: [agents, inference, datasets, inspect, scoring, post_training, providers, tool_runtime] - client-type: [library, http] - fail-fast: false # we want to run all tests regardless of failure - - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Setup ollama - uses: ./.github/actions/setup-ollama - - - name: Build Llama Stack - run: | - llama stack build --template ollama --image-type venv - - - name: Start Llama Stack server in background - if: matrix.client-type == 'http' - env: - INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" - run: | - LLAMA_STACK_LOG_FILE=server.log nohup uv run llama stack run ./llama_stack/templates/ollama/run.yaml --image-type venv & - - - name: Wait for Llama Stack server to be ready - if: matrix.client-type == 'http' - run: | - echo "Waiting for Llama Stack server..." 
- for i in {1..30}; do - if curl -s http://localhost:8321/v1/health | grep -q "OK"; then - echo "Llama Stack server is up!" - exit 0 - fi - sleep 1 - done - echo "Llama Stack server failed to start" - cat server.log - exit 1 - - - name: Verify Ollama status is OK - if: matrix.client-type == 'http' - run: | - echo "Verifying Ollama status..." - ollama_status=$(curl -s -L http://127.0.0.1:8321/v1/providers/ollama|jq --raw-output .health.status) - echo "Ollama status: $ollama_status" - if [ "$ollama_status" != "OK" ]; then - echo "Ollama health check failed" - exit 1 - fi - - - name: Check Storage and Memory Available Before Tests - if: ${{ always() }} - run: | - free -h - df -h - - - name: Run Integration Tests - env: - INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" - run: | - if [ "${{ matrix.client-type }}" == "library" ]; then - stack_config="ollama" - else - stack_config="http://localhost:8321" - fi - uv run pytest -s -v tests/integration/${{ matrix.test-type }} --stack-config=${stack_config} \ - -k "not(builtin_tool or safety_with_image or code_interpreter or test_rag)" \ - --text-model="meta-llama/Llama-3.2-3B-Instruct" \ - --embedding-model=all-MiniLM-L6-v2 - - - name: Check Storage and Memory Available After Tests - if: ${{ always() }} - run: | - free -h - df -h - - - name: Write ollama logs to file - if: ${{ always() }} - run: | - sudo journalctl -u ollama.service > ollama.log - - - name: Upload all logs to artifacts - if: ${{ always() }} - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.client-type }}-${{ matrix.test-type }} - path: | - *.log - retention-days: 1 diff --git a/.github/workflows_upstream/pre-commit.yml b/.github/workflows_upstream/pre-commit.yml deleted file mode 100644 index 2bbd52c53..000000000 --- a/.github/workflows_upstream/pre-commit.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Pre-commit - -on: - pull_request: - push: - branches: [main] - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - pre-commit: - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set up Python - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 - with: - python-version: '3.11' - cache: pip - cache-dependency-path: | - **/requirements*.txt - .pre-commit-config.yaml - - - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 - env: - SKIP: no-commit-to-branch - RUFF_OUTPUT_FORMAT: github - - - name: Verify if there are any diff files after pre-commit - run: | - git diff --exit-code || (echo "There are uncommitted changes, run pre-commit locally and commit again" && exit 1) - - - name: Verify if there are any new files after pre-commit - run: | - unstaged_files=$(git ls-files --others --exclude-standard) - if [ -n "$unstaged_files" ]; then - echo "There are uncommitted new files, run pre-commit locally and commit again" - echo "$unstaged_files" - exit 1 - fi diff --git a/.github/workflows_upstream/providers-build.yml b/.github/workflows_upstream/providers-build.yml deleted file mode 100644 index cf53459b9..000000000 --- a/.github/workflows_upstream/providers-build.yml +++ /dev/null @@ -1,147 +0,0 @@ -name: Test Llama Stack Build - -on: - push: - branches: - - main - paths: - - 'llama_stack/cli/stack/build.py' - - 'llama_stack/cli/stack/_build.py' - - 'llama_stack/distribution/build.*' - - 
'llama_stack/distribution/*.sh' - - '.github/workflows/providers-build.yml' - pull_request: - paths: - - 'llama_stack/cli/stack/build.py' - - 'llama_stack/cli/stack/_build.py' - - 'llama_stack/distribution/build.*' - - 'llama_stack/distribution/*.sh' - - '.github/workflows/providers-build.yml' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - generate-matrix: - runs-on: ubuntu-latest - outputs: - templates: ${{ steps.set-matrix.outputs.templates }} - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Generate Template List - id: set-matrix - run: | - templates=$(ls llama_stack/templates/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]') - echo "templates=$templates" >> "$GITHUB_OUTPUT" - - build: - needs: generate-matrix - runs-on: ubuntu-latest - strategy: - matrix: - template: ${{ fromJson(needs.generate-matrix.outputs.templates) }} - image-type: [venv, container] - fail-fast: false # We want to run all jobs even if some fail - - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Print build dependencies - run: | - uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only - - - name: Run Llama Stack Build - run: | - # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead - # LLAMA_STACK_DIR is set to the current directory so we are building from the source - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test - - - name: Print dependencies in the image - if: matrix.image-type == 'venv' - run: | - uv pip list - - build-single-provider: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Build a single provider - run: | - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama - - build-custom-container-distribution: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Build a single provider - run: | - yq -i '.image_type = "container"' llama_stack/templates/starter/build.yaml - yq -i '.image_name = "test"' llama_stack/templates/starter/build.yaml - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. 
uv run llama stack build --config llama_stack/templates/starter/build.yaml - - - name: Inspect the container image entrypoint - run: | - IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1) - entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID) - echo "Entrypoint: $entrypoint" - if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then - echo "Entrypoint is not correct" - exit 1 - fi - - build-ubi9-container-distribution: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Pin template to UBI9 base - run: | - yq -i ' - .image_type = "container" | - .image_name = "ubi9-test" | - .distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest" - ' llama_stack/templates/starter/build.yaml - - - name: Build dev container (UBI9) - env: - USE_COPY_NOT_MOUNT: "true" - LLAMA_STACK_DIR: "." - run: | - uv run llama stack build --config llama_stack/templates/starter/build.yaml - - - name: Inspect UBI9 image - run: | - IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1) - entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID) - echo "Entrypoint: $entrypoint" - if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then - echo "Entrypoint is not correct" - exit 1 - fi - - echo "Checking /etc/os-release in $IMAGE_ID" - docker run --rm --entrypoint sh "$IMAGE_ID" -c \ - 'source /etc/os-release && echo "$ID"' \ - | grep -qE '^(rhel|ubi)$' \ - || { echo "Base image is not UBI 9!"; exit 1; } diff --git a/.github/workflows_upstream/stale_bot.yml b/.github/workflows_upstream/stale_bot.yml deleted file mode 100644 index 06318b5f7..000000000 --- a/.github/workflows_upstream/stale_bot.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: Close stale issues and PRs - -on: - schedule: - - cron: '0 0 * * *' # every day at midnight - -env: - LC_ALL: en_US.UTF-8 - -defaults: - run: - shell: bash - -permissions: - contents: read - -jobs: - stale: - permissions: - issues: write - pull-requests: write - runs-on: ubuntu-latest - steps: - - name: Stale Action - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 - with: - stale-issue-label: 'stale' - stale-issue-message: > - This issue has been automatically marked as stale because it has not had activity within 60 days. - It will be automatically closed if no further activity occurs within 30 days. - close-issue-message: > - This issue has been automatically closed due to inactivity. - Please feel free to reopen if you feel it is still relevant! - days-before-issue-stale: 60 - days-before-issue-close: 30 - stale-pr-label: 'stale' - stale-pr-message: > - This pull request has been automatically marked as stale because it has not had activity within 60 days. - It will be automatically closed if no further activity occurs within 30 days. - close-pr-message: > - This pull request has been automatically closed due to inactivity. - Please feel free to reopen if you intend to continue working on it! 
- days-before-pr-stale: 60 - days-before-pr-close: 30 - operations-per-run: 300 diff --git a/.github/workflows_upstream/test-external-providers.yml b/.github/workflows_upstream/test-external-providers.yml deleted file mode 100644 index 06ab7cf3c..000000000 --- a/.github/workflows_upstream/test-external-providers.yml +++ /dev/null @@ -1,71 +0,0 @@ -name: Test External Providers - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - paths: - - 'llama_stack/**' - - 'tests/integration/**' - - 'uv.lock' - - 'pyproject.toml' - - 'requirements.txt' - - '.github/workflows/test-external-providers.yml' # This workflow - -jobs: - test-external-providers: - runs-on: ubuntu-latest - strategy: - matrix: - image-type: [venv] - # We don't do container yet, it's tricky to install a package from the host into the - # container and point 'uv pip install' to the correct path... - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Apply image type to config file - run: | - yq -i '.image_type = "${{ matrix.image-type }}"' tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml - cat tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml - - - name: Setup directory for Ollama custom provider - run: | - mkdir -p tests/external-provider/llama-stack-provider-ollama/src/ - cp -a llama_stack/providers/remote/inference/ollama/ tests/external-provider/llama-stack-provider-ollama/src/llama_stack_provider_ollama - - - name: Create provider configuration - run: | - mkdir -p /home/runner/.llama/providers.d/remote/inference - cp tests/external-provider/llama-stack-provider-ollama/custom_ollama.yaml /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml - - - name: Build distro from config file - run: | - USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml - - - name: Start Llama Stack server in background - if: ${{ matrix.image-type }} == 'venv' - env: - INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct" - run: | - uv run pip list - nohup uv run --active llama stack run tests/external-provider/llama-stack-provider-ollama/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 & - - - name: Wait for Llama Stack server to be ready - run: | - for i in {1..30}; do - if ! grep -q "remote::custom_ollama from /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml" server.log; then - echo "Waiting for Llama Stack server to load the provider..." 
- sleep 1 - else - echo "Provider loaded" - exit 0 - fi - done - echo "Provider failed to load" - cat server.log - exit 1 diff --git a/.github/workflows_upstream/unit-tests.yml b/.github/workflows_upstream/unit-tests.yml deleted file mode 100644 index fc0459f0f..000000000 --- a/.github/workflows_upstream/unit-tests.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Unit Tests - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - paths: - - 'llama_stack/**' - - 'tests/unit/**' - - 'uv.lock' - - 'pyproject.toml' - - 'requirements.txt' - - '.github/workflows/unit-tests.yml' # This workflow - workflow_dispatch: - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - unit-tests: - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - python: - - "3.10" - - "3.11" - - "3.12" - - "3.13" - steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Install dependencies - uses: ./.github/actions/setup-runner - - - name: Run unit tests - run: | - PYTHON_VERSION=${{ matrix.python }} ./scripts/unit-tests.sh --cov=llama_stack --junitxml=pytest-report-${{ matrix.python }}.xml --cov-report=html:htmlcov-${{ matrix.python }} - - - name: Upload test results - if: always() - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: test-results-${{ matrix.python }} - path: | - .pytest_cache/ - pytest-report-${{ matrix.python }}.xml - htmlcov-${{ matrix.python }}/ - retention-days: 7 diff --git a/.gitignore b/.gitignore index 747acdc7b..f54d1563d 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,6 @@ dev_requirements.txt build .DS_Store llama_stack/configs/* -.cursor/ xcuserdata/ *.hmap .DS_Store @@ -21,7 +20,3 @@ _build docs/src pyrightconfig.json venv/ -pytest-report.xml -.coverage -.python-version -data diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..e69de29bb diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aaec469e4..351cd12ac 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,25 +8,14 @@ repos: rev: v5.0.0 # Latest stable version hooks: - id: check-merge-conflict - args: ['--assume-in-merge'] - - id: trailing-whitespace - exclude: '\.py$' # Exclude Python files as Ruff already handles them - id: check-added-large-files args: ['--maxkb=1000'] - id: end-of-file-fixer exclude: '^(.*\.svg)$' - - id: no-commit-to-branch - - id: check-yaml - args: ["--unsafe"] - - id: detect-private-key - - id: requirements-txt-fixer - - id: mixed-line-ending - args: [--fix=lf] # Forces to replace line ending by LF (line feed) - - id: check-executables-have-shebangs - - id: check-json - - id: check-shebang-scripts-are-executable - - id: check-symlinks - - id: check-toml + +# Temporarily disabling this +# - id: no-commit-to-branch +# args: ['--branch=main'] - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.4 @@ -53,7 +42,7 @@ repos: - black==24.3.0 - repo: https://github.com/astral-sh/uv-pre-commit - rev: 0.7.8 + rev: 0.6.3 hooks: - id: uv-lock - id: uv-export @@ -61,7 +50,6 @@ repos: "--frozen", "--no-hashes", "--no-emit-project", - "--no-default-groups", "--output-file=requirements.txt" ] @@ -78,6 +66,12 @@ repos: - pydantic pass_filenames: false +# - repo: https://github.com/jsh9/pydoclint +# rev: d88180a8632bb1602a4d81344085cf320f288c5a +# hooks: +# - id: pydoclint +# args: [--config=pyproject.toml] + # - repo: https://github.com/tcort/markdown-link-check # rev: 
v3.11.2 # hooks: @@ -89,29 +83,14 @@ repos: - id: distro-codegen name: Distribution Template Codegen additional_dependencies: - - uv==0.7.8 - entry: uv run --group codegen ./scripts/distro_codegen.py + - rich + - pydantic + - uv==0.6.0 + entry: uv run python -m llama_stack.scripts.distro_codegen language: python pass_filenames: false require_serial: true files: ^llama_stack/templates/.*$|^llama_stack/providers/.*/inference/.*/models\.py$ - - id: openapi-codegen - name: API Spec Codegen - additional_dependencies: - - uv==0.7.8 - entry: sh -c 'uv run ./docs/openapi_generator/run_openapi_generator.sh > /dev/null' - language: python - pass_filenames: false - require_serial: true - files: ^llama_stack/apis/|^docs/openapi_generator/ - - id: check-workflows-use-hashes - name: Check GitHub Actions use SHA-pinned actions - entry: ./scripts/check-workflows-use-hashes.sh - language: system - pass_filenames: false - require_serial: true - always_run: true - files: ^\.github/workflows/.*\.ya?ml$ ci: autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks diff --git a/.python-version b/.python-version new file mode 100644 index 000000000..c8cfe3959 --- /dev/null +++ b/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 461977a6c..f114dbf9b 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -5,21 +5,28 @@ # Required version: 2 -# Build documentation in the "docs/" directory with Sphinx -sphinx: - configuration: docs/source/conf.py - # Set the OS, Python version and other tools you might need build: os: ubuntu-22.04 tools: python: "3.12" - jobs: - pre_create_environment: - - asdf plugin add uv - - asdf install uv latest - - asdf global uv latest - create_environment: - - uv venv "${READTHEDOCS_VIRTUALENV_PATH}" - install: - - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv sync --frozen --group docs + # You can also specify other tool versions: + # nodejs: "19" + # rust: "1.64" + # golang: "1.19" + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optionally build your docs in additional formats such as PDF and ePub +# formats: +# - pdf +# - epub + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index f7644a5af..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,482 +0,0 @@ -# Changelog - -# v0.2.7 -Published on: 2025-05-16T20:38:10Z - -## Highlights - -This is a small update. But a couple highlights: - -* feat: function tools in OpenAI Responses by @bbrowning in https://github.com/meta-llama/llama-stack/pull/2094, getting closer to ready. Streaming is the next missing piece. -* feat: Adding support for customizing chunk context in RAG insertion and querying by @franciscojavierarceo in https://github.com/meta-llama/llama-stack/pull/2134 -* feat: scaffolding for Llama Stack UI by @ehhuang in https://github.com/meta-llama/llama-stack/pull/2149, more to come in the coming releases. - - ---- - -# v0.2.6 -Published on: 2025-05-12T18:06:52Z - - - ---- - -# v0.2.5 -Published on: 2025-05-04T20:16:49Z - - - ---- - -# v0.2.4 -Published on: 2025-04-29T17:26:01Z - -## Highlights - -* One-liner to install and run Llama Stack yay! 
by @reluctantfuturist in https://github.com/meta-llama/llama-stack/pull/1383 -* support for NVIDIA NeMo datastore by @raspawar in https://github.com/meta-llama/llama-stack/pull/1852 -* (yuge!) Kubernetes authentication by @leseb in https://github.com/meta-llama/llama-stack/pull/1778 -* (yuge!) OpenAI Responses API by @bbrowning in https://github.com/meta-llama/llama-stack/pull/1989 -* add api.llama provider, llama-guard-4 model by @ashwinb in https://github.com/meta-llama/llama-stack/pull/2058 - - ---- - -# v0.2.3 -Published on: 2025-04-25T22:46:21Z - -## Highlights - -* OpenAI compatible inference endpoints and client-SDK support. `client.chat.completions.create()` now works. -* significant improvements and functionality added to the nVIDIA distribution -* many improvements to the test verification suite. -* new inference providers: Ramalama, IBM WatsonX -* many improvements to the Playground UI - - ---- - -# v0.2.2 -Published on: 2025-04-13T01:19:49Z - -## Main changes - -- Bring Your Own Provider (@leseb) - use out-of-tree provider code to execute the distribution server -- OpenAI compatible inference API in progress (@bbrowning) -- Provider verifications (@ehhuang) -- Many updates and fixes to playground -- Several llama4 related fixes - - ---- - -# v0.2.1 -Published on: 2025-04-05T23:13:00Z - - - ---- - -# v0.2.0 -Published on: 2025-04-05T19:04:29Z - -## Llama 4 Support - -Checkout more at https://www.llama.com - - - ---- - -# v0.1.9 -Published on: 2025-03-29T00:52:23Z - -### Build and Test Agents -* Agents: Entire document context with attachments -* RAG: Documentation with sqlite-vec faiss comparison -* Getting started: Fixes to getting started notebook. - -### Agent Evals and Model Customization -* (**New**) Post-training: Add nemo customizer - -### Better Engineering -* Moved sqlite-vec to non-blocking calls -* Don't return a payload on file delete - - - ---- - -# v0.1.8 -Published on: 2025-03-24T01:28:50Z - -# v0.1.8 Release Notes - -### Build and Test Agents -* Safety: Integrated NVIDIA as a safety provider. -* VectorDB: Added Qdrant as an inline provider. -* Agents: Added support for multiple tool groups in agents. -* Agents: Simplified imports for Agents in client package - - -### Agent Evals and Model Customization -* Introduced DocVQA and IfEval benchmarks. - -### Deploying and Monitoring Agents -* Introduced a Containerfile and image workflow for the Playground. -* Implemented support for Bearer (API Key) authentication. -* Added attribute-based access control for resources. -* Fixes on docker deployments: use --pull always and standardized the default port to 8321 -* Deprecated: /v1/inspect/providers use /v1/providers/ instead - -### Better Engineering -* Consolidated scripts under the ./scripts directory. -* Addressed mypy violations in various modules. -* Added Dependabot scans for Python dependencies. -* Implemented a scheduled workflow to update the changelog automatically. -* Enforced concurrency to reduce CI loads. 
- - -### New Contributors -* @cmodi-meta made their first contribution in https://github.com/meta-llama/llama-stack/pull/1650 -* @jeffmaury made their first contribution in https://github.com/meta-llama/llama-stack/pull/1671 -* @derekhiggins made their first contribution in https://github.com/meta-llama/llama-stack/pull/1698 -* @Bobbins228 made their first contribution in https://github.com/meta-llama/llama-stack/pull/1745 - -**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.7...v0.1.8 - ---- - -# v0.1.7 -Published on: 2025-03-14T22:30:51Z - -## 0.1.7 Release Notes - -### Build and Test Agents -* Inference: ImageType is now refactored to LlamaStackImageType -* Inference: Added tests to measure TTFT -* Inference: Bring back usage metrics -* Agents: Added endpoint for get agent, list agents and list sessions -* Agents: Automated conversion of type hints in client tool for lite llm format -* Agents: Deprecated ToolResponseMessage in agent.resume API -* Added Provider API for listing and inspecting provider info - -### Agent Evals and Model Customization -* Eval: Added new eval benchmarks Math 500 and BFCL v3 -* Deploy and Monitoring of Agents -* Telemetry: Fix tracing to work across coroutines - -### Better Engineering -* Display code coverage for unit tests -* Updated call sites (inference, tool calls, agents) to move to async non blocking calls -* Unit tests also run on Python 3.11, 3.12, and 3.13 -* Added ollama inference to Integration tests CI -* Improved documentation across examples, testing, CLI, updated providers table ) - - - - ---- - -# v0.1.6 -Published on: 2025-03-08T04:35:08Z - -## 0.1.6 Release Notes - -### Build and Test Agents -* Inference: Fixed support for inline vllm provider -* (**New**) Agent: Build & Monitor Agent Workflows with Llama Stack + Anthropic's Best Practice [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb) -* (**New**) Agent: Revamped agent [documentation](https://llama-stack.readthedocs.io/en/latest/building_applications/agent.html) with more details and examples -* Agent: Unify tools and Python SDK Agents API -* Agent: AsyncAgent Python SDK wrapper supporting async client tool calls -* Agent: Support python functions without @client_tool decorator as client tools -* Agent: deprecation for allow_resume_turn flag, and remove need to specify tool_prompt_format -* VectorIO: MilvusDB support added - -### Agent Evals and Model Customization -* (**New**) Agent: Llama Stack RAG Lifecycle [Notebook](https://github.com/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_RAG_Lifecycle.ipynb) -* Eval: Documentation for eval, scoring, adding new benchmarks -* Eval: Distribution template to run benchmarks on llama & non-llama models -* Eval: Ability to register new custom LLM-as-judge scoring functions -* (**New**) Looking for contributors for open benchmarks. See [documentation](https://llama-stack.readthedocs.io/en/latest/references/evals_reference/index.html#open-benchmark-contributing-guide) for details. 
- -### Deploy and Monitoring of Agents -* Better support for different log levels across all components for better monitoring - -### Better Engineering -* Enhance OpenAPI spec to include Error types across all APIs -* Moved all tests to /tests and created unit tests to run on each PR -* Removed all dependencies on llama-models repo - - ---- - -# v0.1.5.1 -Published on: 2025-02-28T22:37:44Z - -## 0.1.5.1 Release Notes -* Fixes for security risk in https://github.com/meta-llama/llama-stack/pull/1327 and https://github.com/meta-llama/llama-stack/pull/1328 - -**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.1.5...v0.1.5.1 - ---- - -# v0.1.5 -Published on: 2025-02-28T18:14:01Z - -## 0.1.5 Release Notes -### Build Agents -* Inference: Support more non-llama models (openai, anthropic, gemini) -* Inference: Can use the provider's model name in addition to the HF alias -* Inference: Fixed issues with calling tools that weren't specified in the prompt -* RAG: Improved system prompt for RAG and no more need for hard-coded rag-tool calling -* Embeddings: Added support for Nemo retriever embedding models -* Tools: Added support for MCP tools in Ollama Distribution -* Distributions: Added new Groq distribution - -### Customize Models -* Save post-trained checkpoint in SafeTensor format to allow Ollama inference provider to use the post-trained model - -### Monitor agents -* More comprehensive logging of agent steps including client tools -* Telemetry inputs/outputs are now structured and queryable -* Ability to retrieve agents session, turn, step by ids - -### Better Engineering -* Moved executorch Swift code out of this repo into the llama-stack-client-swift repo, similar to kotlin -* Move most logging to use logger instead of prints -* Completed text /chat-completion and /completion tests - - ---- - -# v0.1.4 -Published on: 2025-02-25T00:02:43Z - -## v0.1.4 Release Notes -Here are the key changes coming as part of this release: - -### Build and Test Agents -* Inference: Added support for non-llama models -* Inference: Added option to list all downloaded models and remove models -* Agent: Introduce new api agents.resume_turn to include client side tool execution in the same turn -* Agent: AgentConfig introduces new variable “tool_config” that allows for better tool configuration and system prompt overrides -* Agent: Added logging for agent step start and completion times -* Agent: Added support for logging for tool execution metadata -* Embedding: Updated /inference/embeddings to support asymmetric models, truncation and variable sized outputs -* Embedding: Updated embedding models for Ollama, Together, and Fireworks with available defaults -* VectorIO: Improved performance of sqlite-vec using chunked writes -### Agent Evals and Model Customization -* Deprecated api /eval-tasks. Use /eval/benchmark instead -* Added CPU training support for TorchTune -### Deploy and Monitoring of Agents -* Consistent view of client and server tool calls in telemetry -### Better Engineering -* Made tests more data-driven for consistent evaluation -* Fixed documentation links and improved API reference generation -* Various small fixes for build scripts and system reliability - - - ---- - -# v0.1.3 -Published on: 2025-02-14T20:24:32Z - -## v0.1.3 Release - -Here are some key changes that are coming as part of this release. 
- -### Build and Test Agents -Streamlined the initial development experience -- Added support for llama stack run --image-type venv -- Enhanced vector store options with new sqlite-vec provider and improved Qdrant integration -- vLLM improvements for tool calling and logprobs -- Better handling of sporadic code_interpreter tool calls - -### Agent Evals -Better benchmarking and Agent performance assessment -- Renamed eval API /eval-task to /benchmarks -- Improved documentation and notebooks for RAG and evals - -### Deploy and Monitoring of Agents -Improved production readiness -- Added usage metrics collection for chat completions -- CLI improvements for provider information -- Improved error handling and system reliability -- Better model endpoint handling and accessibility -- Improved signal handling on distro server - -### Better Engineering -Infrastructure and code quality improvements -- Faster text-based chat completion tests -- Improved testing for non-streaming agent apis -- Standardized import formatting with ruff linter -- Added conventional commits standard -- Fixed documentation parsing issues - - ---- - -# v0.1.2 -Published on: 2025-02-07T22:06:49Z - -# TL;DR -- Several stabilizations to development flows after the switch to `uv` -- Migrated CI workflows to new OSS repo - [llama-stack-ops](https://github.com/meta-llama/llama-stack-ops) -- Added automated rebuilds for ReadTheDocs -- Llama Stack server supports HTTPS -- Added system prompt overrides support -- Several bug fixes and improvements to documentation (check out Kubernetes deployment guide by @terrytangyuan ) - - ---- - -# v0.1.1 -Published on: 2025-02-02T02:29:24Z - -A bunch of small / big improvements everywhere including support for Windows, switching to `uv` and many provider improvements. - - ---- - -# v0.1.0 -Published on: 2025-01-24T17:47:47Z - -We are excited to announce a stable API release of Llama Stack, which enables developers to build RAG applications and Agents using tools and safety shields, monitor and those agents with telemetry, and evaluate the agent with scoring functions. - -## Context -GenAI application developers need more than just an LLM - they need to integrate tools, connect with their data sources, establish guardrails, and ground the LLM responses effectively. Currently, developers must piece together various tools and APIs, complicating the development lifecycle and increasing costs. The result is that developers are spending more time on these integrations rather than focusing on the application logic itself. The bespoke coupling of components also makes it challenging to adopt state-of-the-art solutions in the rapidly evolving GenAI space. This is particularly difficult for open models like Llama, as best practices are not widely established in the open. - -Llama Stack was created to provide developers with a comprehensive and coherent interface that simplifies AI application development and codifies best practices across the Llama ecosystem. Since our launch in September 2024, we have seen a huge uptick in interest in Llama Stack APIs by both AI developers and from partners building AI services with Llama models. Partners like Nvidia, Fireworks, and Ollama have collaborated with us to develop implementations across various APIs, including inference, memory, and safety. - -With Llama Stack, you can easily build a RAG agent which can also search the web, do complex math, and custom tool calling. You can use telemetry to inspect those traces, and convert telemetry into evals datasets. 
And with Llama Stack’s plugin architecture and prepackage distributions, you choose to run your agent anywhere - in the cloud with our partners, deploy your own environment using virtualenv, conda, or Docker, operate locally with Ollama, or even run on mobile devices with our SDKs. Llama Stack offers unprecedented flexibility while also simplifying the developer experience. - -## Release -After iterating on the APIs for the last 3 months, today we’re launching a stable release (V1) of the Llama Stack APIs and the corresponding llama-stack server and client packages(v0.1.0). We now have automated tests for providers. These tests make sure that all provider implementations are verified. Developers can now easily and reliably select distributions or providers based on their specific requirements. - -There are example standalone apps in llama-stack-apps. - - -## Key Features of this release - -- **Unified API Layer** - - Inference: Run LLM models - - RAG: Store and retrieve knowledge for RAG - - Agents: Build multi-step agentic workflows - - Tools: Register tools that can be called by the agent - - Safety: Apply content filtering and safety policies - - Evaluation: Test model and agent quality - - Telemetry: Collect and analyze usage data and complex agentic traces - - Post Training ( Coming Soon ): Fine tune models for specific use cases - -- **Rich Provider Ecosystem** - - Local Development: Meta's Reference, Ollama - - Cloud: Fireworks, Together, Nvidia, AWS Bedrock, Groq, Cerebras - - On-premises: Nvidia NIM, vLLM, TGI, Dell-TGI - - On-device: iOS and Android support - -- **Built for Production** - - Pre-packaged distributions for common deployment scenarios - - Backwards compatibility across model versions - - Comprehensive evaluation capabilities - - Full observability and monitoring - -- **Multiple developer interfaces** - - CLI: Command line interface - - Python SDK - - Swift iOS SDK - - Kotlin Android SDK - -- **Sample llama stack applications** - - Python - - iOS - - Android - - - ---- - -# v0.1.0rc12 -Published on: 2025-01-22T22:24:01Z - - - ---- - -# v0.0.63 -Published on: 2024-12-18T07:17:43Z - -A small but important bug-fix release to update the URL datatype for the client-SDKs. The issue affected multimodal agentic turns especially. - -**Full Changelog**: https://github.com/meta-llama/llama-stack/compare/v0.0.62...v0.0.63 - ---- - -# v0.0.62 -Published on: 2024-12-18T02:39:43Z - - - ---- - -# v0.0.61 -Published on: 2024-12-10T20:50:33Z - - - ---- - -# v0.0.55 -Published on: 2024-11-23T17:14:07Z - - - ---- - -# v0.0.54 -Published on: 2024-11-22T00:36:09Z - - - ---- - -# v0.0.53 -Published on: 2024-11-20T22:18:00Z - -🚀 Initial Release Notes for Llama Stack! 
- -### Added -- Resource-oriented design for models, shields, memory banks, datasets and eval tasks -- Persistence for registered objects with distribution -- Ability to persist memory banks created for FAISS -- PostgreSQL KVStore implementation -- Environment variable placeholder support in run.yaml files -- Comprehensive Zero-to-Hero notebooks and quickstart guides -- Support for quantized models in Ollama -- Vision models support for Together, Fireworks, Meta-Reference, and Ollama, and vLLM -- Bedrock distribution with safety shields support -- Evals API with task registration and scoring functions -- MMLU and SimpleQA benchmark scoring functions -- Huggingface dataset provider integration for benchmarks -- Support for custom dataset registration from local paths -- Benchmark evaluation CLI tools with visualization tables -- RAG evaluation scoring functions and metrics -- Local persistence for datasets and eval tasks - -### Changed -- Split safety into distinct providers (llama-guard, prompt-guard, code-scanner) -- Changed provider naming convention (`impls` → `inline`, `adapters` → `remote`) -- Updated API signatures for dataset and eval task registration -- Restructured folder organization for providers -- Enhanced Docker build configuration -- Added version prefixing for REST API routes -- Enhanced evaluation task registration workflow -- Improved benchmark evaluation output formatting -- Restructured evals folder organization for better modularity - -### Removed -- `llama stack configure` command - - ---- diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 10e3f6cee..ef73dadc3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -61,34 +61,26 @@ outlined on that page and do not file a public issue. We use [uv](https://github.com/astral-sh/uv) to manage python dependencies and virtual environments. You can install `uv` by following this [guide](https://docs.astral.sh/uv/getting-started/installation/). - You can install the dependencies by running: ```bash -cd llama-stack -uv sync --extra dev -uv pip install -e . -source .venv/bin/activate +$ cd llama-stack +$ uv sync --extra dev +$ uv pip install -e . +$ source .venv/bin/activate ``` -> [!NOTE] -> You can pin a specific version of Python to use for `uv` by adding a `.python-version` file in the root project directory. -> Otherwise, `uv` will automatically select a Python version according to the `requires-python` section of the `pyproject.toml`. -> For more info, see the [uv docs around Python versions](https://docs.astral.sh/uv/concepts/python-versions/). - Note that you can create a dotenv file `.env` that includes necessary environment variables: ``` LLAMA_STACK_BASE_URL=http://localhost:8321 LLAMA_STACK_CLIENT_LOG=debug LLAMA_STACK_PORT=8321 -LLAMA_STACK_CONFIG= -TAVILY_SEARCH_API_KEY= -BRAVE_SEARCH_API_KEY= +LLAMA_STACK_CONFIG= ``` And then use this dotenv file when running client SDK tests via the following: ```bash -uv run --env-file .env -- pytest -v tests/integration/inference/test_text_inference.py --text-model=meta-llama/Llama-3.1-8B-Instruct +$ uv run --env-file .env -- pytest -v tests/client-sdk/inference/test_text_inference.py ``` ## Pre-commit Hooks @@ -96,7 +88,7 @@ uv run --env-file .env -- pytest -v tests/integration/inference/test_text_infere We use [pre-commit](https://pre-commit.com/) to run linting and formatting checks on your code. 
You can install the pre-commit hooks by running: ```bash -uv run pre-commit install +$ uv run pre-commit install ``` After that, pre-commit hooks will run automatically before each commit. @@ -104,41 +96,26 @@ After that, pre-commit hooks will run automatically before each commit. Alternatively, if you don't want to install the pre-commit hooks, you can run the checks manually by running: ```bash -uv run pre-commit run --all-files +$ uv run pre-commit run --all-files ``` > [!CAUTION] > Before pushing your changes, make sure that the pre-commit hooks have passed successfully. -## Running tests - -You can find the Llama Stack testing documentation here [here](tests/README.md). - ## Adding a new dependency to the project To add a new dependency to the project, you can use the `uv` command. For example, to add `foo` to the project, you can run: ```bash -uv add foo -uv sync +$ uv add foo +$ uv sync ``` ## Coding Style -* Comments should provide meaningful insights into the code. Avoid filler comments that simply - describe the next step, as they create unnecessary clutter, same goes for docstrings. -* Prefer comments to clarify surprising behavior and/or relationships between parts of the code - rather than explain what the next line of code does. -* Catching exceptions, prefer using a specific exception type rather than a broad catch-all like - `Exception`. -* Error messages should be prefixed with "Failed to ..." -* 4 spaces for indentation rather than tab -* When using `# noqa` to suppress a style or linter warning, include a comment explaining the - justification for bypassing the check. -* When using `# type: ignore` to suppress a mypy warning, include a comment explaining the - justification for bypassing the check. -* Don't use unicode characters in the codebase. ASCII-only is preferred for compatibility or - readability reasons. +* 4 spaces for indentation rather than tabs +* 80 character line length +* ... ## Common Tasks @@ -146,32 +123,35 @@ Some tips about common tasks you work on while contributing to Llama Stack: ### Using `llama stack build` -Building a stack image (conda / docker) will use the production version of the `llama-stack` and `llama-stack-client` packages. If you are developing with a llama-stack repository checked out and need your code to be reflected in the stack image, set `LLAMA_STACK_DIR` and `LLAMA_STACK_CLIENT_DIR` to the appropriate checked out directories when running any of the `llama` CLI commands. +Building a stack image (conda / docker) will use the production version of the `llama-stack`, `llama-models` and `llama-stack-client` packages. If you are developing with a llama-stack repository checked out and need your code to be reflected in the stack image, set `LLAMA_STACK_DIR` and `LLAMA_MODELS_DIR` to the appropriate checked out directories when running any of the `llama` CLI commands. 
Example: ```bash -cd work/ -git clone https://github.com/meta-llama/llama-stack.git -git clone https://github.com/meta-llama/llama-stack-client-python.git -cd llama-stack -LLAMA_STACK_DIR=$(pwd) LLAMA_STACK_CLIENT_DIR=../llama-stack-client-python llama stack build --template <...> +$ cd work/ +$ git clone https://github.com/meta-llama/llama-stack.git +$ git clone https://github.com/meta-llama/llama-models.git +$ cd llama-stack +$ LLAMA_STACK_DIR=$(pwd) LLAMA_MODELS_DIR=../llama-models llama stack build --template <...> ``` ### Updating Provider Configurations -If you have made changes to a provider's configuration in any form (introducing a new config key, or changing models, etc.), you should run `./scripts/distro_codegen.py` to re-generate various YAML files as well as the documentation. You should not change `docs/source/.../distributions/` files manually as they are auto-generated. +If you have made changes to a provider's configuration in any form (introducing a new config key, or changing models, etc.), you should run `python llama_stack/scripts/distro_codegen.py` to re-generate various YAML files as well as the documentation. You should not change `docs/source/.../distributions/` files manually as they are auto-generated. ### Building the Documentation If you are making changes to the documentation at [https://llama-stack.readthedocs.io/en/latest/](https://llama-stack.readthedocs.io/en/latest/), you can use the following command to build the documentation and preview your changes. You will need [Sphinx](https://www.sphinx-doc.org/en/master/) and the readthedocs theme. ```bash +$ cd llama-stack/docs +$ uv sync --extra docs + # This rebuilds the documentation pages. -uv run --group docs make -C docs/ html +$ uv run make html # This will start a local server (usually at http://127.0.0.1:8000) that automatically rebuilds and refreshes when you make changes to the documentation. -uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all +$ uv run sphinx-autobuild source build/html --write-all ``` ### Update API Documentation @@ -179,7 +159,8 @@ uv run --group docs sphinx-autobuild docs/source docs/build/html --write-all If you modify or add new API endpoints, update the API documentation accordingly. You can do this by running the following command: ```bash -uv run ./docs/openapi_generator/run_openapi_generator.sh +$ uv sync --extra dev +$ uv run ./docs/openapi_generator/run_openapi_generator.sh ``` The generated API documentation will be available in `docs/_static/`. Make sure to review the changes before committing. 
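Taken together, the CONTRIBUTING.md hunks above describe the local development loop. As a rough sketch of that loop, using only commands that appear in the hunks (the exact forms differ slightly between the two sides of this diff, so treat this as illustrative rather than the pinned workflow):

```bash
# One-time environment setup inside a llama-stack checkout
cd llama-stack
uv sync --extra dev
uv pip install -e .
source .venv/bin/activate

# Install the git hooks once, then run all lint/format checks before pushing
uv run pre-commit install
uv run pre-commit run --all-files
```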
diff --git a/MANIFEST.in b/MANIFEST.in index 88bd11767..0e9efd9eb 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,9 +1,6 @@ include pyproject.toml -include llama_stack/models/llama/llama3/tokenizer.model -include llama_stack/models/llama/llama4/tokenizer.model +include distributions/dependencies.json include llama_stack/distribution/*.sh include llama_stack/cli/scripts/*.sh include llama_stack/templates/*/*.yaml include llama_stack/providers/tests/test_cases/inference/*.json -include llama_stack/models/llama/*/*.md -include llama_stack/tests/integration/*.jpg diff --git a/README.md b/README.md index 37f1aa0f3..3946deea6 100644 --- a/README.md +++ b/README.md @@ -3,82 +3,9 @@ [![PyPI version](https://img.shields.io/pypi/v/llama_stack.svg)](https://pypi.org/project/llama_stack/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-stack)](https://pypi.org/project/llama-stack/) [![License](https://img.shields.io/pypi/l/llama_stack.svg)](https://github.com/meta-llama/llama-stack/blob/main/LICENSE) -[![Discord](https://img.shields.io/discord/1257833999603335178?color=6A7EC2&logo=discord&logoColor=ffffff)](https://discord.gg/llama-stack) -[![Unit Tests](https://github.com/meta-llama/llama-stack/actions/workflows/unit-tests.yml/badge.svg?branch=main)](https://github.com/meta-llama/llama-stack/actions/workflows/unit-tests.yml?query=branch%3Amain) -[![Integration Tests](https://github.com/meta-llama/llama-stack/actions/workflows/integration-tests.yml/badge.svg?branch=main)](https://github.com/meta-llama/llama-stack/actions/workflows/integration-tests.yml?query=branch%3Amain) +[![Discord](https://img.shields.io/discord/1257833999603335178)](https://discord.gg/llama-stack) -[**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Colab Notebook**](./docs/getting_started.ipynb) | [**Discord**](https://discord.gg/llama-stack) - -### ✨🎉 Llama 4 Support 🎉✨ -We released [Version 0.2.0](https://github.com/meta-llama/llama-stack/releases/tag/v0.2.0) with support for the Llama 4 herd of models released by Meta. - -
- -👋 Click here to see how to run Llama 4 models on Llama Stack - -\ -*Note you need 8xH100 GPU-host to run these models* - -```bash -pip install -U llama_stack - -MODEL="Llama-4-Scout-17B-16E-Instruct" -# get meta url from llama.com -llama model download --source meta --model-id $MODEL --meta-url - -# start a llama stack server -INFERENCE_MODEL=meta-llama/$MODEL llama stack build --run --template meta-reference-gpu - -# install client to interact with the server -pip install llama-stack-client -``` -### CLI -```bash -# Run a chat completion -llama-stack-client --endpoint http://localhost:8321 \ -inference chat-completion \ ---model-id meta-llama/$MODEL \ ---message "write a haiku for meta's llama 4 models" - -ChatCompletionResponse( - completion_message=CompletionMessage(content="Whispers in code born\nLlama's gentle, wise heartbeat\nFuture's soft unfold", role='assistant', stop_reason='end_of_turn', tool_calls=[]), - logprobs=None, - metrics=[Metric(metric='prompt_tokens', value=21.0, unit=None), Metric(metric='completion_tokens', value=28.0, unit=None), Metric(metric='total_tokens', value=49.0, unit=None)] -) -``` -### Python SDK -```python -from llama_stack_client import LlamaStackClient - -client = LlamaStackClient(base_url=f"http://localhost:8321") - -model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" -prompt = "Write a haiku about coding" - -print(f"User> {prompt}") -response = client.inference.chat_completion( - model_id=model_id, - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": prompt}, - ], -) -print(f"Assistant> {response.completion_message.content}") -``` -As more providers start supporting Llama 4, you can use them in Llama Stack as well. We are adding to the list. Stay tuned! - - -
- -### 🚀 One-Line Installer 🚀 - -To try Llama Stack locally, run: - -```bash -curl -LsSf https://github.com/meta-llama/llama-stack/raw/main/install.sh | sh -``` - -### Overview +[**Quick Start**](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html) | [**Documentation**](https://llama-stack.readthedocs.io/en/latest/index.html) | [**Colab Notebook**](./docs/getting_started.ipynb) Llama Stack standardizes the core building blocks that simplify AI application development. It codifies best practices across the Llama ecosystem. More specifically, it provides @@ -105,32 +32,24 @@ Llama Stack standardizes the core building blocks that simplify AI application d By reducing friction and complexity, Llama Stack empowers developers to focus on what they do best: building transformative generative AI applications. ### API Providers -Here is a list of the various API providers and available distributions that can help developers get started easily with Llama Stack. - -| **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** | **Post Training** | -|:------------------------:|:----------------------:|:----------:|:-------------:|:----------:|:----------:|:-------------:|:-----------------:| -| Meta Reference | Single Node | ✅ | ✅ | ✅ | ✅ | ✅ | | -| SambaNova | Hosted | | ✅ | | ✅ | | | -| Cerebras | Hosted | | ✅ | | | | | -| Fireworks | Hosted | ✅ | ✅ | ✅ | | | | -| AWS Bedrock | Hosted | | ✅ | | ✅ | | | -| Together | Hosted | ✅ | ✅ | | ✅ | | | -| Groq | Hosted | | ✅ | | | | | -| Ollama | Single Node | | ✅ | | | | | -| TGI | Hosted and Single Node | | ✅ | | | | | -| NVIDIA NIM | Hosted and Single Node | | ✅ | | | | | -| Chroma | Single Node | | | ✅ | | | | -| PG Vector | Single Node | | | ✅ | | | | -| PyTorch ExecuTorch | On-device iOS | ✅ | ✅ | | | | | -| vLLM | Hosted and Single Node | | ✅ | | | | | -| OpenAI | Hosted | | ✅ | | | | | -| Anthropic | Hosted | | ✅ | | | | | -| Gemini | Hosted | | ✅ | | | | | -| watsonx | Hosted | | ✅ | | | | | -| HuggingFace | Single Node | | | | | | ✅ | -| TorchTune | Single Node | | | | | | ✅ | -| NVIDIA NEMO | Hosted | | | | | | ✅ | +Here is a list of the various API providers and available distributions that can help developers get started easily with Llama Stack. 
+| **API Provider Builder** | **Environments** | **Agents** | **Inference** | **Memory** | **Safety** | **Telemetry** | +|:------------------------:|:----------------------:|:----------:|:-------------:|:----------:|:----------:|:-------------:| +| Meta Reference | Single Node | ✅ | ✅ | ✅ | ✅ | ✅ | +| SambaNova | Hosted | | ✅ | | | | +| Cerebras | Hosted | | ✅ | | | | +| Fireworks | Hosted | ✅ | ✅ | ✅ | | | +| AWS Bedrock | Hosted | | ✅ | | ✅ | | +| Together | Hosted | ✅ | ✅ | | ✅ | | +| Groq | Hosted | | ✅ | | | | +| Ollama | Single Node | | ✅ | | | | +| TGI | Hosted and Single Node | | ✅ | | | | +| NVIDIA NIM | Hosted and Single Node | | ✅ | | | | +| Chroma | Single Node | | | ✅ | | | +| PG Vector | Single Node | | | ✅ | | | +| PyTorch ExecuTorch | On-device iOS | ✅ | ✅ | | | | +| vLLM | Hosted and Single Node | | ✅ | | | | ### Distributions @@ -139,6 +58,7 @@ A Llama Stack Distribution (or "distro") is a pre-configured bundle of provider | **Distribution** | **Llama Stack Docker** | Start This Distribution | |:---------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------:| | Meta Reference | [llamastack/distribution-meta-reference-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/meta-reference-gpu.html) | +| Meta Reference Quantized | [llamastack/distribution-meta-reference-quantized-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-quantized-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/meta-reference-quantized-gpu.html) | | SambaNova | [llamastack/distribution-sambanova](https://hub.docker.com/repository/docker/llamastack/distribution-sambanova/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/sambanova.html) | | Cerebras | [llamastack/distribution-cerebras](https://hub.docker.com/repository/docker/llamastack/distribution-cerebras/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/cerebras.html) | | Ollama | [llamastack/distribution-ollama](https://hub.docker.com/repository/docker/llamastack/distribution-ollama/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/ollama.html) | @@ -147,6 +67,26 @@ A Llama Stack Distribution (or "distro") is a pre-configured bundle of provider | Fireworks | [llamastack/distribution-fireworks](https://hub.docker.com/repository/docker/llamastack/distribution-fireworks/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/fireworks.html) | | vLLM | [llamastack/distribution-remote-vllm](https://hub.docker.com/repository/docker/llamastack/distribution-remote-vllm/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/remote-vllm.html) | +### Installation + +You have two ways to install this repository: + +* **Install as a package**: + You can install the repository directly from [PyPI](https://pypi.org/project/llama-stack/) by running the following command: + ```bash + pip install llama-stack + ``` + +* **Install from source**: + If you prefer to install 
from the source code, we recommend using [uv](https://github.com/astral-sh/uv). + Then, run the following commands: + ```bash + git clone git@github.com:meta-llama/llama-stack.git + cd llama-stack + + uv sync + uv pip install -e . + ``` ### Documentation diff --git a/distributions/bedrock/build.yaml b/distributions/bedrock/build.yaml new file mode 120000 index 000000000..72402ef8d --- /dev/null +++ b/distributions/bedrock/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/bedrock/build.yaml \ No newline at end of file diff --git a/distributions/bedrock/compose.yaml b/distributions/bedrock/compose.yaml new file mode 100644 index 000000000..055b92c67 --- /dev/null +++ b/distributions/bedrock/compose.yaml @@ -0,0 +1,15 @@ +services: + llamastack: + image: distribution-bedrock + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/llamastack-run-bedrock.yaml + ports: + - "8321:8321" + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-bedrock.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/bedrock/run.yaml b/distributions/bedrock/run.yaml new file mode 120000 index 000000000..f38abfc4e --- /dev/null +++ b/distributions/bedrock/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/bedrock/run.yaml \ No newline at end of file diff --git a/distributions/cerebras/build.yaml b/distributions/cerebras/build.yaml new file mode 120000 index 000000000..bccbbcf60 --- /dev/null +++ b/distributions/cerebras/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/cerebras/build.yaml \ No newline at end of file diff --git a/distributions/cerebras/compose.yaml b/distributions/cerebras/compose.yaml new file mode 100644 index 000000000..8dc09a865 --- /dev/null +++ b/distributions/cerebras/compose.yaml @@ -0,0 +1,16 @@ +services: + llamastack: + image: llamastack/distribution-cerebras + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/llamastack-run-cerebras.yaml + ports: + - "8321:8321" + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-cerebras.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/cerebras/run.yaml b/distributions/cerebras/run.yaml new file mode 120000 index 000000000..9f9d20b4b --- /dev/null +++ b/distributions/cerebras/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/cerebras/run.yaml \ No newline at end of file diff --git a/distributions/dell-tgi/compose.yaml b/distributions/dell-tgi/compose.yaml new file mode 100644 index 000000000..d26636cbd --- /dev/null +++ b/distributions/dell-tgi/compose.yaml @@ -0,0 +1,50 @@ +services: + text-generation-inference: + image: registry.dell.huggingface.co/enterprise-dell-inference-meta-llama-meta-llama-3.1-8b-instruct + network_mode: "host" + volumes: + - $HOME/.cache/huggingface:/data + ports: + - "5009:5009" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=0,1,2,3,4 + - NUM_SHARD=4 + - MAX_BATCH_PREFILL_TOKENS=32768 + - MAX_INPUT_TOKENS=8000 + - MAX_TOTAL_TOKENS=8192 + command: [] + deploy: + resources: + reservations: + devices: + - driver: nvidia + # that's the closest analogue to --gpus; provide + # an integer amount of devices or 'all' + count: all + # Devices are reserved using a list of capabilities, making + # capabilities the only required field. 
A device MUST + # satisfy all the requested capabilities for a successful + # reservation. + capabilities: [gpu] + runtime: nvidia + llamastack: + depends_on: + text-generation-inference: + condition: service_healthy + image: llamastack/distribution-tgi + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + # Link to TGI run.yaml file + - ./run.yaml:/root/my-run.yaml + ports: + - "8321:8321" + # Hack: wait for TGI server to start before starting docker + entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/dell-tgi/run.yaml b/distributions/dell-tgi/run.yaml new file mode 100644 index 000000000..cd6ddcfdf --- /dev/null +++ b/distributions/dell-tgi/run.yaml @@ -0,0 +1,44 @@ +version: '2' +image_name: local +container_image: null +conda_env: local +apis: +- shields +- agents +- models +- memory +- memory_banks +- inference +- safety +providers: + inference: + - provider_id: tgi0 + provider_type: remote::tgi + config: + url: http://127.0.0.1:80 + safety: + - provider_id: meta0 + provider_type: inline::llama-guard + config: + model: Llama-Guard-3-1B + excluded_categories: [] + - provider_id: meta1 + provider_type: inline::prompt-guard + config: + model: Prompt-Guard-86M + memory: + - provider_id: meta0 + provider_type: inline::faiss + config: {} + agents: + - provider_id: meta0 + provider_type: inline::meta-reference + config: + persistence_store: + namespace: null + type: sqlite + db_path: ~/.llama/runtime/kvstore.db + telemetry: + - provider_id: meta0 + provider_type: inline::meta-reference + config: {} diff --git a/llama_stack/templates/dependencies.json b/distributions/dependencies.json similarity index 66% rename from llama_stack/templates/dependencies.json rename to distributions/dependencies.json index 47a35edc0..59b0c9e62 100644 --- a/llama_stack/templates/dependencies.json +++ b/distributions/dependencies.json @@ -7,12 +7,10 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -25,16 +23,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn" ], "cerebras": [ @@ -45,12 +40,10 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "matplotlib", "nltk", "numpy", @@ -62,16 +55,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -83,12 +73,10 @@ "chardet", "chromadb-client", "datasets", - "emoji", "fastapi", "fire", "fireworks-ai", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -101,17 +89,14 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "sqlite-vec", "tqdm", "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -124,13 +109,11 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", "huggingface_hub", - "langdetect", "matplotlib", 
"nltk", "numpy", @@ -142,16 +125,49 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", + "uvicorn", + "sentence-transformers --no-deps", + "torch torchvision --index-url https://download.pytorch.org/whl/cpu" + ], + "dev": [ + "aiosqlite", + "autoevals", + "blobfile", + "chardet", + "chromadb-client", + "datasets", + "fastapi", + "fire", + "fireworks-ai", + "httpx", + "litellm", + "matplotlib", + "mcp", + "nltk", + "numpy", + "openai", + "opentelemetry-exporter-otlp-proto-http", + "opentelemetry-sdk", + "pandas", + "pillow", + "psycopg2-binary", + "pymongo", + "pypdf", + "redis", + "requests", + "scikit-learn", + "scipy", + "sentencepiece", + "sqlite-vec", + "tqdm", + "transformers", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -163,13 +179,11 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "fireworks-ai", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -182,16 +196,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -202,12 +213,10 @@ "blobfile", "chardet", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "litellm", "matplotlib", "nltk", @@ -220,16 +229,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn" ], "hf-endpoint": [ @@ -240,13 +246,11 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", "huggingface_hub", - "langdetect", "matplotlib", "mcp", "nltk", @@ -259,16 +263,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn" ], "hf-serverless": [ @@ -279,13 +280,11 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", "huggingface_hub", - "langdetect", "matplotlib", "mcp", "nltk", @@ -298,95 +297,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], - "kvant": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "faiss-cpu", - "fastapi", - "fire", - "httpx", - "langdetect", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", - "tqdm", - "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], - "llama_api": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "fastapi", - 
"fire", - "httpx", - "langdetect", - "litellm", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", - "sqlite-vec", - "tqdm", - "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -399,14 +316,11 @@ "chardet", "chromadb-client", "datasets", - "emoji", "fairscale", "faiss-cpu", "fastapi", - "fbgemm-gpu-genai==1.1.2", "fire", "httpx", - "langdetect", "lm-format-enforcer", "matplotlib", "mcp", @@ -420,26 +334,63 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentence-transformers", "sentencepiece", - "sqlalchemy[asyncio]", "torch", - "torchao==0.8.0", "torchvision", "tqdm", "transformers", - "tree_sitter", + "uvicorn", + "zmq" + ], + "meta-reference-quantized-gpu": [ + "accelerate", + "aiosqlite", + "autoevals", + "blobfile", + "chardet", + "chromadb-client", + "datasets", + "fairscale", + "faiss-cpu", + "fastapi", + "fbgemm-gpu", + "fire", + "httpx", + "lm-format-enforcer", + "matplotlib", + "mcp", + "nltk", + "numpy", + "openai", + "opentelemetry-exporter-otlp-proto-http", + "opentelemetry-sdk", + "pandas", + "pillow", + "psycopg2-binary", + "pymongo", + "pypdf", + "redis", + "requests", + "scikit-learn", + "scipy", + "sentence-transformers", + "sentencepiece", + "torch", + "torchao==0.5.0", + "torchvision", + "tqdm", + "transformers", "uvicorn", "zmq" ], "nvidia": [ - "aiohttp", "aiosqlite", + "autoevals", "blobfile", "chardet", "datasets", @@ -448,6 +399,7 @@ "fire", "httpx", "matplotlib", + "mcp", "nltk", "numpy", "openai", @@ -463,7 +415,6 @@ "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", "uvicorn" @@ -476,12 +427,9 @@ "chardet", "chromadb-client", "datasets", - "emoji", - "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -491,103 +439,20 @@ "opentelemetry-exporter-otlp-proto-http", "opentelemetry-sdk", "pandas", - "peft", "pillow", "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", - "torch", - "tqdm", - "transformers", - "tree_sitter", - "trl", - "uvicorn" - ], - "open-benchmark": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "fastapi", - "fire", - "httpx", - "langdetect", - "litellm", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", "sqlite-vec", - "together", "tqdm", "transformers", - "tree_sitter", "uvicorn" ], - "passthrough": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "faiss-cpu", - "fastapi", - "fire", - "httpx", - "langdetect", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", 
- "tqdm", - "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], "remote-vllm": [ "aiosqlite", "autoevals", @@ -595,12 +460,10 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -613,16 +476,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -636,46 +496,7 @@ "fastapi", "fire", "httpx", - "litellm", "matplotlib", - "mcp", - "nltk", - "numpy", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", - "tqdm", - "transformers", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], - "starter": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "fastapi", - "fire", - "fireworks-ai", - "httpx", - "langdetect", - "litellm", - "matplotlib", - "mcp", "nltk", "numpy", "openai", @@ -686,20 +507,14 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", - "sqlite-vec", "tqdm", "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" + "uvicorn" ], "tgi": [ "aiohttp", @@ -709,13 +524,11 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", "huggingface_hub", - "langdetect", "matplotlib", "mcp", "nltk", @@ -728,16 +541,13 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -749,12 +559,10 @@ "chardet", "chromadb-client", "datasets", - "emoji", "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -767,57 +575,14 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "together", "tqdm", "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], - "verification": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "chromadb-client", - "datasets", - "emoji", - "fastapi", - "fire", - "httpx", - "langdetect", - "litellm", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", - "sqlite-vec", - "tqdm", - "transformers", - "tree_sitter", "uvicorn", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" @@ -829,12 +594,10 @@ "chardet", "chromadb-client", "datasets", 
- "emoji", "faiss-cpu", "fastapi", "fire", "httpx", - "langdetect", "matplotlib", "mcp", "nltk", @@ -847,58 +610,16 @@ "psycopg2-binary", "pymongo", "pypdf", - "pythainlp", "redis", "requests", "scikit-learn", "scipy", "sentencepiece", - "sqlalchemy[asyncio]", "tqdm", "transformers", - "tree_sitter", "uvicorn", "vllm", "sentence-transformers --no-deps", "torch torchvision --index-url https://download.pytorch.org/whl/cpu" - ], - "watsonx": [ - "aiosqlite", - "autoevals", - "blobfile", - "chardet", - "datasets", - "emoji", - "faiss-cpu", - "fastapi", - "fire", - "httpx", - "ibm_watson_machine_learning", - "langdetect", - "matplotlib", - "mcp", - "nltk", - "numpy", - "openai", - "opentelemetry-exporter-otlp-proto-http", - "opentelemetry-sdk", - "pandas", - "pillow", - "psycopg2-binary", - "pymongo", - "pypdf", - "pythainlp", - "redis", - "requests", - "scikit-learn", - "scipy", - "sentencepiece", - "sqlalchemy[asyncio]", - "tqdm", - "transformers", - "tree_sitter", - "uvicorn", - "sentence-transformers --no-deps", - "torch torchvision --index-url https://download.pytorch.org/whl/cpu" ] } diff --git a/distributions/fireworks/build.yaml b/distributions/fireworks/build.yaml new file mode 120000 index 000000000..32a5bd869 --- /dev/null +++ b/distributions/fireworks/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/fireworks/build.yaml \ No newline at end of file diff --git a/distributions/fireworks/compose.yaml b/distributions/fireworks/compose.yaml new file mode 100644 index 000000000..84b8491e4 --- /dev/null +++ b/distributions/fireworks/compose.yaml @@ -0,0 +1,14 @@ +services: + llamastack: + image: llamastack/distribution-fireworks + ports: + - "8321:8321" + environment: + - FIREWORKS_API_KEY=${FIREWORKS_API_KEY} + entrypoint: bash -c "python -m llama_stack.distribution.server.server --template fireworks" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/fireworks/run.yaml b/distributions/fireworks/run.yaml new file mode 120000 index 000000000..532e0e2a8 --- /dev/null +++ b/distributions/fireworks/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/fireworks/run.yaml \ No newline at end of file diff --git a/distributions/meta-reference-gpu/build.yaml b/distributions/meta-reference-gpu/build.yaml new file mode 120000 index 000000000..4418195eb --- /dev/null +++ b/distributions/meta-reference-gpu/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/meta-reference-gpu/build.yaml \ No newline at end of file diff --git a/distributions/meta-reference-gpu/compose.yaml b/distributions/meta-reference-gpu/compose.yaml new file mode 100644 index 000000000..d977e92ea --- /dev/null +++ b/distributions/meta-reference-gpu/compose.yaml @@ -0,0 +1,34 @@ +services: + llamastack: + image: llamastack/distribution-meta-reference-gpu + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/my-run.yaml + ports: + - "8321:8321" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=0 + command: [] + deploy: + resources: + reservations: + devices: + - driver: nvidia + # that's the closest analogue to --gpus; provide + # an integer amount of devices or 'all' + count: 1 + # Devices are reserved using a list of capabilities, making + # capabilities the only required field. A device MUST + # satisfy all the requested capabilities for a successful + # reservation. 
+ capabilities: [gpu] + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s + runtime: nvidia + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" diff --git a/distributions/meta-reference-gpu/run-with-safety.yaml b/distributions/meta-reference-gpu/run-with-safety.yaml new file mode 120000 index 000000000..4c5483425 --- /dev/null +++ b/distributions/meta-reference-gpu/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/meta-reference-gpu/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/meta-reference-gpu/run.yaml b/distributions/meta-reference-gpu/run.yaml new file mode 120000 index 000000000..d680186ab --- /dev/null +++ b/distributions/meta-reference-gpu/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/meta-reference-gpu/run.yaml \ No newline at end of file diff --git a/distributions/meta-reference-quantized-gpu/build.yaml b/distributions/meta-reference-quantized-gpu/build.yaml new file mode 120000 index 000000000..f3dbe996f --- /dev/null +++ b/distributions/meta-reference-quantized-gpu/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/meta-reference-quantized-gpu/build.yaml \ No newline at end of file diff --git a/distributions/meta-reference-quantized-gpu/compose.yaml b/distributions/meta-reference-quantized-gpu/compose.yaml new file mode 100644 index 000000000..98e943dce --- /dev/null +++ b/distributions/meta-reference-quantized-gpu/compose.yaml @@ -0,0 +1,35 @@ +services: + llamastack: + image: llamastack/distribution-meta-reference-quantized-gpu + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/my-run.yaml + ports: + - "8321:8321" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=0 + command: [] + deploy: + resources: + reservations: + devices: + - driver: nvidia + # that's the closest analogue to --gpus; provide + # an integer amount of devices or 'all' + count: 1 + # Devices are reserved using a list of capabilities, making + # capabilities the only required field. A device MUST + # satisfy all the requested capabilities for a successful + # reservation. + capabilities: [gpu] + runtime: nvidia + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/meta-reference-quantized-gpu/run.yaml b/distributions/meta-reference-quantized-gpu/run.yaml new file mode 100644 index 000000000..eb631adaa --- /dev/null +++ b/distributions/meta-reference-quantized-gpu/run.yaml @@ -0,0 +1,58 @@ +version: '2' +image_name: local +container_image: null +conda_env: local +apis: +- shields +- agents +- models +- memory +- memory_banks +- inference +- safety +providers: + inference: + - provider_id: meta0 + provider_type: inline::meta-reference-quantized + config: + model: Llama3.2-3B-Instruct:int4-qlora-eo8 + quantization: + type: int4 + torch_seed: null + max_seq_len: 2048 + max_batch_size: 1 + - provider_id: meta1 + provider_type: inline::meta-reference-quantized + config: + # not a quantized model ! 
+ model: Llama-Guard-3-1B + quantization: null + torch_seed: null + max_seq_len: 2048 + max_batch_size: 1 + safety: + - provider_id: meta0 + provider_type: inline::llama-guard + config: + model: Llama-Guard-3-1B + excluded_categories: [] + - provider_id: meta1 + provider_type: inline::prompt-guard + config: + model: Prompt-Guard-86M + memory: + - provider_id: meta0 + provider_type: inline::meta-reference + config: {} + agents: + - provider_id: meta0 + provider_type: inline::meta-reference + config: + persistence_store: + namespace: null + type: sqlite + db_path: ~/.llama/runtime/kvstore.db + telemetry: + - provider_id: meta0 + provider_type: inline::meta-reference + config: {} diff --git a/distributions/ollama/build.yaml b/distributions/ollama/build.yaml new file mode 120000 index 000000000..8772548e0 --- /dev/null +++ b/distributions/ollama/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/ollama/build.yaml \ No newline at end of file diff --git a/distributions/ollama/compose.yaml b/distributions/ollama/compose.yaml new file mode 100644 index 000000000..176f19d6b --- /dev/null +++ b/distributions/ollama/compose.yaml @@ -0,0 +1,71 @@ +services: + ollama: + image: ollama/ollama:latest + network_mode: ${NETWORK_MODE:-bridge} + volumes: + - ~/.ollama:/root/.ollama + ports: + - "11434:11434" + environment: + OLLAMA_DEBUG: 1 + command: [] + deploy: + resources: + limits: + memory: 8G # Set maximum memory + reservations: + memory: 8G # Set minimum memory reservation + # healthcheck: + # # ugh, no CURL in ollama image + # test: ["CMD", "curl", "-f", "http://ollama:11434"] + # interval: 10s + # timeout: 5s + # retries: 5 + + ollama-init: + image: ollama/ollama:latest + depends_on: + - ollama + # condition: service_healthy + network_mode: ${NETWORK_MODE:-bridge} + environment: + - OLLAMA_HOST=ollama + - INFERENCE_MODEL=${INFERENCE_MODEL} + - SAFETY_MODEL=${SAFETY_MODEL:-} + volumes: + - ~/.ollama:/root/.ollama + - ./pull-models.sh:/pull-models.sh + entrypoint: ["/pull-models.sh"] + + llamastack: + depends_on: + ollama: + condition: service_started + ollama-init: + condition: service_started + image: ${LLAMA_STACK_IMAGE:-llamastack/distribution-ollama} + network_mode: ${NETWORK_MODE:-bridge} + volumes: + - ~/.llama:/root/.llama + # Link to ollama run.yaml file + - ~/local/llama-stack/:/app/llama-stack-source + - ./run${SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml + ports: + - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}" + environment: + - INFERENCE_MODEL=${INFERENCE_MODEL} + - SAFETY_MODEL=${SAFETY_MODEL:-} + - OLLAMA_URL=http://ollama:11434 + entrypoint: > + python -m llama_stack.distribution.server.server /root/my-run.yaml \ + --port ${LLAMA_STACK_PORT:-5001} + deploy: + restart_policy: + condition: on-failure + delay: 10s + max_attempts: 3 + window: 60s +volumes: + ollama: + ollama-init: + llamastack: diff --git a/distributions/ollama/pull-models.sh b/distributions/ollama/pull-models.sh new file mode 100755 index 000000000..fb5bf8a4a --- /dev/null +++ b/distributions/ollama/pull-models.sh @@ -0,0 +1,18 @@ +#!/bin/sh + +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the terms described in the LICENSE file in +# the root directory of this source tree. + +echo "Preloading (${INFERENCE_MODEL}, ${SAFETY_MODEL})..." +for model in ${INFERENCE_MODEL} ${SAFETY_MODEL}; do + echo "Preloading $model..." + if ! 
ollama run "$model"; then + echo "Failed to pull and run $model" + exit 1 + fi +done + +echo "All models pulled successfully" diff --git a/distributions/ollama/run-with-safety.yaml b/distributions/ollama/run-with-safety.yaml new file mode 120000 index 000000000..5695b49e7 --- /dev/null +++ b/distributions/ollama/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/ollama/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/ollama/run.yaml b/distributions/ollama/run.yaml new file mode 120000 index 000000000..b008b1bf4 --- /dev/null +++ b/distributions/ollama/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/ollama/run.yaml \ No newline at end of file diff --git a/distributions/remote-nvidia/build.yaml b/distributions/remote-nvidia/build.yaml new file mode 120000 index 000000000..8903d2e57 --- /dev/null +++ b/distributions/remote-nvidia/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/nvidia/build.yaml \ No newline at end of file diff --git a/distributions/remote-nvidia/compose.yaml b/distributions/remote-nvidia/compose.yaml new file mode 100644 index 000000000..ab8b4ce25 --- /dev/null +++ b/distributions/remote-nvidia/compose.yaml @@ -0,0 +1,19 @@ +services: + llamastack: + image: distribution-nvidia:dev + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/llamastack-run-nvidia.yaml + ports: + - "8321:8321" + environment: + - INFERENCE_MODEL=${INFERENCE_MODEL:-Llama3.1-8B-Instruct} + - NVIDIA_API_KEY=${NVIDIA_API_KEY:-} + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml-config /root/llamastack-run-nvidia.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/remote-nvidia/run.yaml b/distributions/remote-nvidia/run.yaml new file mode 120000 index 000000000..85da3e26b --- /dev/null +++ b/distributions/remote-nvidia/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/nvidia/run.yaml \ No newline at end of file diff --git a/distributions/remote-vllm/build.yaml b/distributions/remote-vllm/build.yaml new file mode 120000 index 000000000..52e5d0f2d --- /dev/null +++ b/distributions/remote-vllm/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/remote-vllm/build.yaml \ No newline at end of file diff --git a/distributions/remote-vllm/compose.yaml b/distributions/remote-vllm/compose.yaml new file mode 100644 index 000000000..c387e1049 --- /dev/null +++ b/distributions/remote-vllm/compose.yaml @@ -0,0 +1,100 @@ +services: + vllm-inference: + image: vllm/vllm-openai:latest + volumes: + - $HOME/.cache/huggingface:/root/.cache/huggingface + network_mode: ${NETWORK_MODE:-bridged} + ports: + - "${VLLM_INFERENCE_PORT:-5100}:${VLLM_INFERENCE_PORT:-5100}" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=${VLLM_INFERENCE_GPU:-0} + - HUGGING_FACE_HUB_TOKEN=$HF_TOKEN + command: > + --gpu-memory-utilization 0.75 + --model ${VLLM_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + --enforce-eager + --max-model-len 8192 + --max-num-seqs 16 + --port ${VLLM_INFERENCE_PORT:-5100} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:${VLLM_INFERENCE_PORT:-5100}/v1/health"] + interval: 30s + timeout: 10s + retries: 5 + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + runtime: nvidia + + # A little trick: + # if VLLM_SAFETY_MODEL is set, we will create a service for the safety model + # otherwise, the entry will end in a hyphen which gets ignored by docker compose + 
vllm-${VLLM_SAFETY_MODEL:+safety}: + image: vllm/vllm-openai:latest + volumes: + - $HOME/.cache/huggingface:/root/.cache/huggingface + network_mode: ${NETWORK_MODE:-bridged} + ports: + - "${VLLM_SAFETY_PORT:-5101}:${VLLM_SAFETY_PORT:-5101}" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=${VLLM_SAFETY_GPU:-1} + - HUGGING_FACE_HUB_TOKEN=$HF_TOKEN + command: > + --gpu-memory-utilization 0.75 + --model ${VLLM_SAFETY_MODEL} + --enforce-eager + --max-model-len 8192 + --max-num-seqs 16 + --port ${VLLM_SAFETY_PORT:-5101} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:${VLLM_SAFETY_PORT:-5101}/v1/health"] + interval: 30s + timeout: 10s + retries: 5 + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + runtime: nvidia + llamastack: + depends_on: + - vllm-inference: + condition: service_healthy + - vllm-${VLLM_SAFETY_MODEL:+safety}: + condition: service_healthy + # image: llamastack/distribution-remote-vllm + image: llamastack/distribution-remote-vllm:test-0.0.52rc3 + volumes: + - ~/.llama:/root/.llama + - ./run${VLLM_SAFETY_MODEL:+-with-safety}.yaml:/root/llamastack-run-remote-vllm.yaml + network_mode: ${NETWORK_MODE:-bridged} + environment: + - VLLM_URL=http://vllm-inference:${VLLM_INFERENCE_PORT:-5100}/v1 + - VLLM_SAFETY_URL=http://vllm-safety:${VLLM_SAFETY_PORT:-5101}/v1 + - INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + - MAX_TOKENS=${MAX_TOKENS:-4096} + - SQLITE_STORE_DIR=${SQLITE_STORE_DIR:-$HOME/.llama/distributions/remote-vllm} + - SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + ports: + - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}" + # Hack: wait for vLLM server to start before starting docker + entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-remote-vllm.yaml --port 5001" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s +volumes: + vllm-inference: + vllm-safety: + llamastack: diff --git a/distributions/remote-vllm/run-with-safety.yaml b/distributions/remote-vllm/run-with-safety.yaml new file mode 120000 index 000000000..b2c3c36da --- /dev/null +++ b/distributions/remote-vllm/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/remote-vllm/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/remote-vllm/run.yaml b/distributions/remote-vllm/run.yaml new file mode 120000 index 000000000..ac70c0e6a --- /dev/null +++ b/distributions/remote-vllm/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/remote-vllm/run.yaml \ No newline at end of file diff --git a/distributions/runpod/build.yaml b/distributions/runpod/build.yaml new file mode 100644 index 000000000..9348573ef --- /dev/null +++ b/distributions/runpod/build.yaml @@ -0,0 +1,9 @@ +name: runpod +distribution_spec: + description: Use Runpod for running LLM inference + providers: + inference: remote::runpod + memory: meta-reference + safety: meta-reference + agents: meta-reference + telemetry: meta-reference diff --git a/distributions/sambanova/build.yaml b/distributions/sambanova/build.yaml new file mode 100644 index 000000000..dbf013d2d --- /dev/null +++ b/distributions/sambanova/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/sambanova/build.yaml diff --git a/distributions/sambanova/compose.yaml b/distributions/sambanova/compose.yaml new file mode 100644 index 000000000..58b9fb1ef --- /dev/null +++ b/distributions/sambanova/compose.yaml @@ -0,0 +1,16 @@ +services: 
+ llamastack: + image: llamastack/distribution-sambanova + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/llamastack-run-sambanova.yaml + ports: + - "5000:5000" + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/llamastack-run-sambanova.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/sambanova/run.yaml b/distributions/sambanova/run.yaml new file mode 100644 index 000000000..385282c67 --- /dev/null +++ b/distributions/sambanova/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/sambanova/run.yaml diff --git a/distributions/tgi/build.yaml b/distributions/tgi/build.yaml new file mode 120000 index 000000000..73e59ad84 --- /dev/null +++ b/distributions/tgi/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/tgi/build.yaml \ No newline at end of file diff --git a/distributions/tgi/compose.yaml b/distributions/tgi/compose.yaml new file mode 100644 index 000000000..753b7880b --- /dev/null +++ b/distributions/tgi/compose.yaml @@ -0,0 +1,103 @@ +services: + tgi-inference: + image: ghcr.io/huggingface/text-generation-inference:latest + volumes: + - $HOME/.cache/huggingface:/data + network_mode: ${NETWORK_MODE:-bridged} + ports: + - "${TGI_INFERENCE_PORT:-8080}:${TGI_INFERENCE_PORT:-8080}" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=${TGI_INFERENCE_GPU:-0} + - HF_TOKEN=$HF_TOKEN + - HF_HOME=/data + - HF_DATASETS_CACHE=/data + - HF_MODULES_CACHE=/data + - HF_HUB_CACHE=/data + command: > + --dtype bfloat16 + --usage-stats off + --sharded false + --model-id ${TGI_INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + --port ${TGI_INFERENCE_PORT:-8080} + --cuda-memory-fraction 0.75 + healthcheck: + test: ["CMD", "curl", "-f", "http://tgi-inference:${TGI_INFERENCE_PORT:-8080}/health"] + interval: 5s + timeout: 5s + retries: 30 + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + runtime: nvidia + + tgi-${TGI_SAFETY_MODEL:+safety}: + image: ghcr.io/huggingface/text-generation-inference:latest + volumes: + - $HOME/.cache/huggingface:/data + network_mode: ${NETWORK_MODE:-bridged} + ports: + - "${TGI_SAFETY_PORT:-8081}:${TGI_SAFETY_PORT:-8081}" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=${TGI_SAFETY_GPU:-1} + - HF_TOKEN=$HF_TOKEN + - HF_HOME=/data + - HF_DATASETS_CACHE=/data + - HF_MODULES_CACHE=/data + - HF_HUB_CACHE=/data + command: > + --dtype bfloat16 + --usage-stats off + --sharded false + --model-id ${TGI_SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + --port ${TGI_SAFETY_PORT:-8081} + --cuda-memory-fraction 0.75 + healthcheck: + test: ["CMD", "curl", "-f", "http://tgi-safety:${TGI_SAFETY_PORT:-8081}/health"] + interval: 5s + timeout: 5s + retries: 30 + deploy: + resources: + reservations: + devices: + - driver: nvidia + capabilities: [gpu] + runtime: nvidia + + llamastack: + depends_on: + tgi-inference: + condition: service_healthy + tgi-${TGI_SAFETY_MODEL:+safety}: + condition: service_healthy + image: llamastack/distribution-tgi:test-0.0.52rc3 + network_mode: ${NETWORK_MODE:-bridged} + volumes: + - ~/.llama:/root/.llama + - ./run${TGI_SAFETY_MODEL:+-with-safety}.yaml:/root/my-run.yaml + ports: + - "${LLAMA_STACK_PORT:-5001}:${LLAMA_STACK_PORT:-5001}" + # Hack: wait for TGI server to start before starting docker + entrypoint: bash -c "sleep 60; python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" + restart_policy: + 
condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s + environment: + - TGI_URL=http://tgi-inference:${TGI_INFERENCE_PORT:-8080} + - SAFETY_TGI_URL=http://tgi-safety:${TGI_SAFETY_PORT:-8081} + - INFERENCE_MODEL=${INFERENCE_MODEL:-meta-llama/Llama-3.2-3B-Instruct} + - SAFETY_MODEL=${SAFETY_MODEL:-meta-llama/Llama-Guard-3-1B} + +volumes: + tgi-inference: + tgi-safety: + llamastack: diff --git a/distributions/tgi/run-with-safety.yaml b/distributions/tgi/run-with-safety.yaml new file mode 120000 index 000000000..62d26708e --- /dev/null +++ b/distributions/tgi/run-with-safety.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/tgi/run-with-safety.yaml \ No newline at end of file diff --git a/distributions/tgi/run.yaml b/distributions/tgi/run.yaml new file mode 120000 index 000000000..f3cc3a502 --- /dev/null +++ b/distributions/tgi/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/tgi/run.yaml \ No newline at end of file diff --git a/distributions/together/build.yaml b/distributions/together/build.yaml new file mode 120000 index 000000000..3877a9c96 --- /dev/null +++ b/distributions/together/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/together/build.yaml \ No newline at end of file diff --git a/distributions/together/compose.yaml b/distributions/together/compose.yaml new file mode 100644 index 000000000..f66ee69f9 --- /dev/null +++ b/distributions/together/compose.yaml @@ -0,0 +1,14 @@ +services: + llamastack: + image: llamastack/distribution-together + ports: + - "8321:8321" + environment: + - TOGETHER_API_KEY=${TOGETHER_API_KEY} + entrypoint: bash -c "python -m llama_stack.distribution.server.server --template together" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/together/run.yaml b/distributions/together/run.yaml new file mode 120000 index 000000000..102d9866e --- /dev/null +++ b/distributions/together/run.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/together/run.yaml \ No newline at end of file diff --git a/distributions/vllm-gpu/build.yaml b/distributions/vllm-gpu/build.yaml new file mode 120000 index 000000000..a95d34c1f --- /dev/null +++ b/distributions/vllm-gpu/build.yaml @@ -0,0 +1 @@ +../../llama_stack/templates/inline-vllm/build.yaml \ No newline at end of file diff --git a/distributions/vllm-gpu/compose.yaml b/distributions/vllm-gpu/compose.yaml new file mode 100644 index 000000000..98267cdc3 --- /dev/null +++ b/distributions/vllm-gpu/compose.yaml @@ -0,0 +1,35 @@ +services: + llamastack: + image: llamastack/distribution-inline-vllm + network_mode: "host" + volumes: + - ~/.llama:/root/.llama + - ./run.yaml:/root/my-run.yaml + ports: + - "8321:8321" + devices: + - nvidia.com/gpu=all + environment: + - CUDA_VISIBLE_DEVICES=0 + command: [] + deploy: + resources: + reservations: + devices: + - driver: nvidia + # that's the closest analogue to --gpus; provide + # an integer amount of devices or 'all' + count: 1 + # Devices are reserved using a list of capabilities, making + # capabilities the only required field. A device MUST + # satisfy all the requested capabilities for a successful + # reservation. 
+ capabilities: [gpu] + runtime: nvidia + entrypoint: bash -c "python -m llama_stack.distribution.server.server --yaml_config /root/my-run.yaml" + deploy: + restart_policy: + condition: on-failure + delay: 3s + max_attempts: 5 + window: 60s diff --git a/distributions/vllm-gpu/run.yaml b/distributions/vllm-gpu/run.yaml new file mode 100644 index 000000000..a75a4c451 --- /dev/null +++ b/distributions/vllm-gpu/run.yaml @@ -0,0 +1,66 @@ +version: '2' +image_name: local +container_image: null +conda_env: local +apis: +- shields +- agents +- models +- memory +- memory_banks +- inference +- safety +providers: + inference: + - provider_id: vllm-inference + provider_type: inline::vllm + config: + model: Llama3.2-3B-Instruct + tensor_parallel_size: 1 + gpu_memory_utilization: 0.4 + enforce_eager: true + max_tokens: 4096 + - provider_id: vllm-inference-safety + provider_type: inline::vllm + config: + model: Llama-Guard-3-1B + tensor_parallel_size: 1 + gpu_memory_utilization: 0.2 + enforce_eager: true + max_tokens: 4096 + safety: + - provider_id: meta0 + provider_type: inline::llama-guard + config: + model: Llama-Guard-3-1B + excluded_categories: [] + # Uncomment to use prompt guard + # - provider_id: meta1 + # provider_type: inline::prompt-guard + # config: + # model: Prompt-Guard-86M + memory: + - provider_id: meta0 + provider_type: inline::meta-reference + config: {} + # Uncomment to use pgvector + # - provider_id: pgvector + # provider_type: remote::pgvector + # config: + # host: 127.0.0.1 + # port: 5432 + # db: postgres + # user: postgres + # password: mysecretpassword + agents: + - provider_id: meta0 + provider_type: inline::meta-reference + config: + persistence_store: + namespace: null + type: sqlite + db_path: ~/.llama/runtime/agents_store.db + telemetry: + - provider_id: meta0 + provider_type: inline::meta-reference + config: {} diff --git a/docs/_static/css/my_theme.css b/docs/_static/css/my_theme.css index d078ec057..ccd7d2060 100644 --- a/docs/_static/css/my_theme.css +++ b/docs/_static/css/my_theme.css @@ -16,20 +16,3 @@ .hide-title h1 { display: none; } - -h2, h3, h4 { - font-weight: normal; -} -html[data-theme="dark"] .rst-content div[class^="highlight"] { - background-color: #0b0b0b; -} -pre { - white-space: pre-wrap !important; - word-break: break-all; -} - -[data-theme="dark"] .mermaid { - background-color: #f4f4f6 !important; - border-radius: 6px; - padding: 0.5em; - } diff --git a/docs/_static/js/detect_theme.js b/docs/_static/js/detect_theme.js deleted file mode 100644 index 712565ef7..000000000 --- a/docs/_static/js/detect_theme.js +++ /dev/null @@ -1,32 +0,0 @@ -document.addEventListener("DOMContentLoaded", function () { - const prefersDark = window.matchMedia("(prefers-color-scheme: dark)").matches; - const htmlElement = document.documentElement; - - // Check if theme is saved in localStorage - const savedTheme = localStorage.getItem("sphinx-rtd-theme"); - - if (savedTheme) { - // Use the saved theme preference - htmlElement.setAttribute("data-theme", savedTheme); - document.body.classList.toggle("dark", savedTheme === "dark"); - } else { - // Fall back to system preference - const theme = prefersDark ? 
"dark" : "light"; - htmlElement.setAttribute("data-theme", theme); - document.body.classList.toggle("dark", theme === "dark"); - // Save initial preference - localStorage.setItem("sphinx-rtd-theme", theme); - } - - // Listen for theme changes from the existing toggle - const observer = new MutationObserver(function(mutations) { - mutations.forEach(function(mutation) { - if (mutation.attributeName === "data-theme") { - const currentTheme = htmlElement.getAttribute("data-theme"); - localStorage.setItem("sphinx-rtd-theme", currentTheme); - } - }); - }); - - observer.observe(htmlElement, { attributes: true }); -}); diff --git a/docs/_static/llama-stack-spec.html b/docs/_static/llama-stack-spec.html index d88462909..2a9f4b6f7 100644 --- a/docs/_static/llama-stack-spec.html +++ b/docs/_static/llama-stack-spec.html @@ -6,8 +6,8 @@ OpenAPI specification - - + +