Merge remote-tracking branch 'origin/main' into stack-config-default-embed

Ashwin Bharambe 2025-10-20 13:29:19 -07:00
commit 31249a1a75
237 changed files with 30895 additions and 15441 deletions

View file

@@ -82,11 +82,14 @@ runs:
echo "No recording changes"
fi
- name: Write inference logs to file
- name: Write docker logs to file
if: ${{ always() }}
shell: bash
run: |
sudo docker logs ollama > ollama-${{ inputs.inference-mode }}.log || true
distro_name=$(echo "${{ inputs.stack-config }}" | sed 's/^docker://' | sed 's/^server://')
stack_container_name="llama-stack-test-$distro_name"
sudo docker logs $stack_container_name > docker-${distro_name}-${{ inputs.inference-mode }}.log || true
- name: Upload logs
if: ${{ always() }}
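For context, a minimal sketch of how the new step derives the container name, assuming a hypothetical stack-config of docker:ci-tests:

stack_config="docker:ci-tests"
# strip the docker: (or server:) prefix to recover the distribution name
distro_name=$(echo "$stack_config" | sed 's/^docker://' | sed 's/^server://')  # -> ci-tests
stack_container_name="llama-stack-test-$distro_name"                          # -> llama-stack-test-ci-tests
sudo docker logs "$stack_container_name" > "docker-${distro_name}-replay.log" || true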

View file

@@ -57,7 +57,7 @@ runs:
echo "Building Llama Stack"
LLAMA_STACK_DIR=. \
uv run --no-sync llama stack build --template ci-tests --image-type venv
uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install
- name: Configure git for commits
shell: bash
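The replacement pipeline works because list-deps prints one requirement per line; xargs -L1 then invokes uv pip install once per requirement. A sketch with hypothetical output:

# list-deps emits one specifier per line, e.g.:
#   fastapi
#   opentelemetry-sdk
# xargs -L1 turns each line into its own install command:
uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install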

View file

@@ -14,6 +14,7 @@ Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a tabl
| Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks |
| Pre-commit Bot | [precommit-trigger.yml](precommit-trigger.yml) | Pre-commit bot for PR |
| Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build |
| Test llama stack list-deps | [providers-list-deps.yml](providers-list-deps.yml) | Test llama stack list-deps |
| Python Package Build Test | [python-build-test.yml](python-build-test.yml) | Test building the llama-stack PyPI project |
| Integration Tests (Record) | [record-integration-tests.yml](record-integration-tests.yml) | Run the integration test suite from tests/integration |
| Check semantic PR titles | [semantic-pr.yml](semantic-pr.yml) | Ensure that PR titles follow the conventional commit spec |

View file

@@ -30,8 +30,11 @@ jobs:
- name: Build a single provider
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync \
llama stack build --template starter --image-type container --image-name test
docker build . \
-f containers/Containerfile \
--build-arg INSTALL_MODE=editable \
--build-arg DISTRO_NAME=starter \
--tag llama-stack:starter-ci
- name: Run installer end-to-end
run: |
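The Containerfile arguments are not defined in this diff; the reading below is an assumption based on the names used here and in the hunks that follow:

# INSTALL_MODE=editable   -- presumably installs llama-stack from the build context (pip install -e .)
# DISTRO_NAME=starter     -- presumably selects which distribution the image serves
# BASE_IMAGE=<image>      -- base image override, defaulting to python:3.12-slim per a later hunk
# RUN_CONFIG_PATH=<path>  -- run config to bake into the image, used by the entrypoint script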

View file

@@ -73,6 +73,24 @@ jobs:
image_name: kube
apis: []
providers: {}
storage:
backends:
kv_default:
type: kv_sqlite
db_path: $run_dir/kvstore.db
sql_default:
type: sql_sqlite
db_path: $run_dir/sql_store.db
stores:
metadata:
namespace: registry
backend: kv_default
inference:
table_name: inference_store
backend: sql_default
conversations:
table_name: openai_conversations
backend: sql_default
server:
port: 8321
EOF
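A note on the new storage block (the $run_dir expansion is an assumption; the stack resolves it at startup): backends declare where data can live, and each named store binds to one backend. The sketch below summarizes the resulting mapping:

# kv_default  (kv_sqlite)  -> $run_dir/kvstore.db    <- stores.metadata (namespace: registry)
# sql_default (sql_sqlite) -> $run_dir/sql_store.db  <- stores.inference (table: inference_store)
#                                                    <- stores.conversations (table: openai_conversations)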

View file

@@ -47,7 +47,7 @@ jobs:
strategy:
fail-fast: false
matrix:
client-type: [library, server]
client-type: [library, server, docker]
# Use Python 3.13 only on nightly schedule (daily latest client test), otherwise use 3.12
python-version: ${{ github.event.schedule == '0 0 * * *' && fromJSON('["3.12", "3.13"]') || fromJSON('["3.12"]') }}
client-version: ${{ (github.event.schedule == '0 0 * * *' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}
@@ -82,7 +82,7 @@ jobs:
env:
OPENAI_API_KEY: dummy
with:
stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || 'server:ci-tests' }}
stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || matrix.client-type == 'server' && 'server:ci-tests' || 'docker:ci-tests' }}
setup: ${{ matrix.config.setup }}
inference-mode: 'replay'
suite: ${{ matrix.config.suite }}
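GitHub Actions has no ternary operator, so the stack-config expression chains && and ||; for the three matrix values it resolves to:

# client-type == 'library' -> 'ci-tests'
# client-type == 'server'  -> 'server:ci-tests'
# client-type == 'docker'  -> 'docker:ci-tests' (the final || fallback)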

View file

@@ -144,7 +144,7 @@ jobs:
- name: Build Llama Stack
run: |
uv run --no-sync llama stack build --template ci-tests --image-type venv
uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install
- name: Check Storage and Memory Available Before Tests
if: ${{ always() }}

View file

@@ -14,6 +14,8 @@ on:
- '.github/workflows/providers-build.yml'
- 'llama_stack/distributions/**'
- 'pyproject.toml'
- 'containers/Containerfile'
- '.dockerignore'
pull_request:
paths:
@@ -24,6 +26,8 @@ on:
- '.github/workflows/providers-build.yml'
- 'llama_stack/distributions/**'
- 'pyproject.toml'
- 'containers/Containerfile'
- '.dockerignore'
concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
@@ -60,15 +64,19 @@ jobs:
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Print build dependencies
- name: Install distribution into venv
if: matrix.image-type == 'venv'
run: |
uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only
uv run llama stack list-deps ${{ matrix.distro }} | xargs -L1 uv pip install
- name: Run Llama Stack Build
- name: Build container image
if: matrix.image-type == 'container'
run: |
# USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
# LLAMA_STACK_DIR is set to the current directory so we are building from the source
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --distro ${{ matrix.distro }} --image-type ${{ matrix.image-type }} --image-name test
docker build . \
-f containers/Containerfile \
--build-arg INSTALL_MODE=editable \
--build-arg DISTRO_NAME=${{ matrix.distro }} \
--tag llama-stack:${{ matrix.distro }}-ci
- name: Print dependencies in the image
if: matrix.image-type == 'venv'
@@ -86,8 +94,8 @@ jobs:
- name: Build a single provider
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama
uv pip install -e .
uv run --no-sync llama stack list-deps --providers inference=remote::ollama | xargs -L1 uv pip install
build-custom-container-distribution:
runs-on: ubuntu-latest
steps:
@@ -97,11 +105,16 @@ jobs:
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Build a single provider
- name: Build container image
run: |
yq -i '.image_type = "container"' llama_stack/distributions/ci-tests/build.yaml
yq -i '.image_name = "test"' llama_stack/distributions/ci-tests/build.yaml
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "python:3.12-slim"' llama_stack/distributions/ci-tests/build.yaml)
docker build . \
-f containers/Containerfile \
--build-arg INSTALL_MODE=editable \
--build-arg DISTRO_NAME=ci-tests \
--build-arg BASE_IMAGE="$BASE_IMAGE" \
--build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
-t llama-stack:ci-tests
- name: Inspect the container image entrypoint
run: |
@@ -112,7 +125,7 @@ jobs:
fi
entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
echo "Entrypoint: $entrypoint"
if [ "$entrypoint" != "[llama stack run /app/run.yaml]" ]; then
if [ "$entrypoint" != "[/usr/local/bin/llama-stack-entrypoint.sh]" ]; then
echo "Entrypoint is not correct"
exit 1
fi
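The bracketed comparison string is deliberate: {{ .Config.Entrypoint }} renders the entrypoint slice in Go's default format, brackets and no quotes. A hypothetical local check:

$ docker inspect --format '{{ .Config.Entrypoint }}' llama-stack:ci-tests
[/usr/local/bin/llama-stack-entrypoint.sh]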
@@ -129,17 +142,19 @@ jobs:
- name: Pin distribution to UBI9 base
run: |
yq -i '
.image_type = "container" |
.image_name = "ubi9-test" |
.distribution_spec.container_image = "registry.access.redhat.com/ubi9:latest"
' llama_stack/distributions/ci-tests/build.yaml
- name: Build dev container (UBI9)
env:
USE_COPY_NOT_MOUNT: "true"
LLAMA_STACK_DIR: "."
- name: Build UBI9 container image
run: |
uv run llama stack build --config llama_stack/distributions/ci-tests/build.yaml
BASE_IMAGE=$(yq -r '.distribution_spec.container_image // "registry.access.redhat.com/ubi9:latest"' llama_stack/distributions/ci-tests/build.yaml)
docker build . \
-f containers/Containerfile \
--build-arg INSTALL_MODE=editable \
--build-arg DISTRO_NAME=ci-tests \
--build-arg BASE_IMAGE="$BASE_IMAGE" \
--build-arg RUN_CONFIG_PATH=/workspace/llama_stack/distributions/ci-tests/run.yaml \
-t llama-stack:ci-tests-ubi9
- name: Inspect UBI9 image
run: |
@@ -150,7 +165,7 @@ jobs:
fi
entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
echo "Entrypoint: $entrypoint"
if [ "$entrypoint" != "[llama stack run /app/run.yaml]" ]; then
if [ "$entrypoint" != "[/usr/local/bin/llama-stack-entrypoint.sh]" ]; then
echo "Entrypoint is not correct"
exit 1
fi

View file

@@ -0,0 +1,105 @@
name: Test llama stack list-deps
run-name: Test llama stack list-deps
on:
push:
branches:
- main
paths:
- 'llama_stack/cli/stack/list_deps.py'
- 'llama_stack/cli/stack/_list_deps.py'
- 'llama_stack/core/build.*'
- 'llama_stack/core/*.sh'
- '.github/workflows/providers-list-deps.yml'
- 'llama_stack/templates/**'
- 'pyproject.toml'
pull_request:
paths:
- 'llama_stack/cli/stack/list_deps.py'
- 'llama_stack/cli/stack/_list_deps.py'
- 'llama_stack/core/build.*'
- 'llama_stack/core/*.sh'
- '.github/workflows/providers-list-deps.yml'
- 'llama_stack/templates/**'
- 'pyproject.toml'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
distros: ${{ steps.set-matrix.outputs.distros }}
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Generate Distribution List
id: set-matrix
run: |
distros=$(ls llama_stack/distributions/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
echo "distros=$distros" >> "$GITHUB_OUTPUT"
list-deps:
needs: generate-matrix
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.generate-matrix.outputs.distros) }}
image-type: [venv, container]
fail-fast: false # We want to run all jobs even if some fail
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Print dependencies
run: |
uv run llama stack list-deps ${{ matrix.distro }}
- name: Install Distro using llama stack list-deps
run: |
# USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
# LLAMA_STACK_DIR is set to the current directory so we are building from the source
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack list-deps ${{ matrix.distro }} | xargs -L1 uv pip install
- name: Print dependencies in the image
if: matrix.image-type == 'venv'
run: |
uv pip list
show-single-provider:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: Show a single provider
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack list-deps --providers inference=remote::ollama
list-deps-from-config:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install dependencies
uses: ./.github/actions/setup-runner
- name: list-deps from Config
env:
USE_COPY_NOT_MOUNT: "true"
LLAMA_STACK_DIR: "."
run: |
uv run llama stack list-deps llama_stack/distributions/ci-tests/build.yaml
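Taken together, the jobs above exercise the three invocation forms of list-deps that appear verbatim in this workflow:

llama stack list-deps ci-tests                                       # by distribution name
llama stack list-deps llama_stack/distributions/ci-tests/build.yaml  # by build config path
llama stack list-deps --providers inference=remote::ollama           # by explicit provider list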

View file

@@ -46,9 +46,9 @@ jobs:
yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
cat tests/external/ramalama-stack/run.yaml
- name: Build distro from config file
- name: Install distribution dependencies
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/ramalama-stack/build.yaml
uv run llama stack list-deps tests/external/ramalama-stack/build.yaml | xargs -L1 uv pip install
- name: Start Llama Stack server in background
if: ${{ matrix.image-type == 'venv' }}

View file

@@ -44,11 +44,14 @@ jobs:
- name: Print distro dependencies
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync llama stack build --config tests/external/build.yaml --print-deps-only
uv run --no-sync llama stack list-deps tests/external/build.yaml
- name: Build distro from config file
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run --no-sync llama stack build --config tests/external/build.yaml
uv venv ci-test
source ci-test/bin/activate
uv pip install -e .
LLAMA_STACK_LOGGING=all=CRITICAL llama stack list-deps tests/external/build.yaml | xargs -L1 uv pip install
- name: Start Llama Stack server in background
if: ${{ matrix.image-type == 'venv' }}