feat(ci): keep only one re-recording job because independent recordings will conflict (#2956)

A few important updates:

- When recording tests, we cannot fan out into a matrix, because the
independent recordings would conflict with one another.
- In fact, we no longer need a matrix over test types at all: the tests
themselves are fast, and the per-job overhead of `llama stack build` and
setting up `uv` etc. outweighs them.
- Refactored the test execution into a standalone composite action (see the sketch below).
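
Both the new `record-tests` job and the replay matrix job now delegate to that composite action. A minimal sketch of the call site (the input names come from the new action; the literal values here are only illustrative):

```yaml
# Hypothetical call site; values are placeholders, input names match the new action.
- name: Run Integration Tests (Replay Mode)
  uses: ./.github/actions/run-integration-tests
  with:
    test-types: '["inference"]'   # JSON array produced by the discover-tests job
    stack-config: 'ci-tests'      # or 'server:ci-tests'
    provider: 'ollama'            # or 'vllm'
    inference-mode: 'replay'      # or 'record'
```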
Ashwin Bharambe 2025-07-29 17:48:04 -07:00 committed by GitHub
parent b237df8f18
commit f6afb3c26b
2 changed files with 158 additions and 103 deletions

.github/actions/run-integration-tests/action.yml (new file)

@@ -0,0 +1,73 @@
name: 'Run Integration Tests'
description: 'Run integration tests with configurable execution mode and provider settings'

inputs:
  test-types:
    description: 'Test types to run (JSON array)'
    required: true
  stack-config:
    description: 'Stack configuration: "ci-tests" or "server:ci-tests"'
    required: true
  provider:
    description: 'Provider to use: "ollama" or "vllm"'
    required: true
  inference-mode:
    description: 'Inference mode: "record" or "replay"'
    required: true

outputs:
  logs-path:
    description: 'Path to generated log files'
    value: '*.log'

runs:
  using: 'composite'
  steps:
    - name: Run Integration Tests
      env:
        LLAMA_STACK_CLIENT_TIMEOUT: "300"
        LLAMA_STACK_TEST_RECORDING_DIR: "tests/integration/recordings"
        LLAMA_STACK_TEST_INFERENCE_MODE: ${{ inputs.inference-mode }}
      shell: bash
      run: |
        stack_config="${{ inputs.stack-config }}"
        EXCLUDE_TESTS="builtin_tool or safety_with_image or code_interpreter or test_rag"

        # Configure provider-specific settings
        if [ "${{ inputs.provider }}" == "ollama" ]; then
          export OLLAMA_URL="http://0.0.0.0:11434"
          export TEXT_MODEL="ollama/llama3.2:3b-instruct-fp16"
          export SAFETY_MODEL="ollama/llama-guard3:1b"
          EXTRA_PARAMS="--safety-shield=llama-guard"
        else
          export VLLM_URL="http://localhost:8000/v1"
          export TEXT_MODEL="vllm/meta-llama/Llama-3.2-1B-Instruct"
          EXTRA_PARAMS=""
          EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls"
        fi

        TEST_TYPES='${{ inputs.test-types }}'
        echo "Test types to run: $TEST_TYPES"

        for test_type in $(echo "$TEST_TYPES" | jq -r '.[]'); do
          # if provider is vllm, exclude the following tests: (safety, post_training, tool_runtime)
          if [ "${{ inputs.provider }}" == "vllm" ]; then
            if [ "$test_type" == "safety" ] || [ "$test_type" == "post_training" ] || [ "$test_type" == "tool_runtime" ]; then
              continue
            fi
          fi

          echo "=== Running tests for: $test_type ==="

          if uv run pytest -s -v tests/integration/$test_type --stack-config=${stack_config} \
            -k "not( ${EXCLUDE_TESTS} )" \
            --text-model=$TEXT_MODEL \
            --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
            --color=yes ${EXTRA_PARAMS} \
            --capture=tee-sys | tee pytest-${{ inputs.inference-mode }}-$test_type.log; then
            echo "✅ Tests completed for $test_type"
          else
            echo "❌ Tests failed for $test_type"
            exit 1
          fi
        done
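
For local debugging, the core pytest invocation this action performs can be approximated as below. This is a sketch, assuming an Ollama server is already running on the default port and that recorded responses exist under tests/integration/recordings; the `inference` test type is just an example.

```bash
# Approximation of one loop iteration of the action above, in replay mode against Ollama.
export LLAMA_STACK_TEST_INFERENCE_MODE="replay"
export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
export OLLAMA_URL="http://0.0.0.0:11434"
export TEXT_MODEL="ollama/llama3.2:3b-instruct-fp16"
export SAFETY_MODEL="ollama/llama-guard3:1b"

uv run pytest -s -v tests/integration/inference --stack-config=ci-tests \
  -k "not( builtin_tool or safety_with_image or code_interpreter or test_rag )" \
  --text-model=$TEXT_MODEL \
  --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
  --safety-shield=llama-guard --color=yes
```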

.github/workflows/integration-tests.yml

@@ -1,6 +1,6 @@
 name: Integration Tests
-run-name: Run the integration test suite with Ollama
+run-name: Run the integration test suite from tests/integration
 on:
   push:
@@ -16,6 +16,7 @@ on:
       - 'requirements.txt'
       - '.github/workflows/integration-tests.yml' # This workflow
       - '.github/actions/setup-ollama/action.yml'
+      - '.github/actions/run-integration-tests/action.yml'
   schedule:
     # If changing the cron schedule, update the provider in the test-matrix job
     - cron: '0 0 * * *' # (test latest client) Daily at 12 AM UTC
@@ -39,21 +40,21 @@ jobs:
   discover-tests:
     runs-on: ubuntu-latest
     outputs:
-      test-type: ${{ steps.generate-matrix.outputs.test-type }}
+      test-types: ${{ steps.generate-test-types.outputs.test-types }}
       rerecord-tests: ${{ steps.check-rerecord-tests.outputs.rerecord-tests }}

     steps:
       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

-      - name: Generate test matrix
-        id: generate-matrix
+      - name: Generate test types
+        id: generate-test-types
         run: |
           # Get test directories dynamically, excluding non-test directories
           TEST_TYPES=$(find tests/integration -maxdepth 1 -mindepth 1 -type d -printf "%f\n" |
             grep -Ev "^(__pycache__|fixtures|test_cases|recordings)$" |
             sort | jq -R -s -c 'split("\n")[:-1]')
-          echo "test-type=$TEST_TYPES" >> $GITHUB_OUTPUT
+          echo "test-types=$TEST_TYPES" >> $GITHUB_OUTPUT

       - name: Check if re-record-tests label exists
         id: check-rerecord-tests
@@ -64,49 +65,93 @@
             echo "rerecord-tests=false" >> $GITHUB_OUTPUT
           fi

-  test-matrix:
+  record-tests:
+    # Sequential job for recording to avoid SQLite conflicts
+    if: ${{ needs.discover-tests.outputs.rerecord-tests == 'true' }}
     needs: discover-tests
     runs-on: ubuntu-latest
     permissions:
+      # Set write permissions since we might need to commit recordings
       contents: write
       pull-requests: write
-    env:
-      # Create reusable variable for the re-record tests condition
-      SHOULD_RECORD: ${{ needs.discover-tests.outputs.rerecord-tests == 'true' }}
-      # TODO: set up another var to track whether we need ollama or not
-      # not every matrix type needs ollama
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Install dependencies
+        uses: ./.github/actions/setup-runner
+        with:
+          python-version: "3.12" # Use single Python version for recording
+          client-version: "latest"
+
+      - name: Setup ollama
+        if: ${{ inputs.test-provider == 'ollama' }}
+        uses: ./.github/actions/setup-ollama
+
+      - name: Setup vllm
+        if: ${{ inputs.test-provider == 'vllm' }}
+        uses: ./.github/actions/setup-vllm
+
+      - name: Build Llama Stack
+        run: |
+          uv run llama stack build --template ci-tests --image-type venv
+
+      - name: Configure git for commits
+        run: |
+          git config --local user.email "github-actions[bot]@users.noreply.github.com"
+          git config --local user.name "github-actions[bot]"
+
+      - name: Run Integration Tests for All Types (Recording Mode)
+        uses: ./.github/actions/run-integration-tests
+        with:
+          test-types: ${{ needs.discover-tests.outputs.test-types }}
+          stack-config: 'ci-tests'
+          provider: ${{ inputs.test-provider }}
+          inference-mode: 'record'
+
+      - name: Commit and push recordings
+        run: |
+          if ! git diff --quiet tests/integration/recordings/; then
+            echo "Committing recordings"
+            git add tests/integration/recordings/
+            git commit -m "Update recordings"
+            echo "Pushing all recording commits to PR"
+            git push origin HEAD:${{ github.head_ref }}
+          else
+            echo "No recording changes"
+          fi
+
+      - name: Write inference logs to file
+        if: ${{ always() }}
+        run: |
+          sudo docker logs ollama > ollama-recording.log || true
+
+      - name: Upload recording logs
+        if: ${{ always() }}
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: recording-logs-${{ github.run_id }}
+          path: |
+            *.log
+          retention-days: 1
+
+  run-tests:
+    # Skip this job if we're in recording mode (handled by record-tests job)
+    if: ${{ needs.discover-tests.outputs.rerecord-tests != 'true' }}
+    needs: discover-tests
+    runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
-        test-type: ${{ fromJson(needs.discover-tests.outputs.test-type) }}
         client-type: [library, server]
         # Use vllm on weekly schedule, otherwise use test-provider input (defaults to ollama)
         provider: ${{ (github.event.schedule == '1 0 * * 0') && fromJSON('["vllm"]') || fromJSON(format('["{0}"]', github.event.inputs.test-provider || 'ollama')) }}
         python-version: ["3.12", "3.13"]
         client-version: ${{ (github.event.schedule == '0 0 * * 0' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}
-        exclude: # TODO: look into why these tests are failing and fix them
-          - provider: vllm
-            test-type: safety
-          - provider: vllm
-            test-type: post_training
-          - provider: vllm
-            test-type: tool_runtime

     steps:
-      - name: Debug
-        run: |
-          echo "test-type: ${{ matrix.test-type }}"
-          echo "client-type: ${{ matrix.client-type }}"
-          echo "provider: ${{ matrix.provider }}"
-          echo "python-version: ${{ matrix.python-version }}"
-          echo "client-version: ${{ matrix.client-version }}"
-          echo "SHOULD_RECORD: ${{ env.SHOULD_RECORD }}"
-          echo "rerecord-tests: ${{ needs.discover-tests.outputs.rerecord-tests }}"
-
       - name: Checkout repository
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -116,14 +161,6 @@ jobs:
           python-version: ${{ matrix.python-version }}
           client-version: ${{ matrix.client-version }}

-      - name: Setup ollama
-        if: ${{ matrix.provider == 'ollama' && env.SHOULD_RECORD == 'true' }}
-        uses: ./.github/actions/setup-ollama
-
-      - name: Setup vllm
-        if: ${{ matrix.provider == 'vllm' }}
-        uses: ./.github/actions/setup-vllm
-
       - name: Build Llama Stack
         run: |
           uv run llama stack build --template ci-tests --image-type venv
@@ -134,62 +171,13 @@
           free -h
           df -h

-      - name: Run Integration Tests
-        env:
-          LLAMA_STACK_CLIENT_TIMEOUT: "300" # Increased timeout for eval operations
-        # Use 'shell' to get pipefail behavior
-        # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
-        # TODO: write a precommit hook to detect if a test contains a pipe but does not use 'shell: bash'
-        shell: bash
-        run: |
-          if [ "${{ matrix.client-type }}" == "library" ]; then
-            stack_config="ci-tests"
-          else
-            stack_config="server:ci-tests"
-          fi
-
-          EXCLUDE_TESTS="builtin_tool or safety_with_image or code_interpreter or test_rag"
-
-          export LLAMA_STACK_TEST_RECORDING_DIR="tests/integration/recordings"
-          if [ "$SHOULD_RECORD" == "true" ]; then
-            export LLAMA_STACK_TEST_INFERENCE_MODE="record"
-          else
-            export LLAMA_STACK_TEST_INFERENCE_MODE="replay"
-          fi
-
-          if [ "${{ matrix.provider }}" == "ollama" ]; then
-            export OLLAMA_URL="http://0.0.0.0:11434"
-            export TEXT_MODEL=ollama/llama3.2:3b-instruct-fp16
-            export SAFETY_MODEL="ollama/llama-guard3:1b"
-            EXTRA_PARAMS="--safety-shield=llama-guard"
-          else
-            export VLLM_URL="http://localhost:8000/v1"
-            export TEXT_MODEL=vllm/meta-llama/Llama-3.2-1B-Instruct
-            # TODO: remove the not(test_inference_store_tool_calls) once we can get the tool called consistently
-            EXTRA_PARAMS=
-            EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls"
-          fi
-
-          uv run pytest -s -v tests/integration/${{ matrix.test-type }} --stack-config=${stack_config} \
-            -k "not( ${EXCLUDE_TESTS} )" \
-            --text-model=$TEXT_MODEL \
-            --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
-            --color=yes ${EXTRA_PARAMS} \
-            --capture=tee-sys | tee pytest-${{ matrix.test-type }}.log
-
-      - name: Update the PR if tests/integration/recordings/ has changed
-        if: ${{ env.SHOULD_RECORD == 'true' }}
-        run: |
-          if ! git diff --quiet tests/integration/recordings/; then
-            echo "Updating PR with updated recordings"
-            git config --local user.email "github-actions[bot]@users.noreply.github.com"
-            git config --local user.name "github-actions[bot]"
-            git add tests/integration/recordings/
-            git commit -m "Update recordings from integration tests"
-            git push origin HEAD:${{ github.head_ref }}
-          else
-            echo "No changes to recordings detected"
-          fi
+      - name: Run Integration Tests (Replay Mode)
+        uses: ./.github/actions/run-integration-tests
+        with:
+          test-types: ${{ needs.discover-tests.outputs.test-types }}
+          stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || 'server:ci-tests' }}
+          provider: ${{ matrix.provider }}
+          inference-mode: 'replay'

       - name: Check Storage and Memory Available After Tests
         if: ${{ always() }}
@@ -197,17 +185,11 @@ jobs:
           free -h
           df -h

-      - name: Write inference logs to file
-        if: ${{ env.SHOULD_RECORD == 'true' }}
-        run: |
-          sudo docker logs ollama > ollama.log || true
-          sudo docker logs vllm > vllm.log || true
-
-      - name: Upload all logs to artifacts
-        if: ${{ env.SHOULD_RECORD == 'true' }}
+      - name: Upload test logs on failure
+        if: ${{ failure() }}
         uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
-          name: logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.provider }}-${{ matrix.client-type }}-${{ matrix.test-type }}-${{ matrix.python-version }}-${{ matrix.client-version }}
+          name: test-logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.provider }}-${{ matrix.client-type }}-${{ matrix.python-version }}-${{ matrix.client-version }}
           path: |
             *.log
           retention-days: 1
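
As a closing note on the `test-types` plumbing: the `discover-tests` job serializes the test directories into a JSON array, and the composite action iterates over that array with `jq`. A rough illustration of the two ends (the directory names below are hypothetical):

```bash
# What discover-tests writes to its job output (names are hypothetical):
TEST_TYPES='["agents","inference","safety"]'
echo "test-types=$TEST_TYPES" >> "$GITHUB_OUTPUT"

# How the composite action then consumes that same JSON array:
for test_type in $(echo "$TEST_TYPES" | jq -r '.[]'); do
  echo "=== Running tests for: $test_type ==="
done
```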