name: Integration Tests

run-name: Run the integration test suite with Ollama

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'llama_stack/**'
      - 'tests/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/integration-tests.yml' # This workflow
      - '.github/actions/setup-ollama/action.yml'
  schedule:
    # If changing the cron schedule, update the provider in the test-matrix job
    - cron: '0 0 * * *' # (test latest client) Daily at 12 AM UTC
    - cron: '1 0 * * 0' # (test vllm) Weekly on Sunday at 1 AM UTC
  workflow_dispatch:
    inputs:
      test-all-client-versions:
        description: 'Test against both the latest and published versions'
        type: boolean
        default: false
      test-provider:
        description: 'Test against a specific provider'
        type: string
        default: 'ollama'

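# Keep only the newest run per ref: a new push cancels any in-progress run of this
# workflow for the same branch or PR.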
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

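# Two jobs: discover-tests builds the list of test suites from the directories under
# tests/integration, and test-matrix fans out over that list (plus client type,
# provider, Python version and client version) to run them.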
jobs:
  discover-tests:
    runs-on: ubuntu-latest
    outputs:
      test-type: ${{ steps.generate-matrix.outputs.test-type }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Generate test matrix
        id: generate-matrix
        run: |
          # Get test directories dynamically, excluding non-test directories
          TEST_TYPES=$(find tests/integration -maxdepth 1 -mindepth 1 -type d -printf "%f\n" |
            grep -Ev "^(__pycache__|fixtures|test_cases)$" |
            sort | jq -R -s -c 'split("\n")[:-1]')
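          # Emit the directory names as a JSON array so the test-matrix job can
          # consume them via fromJson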
          echo "test-type=$TEST_TYPES" >> $GITHUB_OUTPUT

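  # Runs the integration suite once per combination of test type, client type
  # (library vs. server), provider, Python version and client version.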
  test-matrix:
    needs: discover-tests
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        test-type: ${{ fromJson(needs.discover-tests.outputs.test-type) }}
        client-type: [library, server]
        # Use vllm on weekly schedule, otherwise use test-provider input (defaults to ollama)
        provider: ${{ (github.event.schedule == '1 0 * * 0') && fromJSON('["vllm"]') || fromJSON(format('["{0}"]', github.event.inputs.test-provider || 'ollama')) }}
        python-version: ["3.12", "3.13"]
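        # Test the published client in addition to the latest one only when the
        # test-all-client-versions input is set (or on the matching schedule)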
        client-version: ${{ (github.event.schedule == '0 0 * * 0' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}
        exclude: # TODO: look into why these tests are failing and fix them
          - provider: vllm
            test-type: safety
          - provider: vllm
            test-type: post_training
          - provider: vllm
            test-type: tool_runtime

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Install dependencies
        uses: ./.github/actions/setup-runner
        with:
          python-version: ${{ matrix.python-version }}
          client-version: ${{ matrix.client-version }}

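      # Exactly one of the two provider setup steps runs, selected by matrix.provider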
      - name: Setup ollama
        if: ${{ matrix.provider == 'ollama' }}
        uses: ./.github/actions/setup-ollama

      - name: Setup vllm
        if: ${{ matrix.provider == 'vllm' }}
        uses: ./.github/actions/setup-vllm

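      # Build the ci-tests distribution into the venv; the same template name is used
      # as the --stack-config for the test run below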
      - name: Build Llama Stack
        run: |
          uv run llama stack build --template ci-tests --image-type venv

      - name: Check Storage and Memory Available Before Tests
        if: ${{ always() }}
        run: |
          free -h
          df -h

      - name: Run Integration Tests
        env:
          LLAMA_STACK_CLIENT_TIMEOUT: "300" # Increased timeout for eval operations
        # Use 'shell' to get pipefail behavior
        # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#exit-codes-and-error-action-preference
        # TODO: write a precommit hook to detect if a test contains a pipe but does not use 'shell: bash'
        shell: bash
        run: |
          if [ "${{ matrix.client-type }}" == "library" ]; then
            stack_config="ci-tests"
          else
            stack_config="server:ci-tests"
          fi

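          # Tests excluded for every provider; the vllm branch below appends one more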
          EXCLUDE_TESTS="builtin_tool or safety_with_image or code_interpreter or test_rag"
          if [ "${{ matrix.provider }}" == "ollama" ]; then
            export ENABLE_OLLAMA="ollama"
            export OLLAMA_URL="http://0.0.0.0:11434"
            export OLLAMA_INFERENCE_MODEL="llama3.2:3b-instruct-fp16"
            export TEXT_MODEL=ollama/$OLLAMA_INFERENCE_MODEL
            export SAFETY_MODEL="llama-guard3:1b"
            EXTRA_PARAMS="--safety-shield=$SAFETY_MODEL"
          else
            export ENABLE_VLLM="vllm"
            export VLLM_URL="http://localhost:8000/v1"
            export VLLM_INFERENCE_MODEL="meta-llama/Llama-3.2-1B-Instruct"
            export TEXT_MODEL=vllm/$VLLM_INFERENCE_MODEL
            # TODO: remove the not(test_inference_store_tool_calls) once we can get the tool called consistently
            EXTRA_PARAMS=
            EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls"
          fi

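          # -k deselects the excluded tests; tee writes a per-test-type log that is
          # uploaded as an artifact at the end of the job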
          uv run pytest -s -v tests/integration/${{ matrix.test-type }} --stack-config=${stack_config} \
            -k "not( ${EXCLUDE_TESTS} )" \
            --text-model=$TEXT_MODEL \
            --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
            --color=yes ${EXTRA_PARAMS} \
            --capture=tee-sys | tee pytest-${{ matrix.test-type }}.log

      - name: Check Storage and Memory Available After Tests
        if: ${{ always() }}
        run: |
          free -h
          df -h

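      # Capture container logs from whichever provider ran; '|| true' keeps the step
      # from failing when the other container does not exist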
      - name: Write inference logs to file
        if: ${{ always() }}
        run: |
          sudo docker logs ollama > ollama.log || true
          sudo docker logs vllm > vllm.log || true

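      # Upload the pytest and provider logs (kept for one day) so failed matrix
      # entries can be debugged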
      - name: Upload all logs to artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.provider }}-${{ matrix.client-type }}-${{ matrix.test-type }}-${{ matrix.python-version }}-${{ matrix.client-version }}
          path: |
            *.log
          retention-days: 1