Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-24 18:30:00 +00:00)

Commit 970d0f307f: Merge branch 'main' into openai-vector-store/qdrant

338 changed files with 15301 additions and 15997 deletions
198 .github/actions/run-and-record-tests/action.yml (vendored, new file)

@@ -0,0 +1,198 @@
name: 'Run and Record Tests'
description: 'Run integration tests and handle recording/artifact upload'

inputs:
  test-types:
    description: 'JSON array of test types to run'
    required: true
  stack-config:
    description: 'Stack configuration to use'
    required: true
  provider:
    description: 'Provider to use for tests'
    required: true
  inference-mode:
    description: 'Inference mode (record or replay)'
    required: true
  run-vision-tests:
    description: 'Whether to run vision tests'
    required: false
    default: 'false'

runs:
  using: 'composite'
  steps:
    - name: Check Storage and Memory Available Before Tests
      if: ${{ always() }}
      shell: bash
      run: |
        free -h
        df -h

    - name: Set environment variables
      shell: bash
      run: |
        echo "LLAMA_STACK_CLIENT_TIMEOUT=300" >> $GITHUB_ENV
        echo "LLAMA_STACK_TEST_INFERENCE_MODE=${{ inputs.inference-mode }}" >> $GITHUB_ENV

        # Configure provider-specific settings
        if [ "${{ inputs.provider }}" == "ollama" ]; then
          echo "OLLAMA_URL=http://0.0.0.0:11434" >> $GITHUB_ENV
          echo "TEXT_MODEL=ollama/llama3.2:3b-instruct-fp16" >> $GITHUB_ENV
          echo "SAFETY_MODEL=ollama/llama-guard3:1b" >> $GITHUB_ENV
        else
          echo "VLLM_URL=http://localhost:8000/v1" >> $GITHUB_ENV
          echo "TEXT_MODEL=vllm/meta-llama/Llama-3.2-1B-Instruct" >> $GITHUB_ENV
        fi

        if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
          echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings/vision" >> $GITHUB_ENV
        else
          echo "LLAMA_STACK_TEST_RECORDING_DIR=tests/integration/recordings" >> $GITHUB_ENV
        fi

    - name: Run Llama Stack Server
      if: ${{ contains(inputs.stack-config, 'server:') }}
      shell: bash
      run: |
        # Run this so pytest in a loop doesn't start-stop servers in a loop
        echo "Starting Llama Stack Server"
        nohup uv run llama stack run ci-tests --image-type venv > server.log 2>&1 &

        echo "Waiting for Llama Stack Server to start"
        for i in {1..30}; do
          if curl -s http://localhost:8321/v1/health | grep -q "OK"; then
            echo "Llama Stack Server started"
            exit 0
          fi
          sleep 1
        done

        echo "Llama Stack Server failed to start"
        cat server.log
        exit 1

    - name: Run Integration Tests
      shell: bash
      run: |
        stack_config="${{ inputs.stack-config }}"
        EXCLUDE_TESTS="builtin_tool or safety_with_image or code_interpreter or test_rag"

        # Configure provider-specific settings
        if [ "${{ inputs.provider }}" == "ollama" ]; then
          EXTRA_PARAMS="--safety-shield=llama-guard"
        else
          EXTRA_PARAMS=""
          EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls"
        fi

        if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
          if uv run pytest -s -v tests/integration/inference/test_vision_inference.py --stack-config=${stack_config} \
            -k "not( ${EXCLUDE_TESTS} )" \
            --vision-model=ollama/llama3.2-vision:11b \
            --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
            --color=yes ${EXTRA_PARAMS} \
            --capture=tee-sys | tee pytest-${{ inputs.inference-mode }}-vision.log; then
            echo "✅ Tests completed for vision"
          else
            echo "❌ Tests failed for vision"
            exit 1
          fi

          exit 0
        fi

        # Run non-vision tests
        TEST_TYPES='${{ inputs.test-types }}'
        echo "Test types to run: $TEST_TYPES"

        # Collect all test files for the specified test types
        TEST_FILES=""
        for test_type in $(echo "$TEST_TYPES" | jq -r '.[]'); do
          # if provider is vllm, exclude the following tests: (safety, post_training, tool_runtime)
          if [ "${{ inputs.provider }}" == "vllm" ]; then
            if [ "$test_type" == "safety" ] || [ "$test_type" == "post_training" ] || [ "$test_type" == "tool_runtime" ]; then
              echo "Skipping $test_type for vllm provider"
              continue
            fi
          fi

          if [ -d "tests/integration/$test_type" ]; then
            # Find all Python test files in this directory
            test_files=$(find tests/integration/$test_type -name "test_*.py" -o -name "*_test.py")
            if [ -n "$test_files" ]; then
              TEST_FILES="$TEST_FILES $test_files"
              echo "Added test files from $test_type: $(echo $test_files | wc -w) files"
            fi
          else
            echo "Warning: Directory tests/integration/$test_type does not exist"
          fi
        done

        if [ -z "$TEST_FILES" ]; then
          echo "No test files found for the specified test types"
          exit 1
        fi

        echo "=== Running all collected tests in a single pytest command ==="
        echo "Total test files: $(echo $TEST_FILES | wc -w)"

        if uv run pytest -s -v $TEST_FILES --stack-config=${stack_config} \
          -k "not( ${EXCLUDE_TESTS} )" \
          --text-model=$TEXT_MODEL \
          --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
          --color=yes ${EXTRA_PARAMS} \
          --capture=tee-sys | tee pytest-${{ inputs.inference-mode }}-all.log; then
          echo "✅ All tests completed successfully"
        else
          echo "❌ Tests failed"
          exit 1
        fi

    - name: Check Storage and Memory Available After Tests
      if: ${{ always() }}
      shell: bash
      run: |
        free -h
        df -h

    - name: Commit and push recordings
      if: ${{ inputs.inference-mode == 'record' }}
      shell: bash
      run: |
        echo "Checking for recording changes"
        git status --porcelain tests/integration/recordings/

        if [[ -n $(git status --porcelain tests/integration/recordings/) ]]; then
          echo "New recordings detected, committing and pushing"
          git add tests/integration/recordings/

          if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
            git commit -m "Recordings update from CI (vision)"
          else
            git commit -m "Recordings update from CI"
          fi

          git fetch origin ${{ github.event.pull_request.head.ref }}
          git rebase origin/${{ github.event.pull_request.head.ref }}
          echo "Rebased successfully"
          git push origin HEAD:${{ github.event.pull_request.head.ref }}
          echo "Pushed successfully"
        else
          echo "No recording changes"
        fi

    - name: Write inference logs to file
      if: ${{ always() }}
      shell: bash
      run: |
        sudo docker logs ollama > ollama-${{ inputs.inference-mode }}.log || true

    - name: Upload logs
      if: ${{ always() }}
      uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
      with:
        name: logs-${{ github.run_id }}-${{ github.run_attempt || '' }}-${{ strategy.job-index }}
        path: |
          *.log
        retention-days: 1
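For orientation, a minimal sketch (not part of this commit) of how a workflow step might invoke the new composite action; the step name and the test-types value are illustrative placeholders, while the input names match the action definition above.

# Hypothetical usage sketch; only the input names are taken from the action above.
- name: Run tests
  uses: ./.github/actions/run-and-record-tests
  with:
    test-types: '["inference", "agents"]'  # illustrative JSON array of test directories
    stack-config: 'server:ci-tests'
    provider: 'ollama'
    inference-mode: 'replay'
    run-vision-tests: 'false'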
73 .github/actions/run-integration-tests/action.yml (vendored, deleted)

@@ -1,73 +0,0 @@
name: 'Run Integration Tests'
description: 'Run integration tests with configurable execution mode and provider settings'

inputs:
  test-types:
    description: 'Test types to run (JSON array)'
    required: true
  stack-config:
    description: 'Stack configuration: "ci-tests" or "server:ci-tests"'
    required: true
  provider:
    description: 'Provider to use: "ollama" or "vllm"'
    required: true
  inference-mode:
    description: 'Inference mode: "record" or "replay"'
    required: true

outputs:
  logs-path:
    description: 'Path to generated log files'
    value: '*.log'

runs:
  using: 'composite'
  steps:
    - name: Run Integration Tests
      env:
        LLAMA_STACK_CLIENT_TIMEOUT: "300"
        LLAMA_STACK_TEST_RECORDING_DIR: "tests/integration/recordings"
        LLAMA_STACK_TEST_INFERENCE_MODE: ${{ inputs.inference-mode }}
      shell: bash
      run: |
        stack_config="${{ inputs.stack-config }}"
        EXCLUDE_TESTS="builtin_tool or safety_with_image or code_interpreter or test_rag"

        # Configure provider-specific settings
        if [ "${{ inputs.provider }}" == "ollama" ]; then
          export OLLAMA_URL="http://0.0.0.0:11434"
          export TEXT_MODEL="ollama/llama3.2:3b-instruct-fp16"
          export SAFETY_MODEL="ollama/llama-guard3:1b"
          EXTRA_PARAMS="--safety-shield=llama-guard"
        else
          export VLLM_URL="http://localhost:8000/v1"
          export TEXT_MODEL="vllm/meta-llama/Llama-3.2-1B-Instruct"
          EXTRA_PARAMS=""
          EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls"
        fi

        TEST_TYPES='${{ inputs.test-types }}'
        echo "Test types to run: $TEST_TYPES"

        for test_type in $(echo "$TEST_TYPES" | jq -r '.[]'); do
          # if provider is vllm, exclude the following tests: (safety, post_training, tool_runtime)
          if [ "${{ inputs.provider }}" == "vllm" ]; then
            if [ "$test_type" == "safety" ] || [ "$test_type" == "post_training" ] || [ "$test_type" == "tool_runtime" ]; then
              continue
            fi
          fi

          echo "=== Running tests for: $test_type ==="

          if uv run pytest -s -v tests/integration/$test_type --stack-config=${stack_config} \
            -k "not( ${EXCLUDE_TESTS} )" \
            --text-model=$TEXT_MODEL \
            --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \
            --color=yes ${EXTRA_PARAMS} \
            --capture=tee-sys | tee pytest-${{ inputs.inference-mode }}-$test_type.log; then
            echo "✅ Tests completed for $test_type"
          else
            echo "❌ Tests failed for $test_type"
            exit 1
          fi
        done
14 .github/actions/setup-ollama/action.yml (vendored)

@@ -1,11 +1,23 @@
 name: Setup Ollama
 description: Start Ollama
+inputs:
+  run-vision-tests:
+    description: 'Run vision tests: "true" or "false"'
+    required: false
+    default: 'false'
 runs:
   using: "composite"
   steps:
     - name: Start Ollama
       shell: bash
       run: |
-        docker run -d --name ollama -p 11434:11434 docker.io/leseb/ollama-with-models
+        if [ "${{ inputs.run-vision-tests }}" == "true" ]; then
+          image="ollama-with-vision-model"
+        else
+          image="ollama-with-models"
+        fi
+
+        echo "Starting Ollama with image: $image"
+        docker run -d --name ollama -p 11434:11434 docker.io/llamastack/$image
         echo "Verifying Ollama status..."
         timeout 30 bash -c 'while ! curl -s -L http://127.0.0.1:11434; do sleep 1 && echo "."; done'
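As a quick illustration of the new input, a caller step might look like the sketch below (hypothetical, not part of the diff); passing 'true' selects the docker.io/llamastack/ollama-with-vision-model image per the logic above.

# Hypothetical usage sketch for the run-vision-tests input.
- name: Setup ollama
  uses: ./.github/actions/setup-ollama
  with:
    run-vision-tests: 'true'  # 'false' (the default) would pull ollama-with-models instead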
51 .github/actions/setup-test-environment/action.yml (vendored, new file)

@@ -0,0 +1,51 @@
name: 'Setup Test Environment'
description: 'Common setup steps for integration tests including dependencies, providers, and build'

inputs:
  python-version:
    description: 'Python version to use'
    required: true
  client-version:
    description: 'Client version (latest or published)'
    required: true
  provider:
    description: 'Provider to setup (ollama or vllm)'
    required: true
    default: 'ollama'
  run-vision-tests:
    description: 'Whether to setup provider for vision tests'
    required: false
    default: 'false'
  inference-mode:
    description: 'Inference mode (record or replay)'
    required: true

runs:
  using: 'composite'
  steps:
    - name: Install dependencies
      uses: ./.github/actions/setup-runner
      with:
        python-version: ${{ inputs.python-version }}
        client-version: ${{ inputs.client-version }}

    - name: Setup ollama
      if: ${{ inputs.provider == 'ollama' && inputs.inference-mode == 'record' }}
      uses: ./.github/actions/setup-ollama
      with:
        run-vision-tests: ${{ inputs.run-vision-tests }}

    - name: Setup vllm
      if: ${{ inputs.provider == 'vllm' && inputs.inference-mode == 'record' }}
      uses: ./.github/actions/setup-vllm

    - name: Build Llama Stack
      shell: bash
      run: |
        uv run llama stack build --template ci-tests --image-type venv

    - name: Configure git for commits
      shell: bash
      run: |
        git config --local user.email "github-actions[bot]@users.noreply.github.com"
        git config --local user.name "github-actions[bot]"
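Taken together with run-and-record-tests, a replay job could be assembled roughly as in the following sketch; it mirrors the run-replay-mode-tests job in the updated integration-tests.yml further down, with hard-coded placeholder values instead of matrix variables.

# Hypothetical job sketch; the action paths and input names come from this commit, the values are placeholders.
example-replay-job:
  runs-on: ubuntu-latest
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
    - name: Setup test environment
      uses: ./.github/actions/setup-test-environment
      with:
        python-version: '3.12'
        client-version: 'latest'
        provider: 'ollama'
        run-vision-tests: 'false'
        inference-mode: 'replay'
    - name: Run tests
      uses: ./.github/actions/run-and-record-tests
      with:
        test-types: '["inference"]'  # placeholder; the real workflow passes the discovered list
        stack-config: 'ci-tests'
        provider: 'ollama'
        inference-mode: 'replay'
        run-vision-tests: 'false'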
4 .github/workflows/README.md (vendored)

@@ -1,6 +1,6 @@
 # Llama Stack CI

-Llama Stack uses GitHub Actions for Continous Integration (CI). Below is a table detailing what CI the project includes and the purpose.
+Llama Stack uses GitHub Actions for Continuous Integration (CI). Below is a table detailing what CI the project includes and the purpose.

 | Name | File | Purpose |
 | ---- | ---- | ------- |

@@ -8,7 +8,7 @@ Llama Stack uses GitHub Actions for Continous Integration (CI). Below is a table
 | Installer CI | [install-script-ci.yml](install-script-ci.yml) | Test the installation script |
 | Integration Auth Tests | [integration-auth-tests.yml](integration-auth-tests.yml) | Run the integration test suite with Kubernetes authentication |
 | SqlStore Integration Tests | [integration-sql-store-tests.yml](integration-sql-store-tests.yml) | Run the integration test suite with SqlStore |
-| Integration Tests | [integration-tests.yml](integration-tests.yml) | Run the integration test suite from tests/integration |
+| Integration Tests (Replay) | [integration-tests.yml](integration-tests.yml) | Run the integration test suite from tests/integration in replay mode |
 | Vector IO Integration Tests | [integration-vector-io-tests.yml](integration-vector-io-tests.yml) | Run the integration test suite with various VectorIO providers |
 | Pre-commit | [pre-commit.yml](pre-commit.yml) | Run pre-commit checks |
 | Test Llama Stack Build | [providers-build.yml](providers-build.yml) | Test llama stack build |
160 .github/workflows/integration-tests.yml (vendored)

@@ -1,22 +1,22 @@
-name: Integration Tests
+name: Integration Tests (Replay)

-run-name: Run the integration test suite from tests/integration
+run-name: Run the integration test suite from tests/integration in replay mode

 on:
   push:
     branches: [ main ]
   pull_request:
     branches: [ main ]
-    types: [opened, synchronize, labeled]
+    types: [opened, synchronize, reopened]
     paths:
       - 'llama_stack/**'
       - 'tests/**'
       - 'uv.lock'
       - 'pyproject.toml'
       - 'requirements.txt'
       - '.github/workflows/integration-tests.yml' # This workflow
       - '.github/actions/setup-ollama/action.yml'
-      - '.github/actions/run-integration-tests/action.yml'
+      - '.github/actions/setup-test-environment/action.yml'
+      - '.github/actions/run-and-record-tests/action.yml'
   schedule:
     # If changing the cron schedule, update the provider in the test-matrix job
     - cron: '0 0 * * *' # (test latest client) Daily at 12 AM UTC

@@ -33,31 +33,15 @@ on:
       default: 'ollama'

 concurrency:
-  # This creates three concurrency groups:
-  #   ${{ github.workflow }}-${{ github.ref }}-rerecord (for valid triggers with re-record-tests label)
-  #   ${{ github.workflow }}-${{ github.ref }}-replay (for valid triggers without re-record-tests label)
-  #   ${{ github.workflow }}-${{ github.ref }}-no-run (for invalid triggers that will be skipped)
-  # The "no-run" group ensures that irrelevant label events don't interfere with the real workflows.
-  group: >-
-    ${{ github.workflow }}-${{ github.ref }}-${{
-      (github.event.action == 'opened' ||
-       github.event.action == 'synchronize' ||
-       (github.event.action == 'labeled' && contains(github.event.pull_request.labels.*.name, 're-record-tests'))) &&
-      (contains(github.event.pull_request.labels.*.name, 're-record-tests') && 'rerecord' || 'replay') ||
-      'no-run'
-    }}
+  # Skip concurrency for pushes to main - each commit should be tested independently
+  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
   cancel-in-progress: true

 jobs:
   discover-tests:
-    if: |
-      github.event.action == 'opened' ||
-      github.event.action == 'synchronize' ||
-      (github.event.action == 'labeled' && contains(github.event.pull_request.labels.*.name, 're-record-tests'))
     runs-on: ubuntu-latest
     outputs:
       test-types: ${{ steps.generate-test-types.outputs.test-types }}
-      rerecord-tests: ${{ steps.check-rerecord-tests.outputs.rerecord-tests }}

     steps:
       - name: Checkout repository

@@ -67,94 +51,13 @@ jobs:
         id: generate-test-types
         run: |
           # Get test directories dynamically, excluding non-test directories
+          # NOTE: we are excluding post_training since the tests take too long
           TEST_TYPES=$(find tests/integration -maxdepth 1 -mindepth 1 -type d -printf "%f\n" |
-            grep -Ev "^(__pycache__|fixtures|test_cases|recordings)$" |
+            grep -Ev "^(__pycache__|fixtures|test_cases|recordings|post_training)$" |
             sort | jq -R -s -c 'split("\n")[:-1]')
           echo "test-types=$TEST_TYPES" >> $GITHUB_OUTPUT

-      - name: Check if re-record-tests label exists
-        id: check-rerecord-tests
-        run: |
-          if [[ "${{ contains(github.event.pull_request.labels.*.name, 're-record-tests') }}" == "true" ]]; then
-            echo "rerecord-tests=true" >> $GITHUB_OUTPUT
-          else
-            echo "rerecord-tests=false" >> $GITHUB_OUTPUT
-          fi
-
-  record-tests:
-    # Sequential job for recording to avoid SQLite conflicts
-    if: ${{ needs.discover-tests.outputs.rerecord-tests == 'true' }}
-    needs: discover-tests
-    runs-on: ubuntu-latest
-
-    permissions:
-      contents: write
-      pull-requests: write
-
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-
-      - name: Install dependencies
-        uses: ./.github/actions/setup-runner
-        with:
-          python-version: "3.12" # Use single Python version for recording
-          client-version: "latest"
-
-      - name: Setup ollama
-        if: ${{ inputs.test-provider == 'ollama' }}
-        uses: ./.github/actions/setup-ollama
-
-      - name: Setup vllm
-        if: ${{ inputs.test-provider == 'vllm' }}
-        uses: ./.github/actions/setup-vllm
-
-      - name: Build Llama Stack
-        run: |
-          uv run llama stack build --template ci-tests --image-type venv
-
-      - name: Configure git for commits
-        run: |
-          git config --local user.email "github-actions[bot]@users.noreply.github.com"
-          git config --local user.name "github-actions[bot]"
-
-      - name: Run Integration Tests for All Types (Recording Mode)
-        uses: ./.github/actions/run-integration-tests
-        with:
-          test-types: ${{ needs.discover-tests.outputs.test-types }}
-          stack-config: 'server:ci-tests' # recording must be done with server since more tests are run
-          provider: ${{ inputs.test-provider }}
-          inference-mode: 'record'
-
-      - name: Commit and push recordings
-        run: |
-          if ! git diff --quiet tests/integration/recordings/; then
-            echo "Committing recordings"
-            git add tests/integration/recordings/
-            git commit -m "Update recordings"
-            echo "Pushing all recording commits to PR"
-            git push origin HEAD:${{ github.head_ref }}
-          else
-            echo "No recording changes"
-          fi
-
-      - name: Write inference logs to file
-        if: ${{ always() }}
-        run: |
-          sudo docker logs ollama > ollama-recording.log || true
-
-      - name: Upload recording logs
-        if: ${{ always() }}
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: recording-logs-${{ github.run_id }}
-          path: |
-            *.log
-          retention-days: 1
-
-  run-tests:
-    # Skip this job if we're in recording mode (handled by record-tests job)
-    if: ${{ needs.discover-tests.outputs.rerecord-tests != 'true' }}
+  run-replay-mode-tests:
     needs: discover-tests
     runs-on: ubuntu-latest

@@ -164,48 +67,29 @@
        client-type: [library, server]
        # Use vllm on weekly schedule, otherwise use test-provider input (defaults to ollama)
        provider: ${{ (github.event.schedule == '1 0 * * 0') && fromJSON('["vllm"]') || fromJSON(format('["{0}"]', github.event.inputs.test-provider || 'ollama')) }}
-       python-version: ["3.12", "3.13"]
-       client-version: ${{ (github.event.schedule == '0 0 * * 0' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}
+       # Use Python 3.13 only on nightly schedule (daily latest client test), otherwise use 3.12
+       python-version: ${{ github.event.schedule == '0 0 * * *' && fromJSON('["3.12", "3.13"]') || fromJSON('["3.12"]') }}
+       client-version: ${{ (github.event.schedule == '0 0 * * *' || github.event.inputs.test-all-client-versions == 'true') && fromJSON('["published", "latest"]') || fromJSON('["latest"]') }}
+       run-vision-tests: ['true', 'false']

     steps:
       - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

-      - name: Install dependencies
-        uses: ./.github/actions/setup-runner
+      - name: Setup test environment
+        uses: ./.github/actions/setup-test-environment
         with:
           python-version: ${{ matrix.python-version }}
           client-version: ${{ matrix.client-version }}
+          provider: ${{ matrix.provider }}
+          run-vision-tests: ${{ matrix.run-vision-tests }}
+          inference-mode: 'replay'

-      - name: Build Llama Stack
-        run: |
-          uv run llama stack build --template ci-tests --image-type venv
-
-      - name: Check Storage and Memory Available Before Tests
-        if: ${{ always() }}
-        run: |
-          free -h
-          df -h
-
-      - name: Run Integration Tests (Replay Mode)
-        uses: ./.github/actions/run-integration-tests
+      - name: Run tests
+        uses: ./.github/actions/run-and-record-tests
         with:
           test-types: ${{ needs.discover-tests.outputs.test-types }}
           stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || 'server:ci-tests' }}
           provider: ${{ matrix.provider }}
           inference-mode: 'replay'
-
-      - name: Check Storage and Memory Available After Tests
-        if: ${{ always() }}
-        run: |
-          free -h
-          df -h
-
-      - name: Upload test logs on failure
-        if: ${{ failure() }}
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: test-logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.provider }}-${{ matrix.client-type }}-${{ matrix.python-version }}-${{ matrix.client-version }}
-          path: |
-            *.log
-          retention-days: 1
+          run-vision-tests: ${{ matrix.run-vision-tests }}
12 .github/workflows/providers-build.yml (vendored)

@@ -9,8 +9,8 @@ on:
     paths:
       - 'llama_stack/cli/stack/build.py'
       - 'llama_stack/cli/stack/_build.py'
-      - 'llama_stack/distribution/build.*'
-      - 'llama_stack/distribution/*.sh'
+      - 'llama_stack/core/build.*'
+      - 'llama_stack/core/*.sh'
       - '.github/workflows/providers-build.yml'
       - 'llama_stack/templates/**'
       - 'pyproject.toml'

@@ -19,8 +19,8 @@ on:
     paths:
       - 'llama_stack/cli/stack/build.py'
       - 'llama_stack/cli/stack/_build.py'
-      - 'llama_stack/distribution/build.*'
-      - 'llama_stack/distribution/*.sh'
+      - 'llama_stack/core/build.*'
+      - 'llama_stack/core/*.sh'
       - '.github/workflows/providers-build.yml'
       - 'llama_stack/templates/**'
       - 'pyproject.toml'

@@ -108,7 +108,7 @@ jobs:
           IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1)
           entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
           echo "Entrypoint: $entrypoint"
-          if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then
+          if [ "$entrypoint" != "[python -m llama_stack.core.server.server --config /app/run.yaml]" ]; then
             echo "Entrypoint is not correct"
             exit 1
           fi

@@ -142,7 +142,7 @@ jobs:
           IMAGE_ID=$(docker images --format "{{.Repository}}:{{.Tag}}" | head -n 1)
           entrypoint=$(docker inspect --format '{{ .Config.Entrypoint }}' $IMAGE_ID)
           echo "Entrypoint: $entrypoint"
-          if [ "$entrypoint" != "[python -m llama_stack.distribution.server.server --config /app/run.yaml]" ]; then
+          if [ "$entrypoint" != "[python -m llama_stack.core.server.server --config /app/run.yaml]" ]; then
             echo "Entrypoint is not correct"
             exit 1
           fi
.github/workflows/test-external-provider-module.yml (vendored)

@@ -12,12 +12,13 @@ on:
       - 'tests/integration/**'
       - 'uv.lock'
       - 'pyproject.toml'
       - 'requirements.txt'
       - 'tests/external/*'
       - '.github/workflows/test-external-provider-module.yml' # This workflow

 jobs:
   test-external-providers-from-module:
+    # This workflow is disabled. See https://github.com/meta-llama/llama-stack/pull/2975#issuecomment-3138702984 for details
+    if: false
     runs-on: ubuntu-latest
     strategy:
       matrix:

@@ -47,7 +48,7 @@ jobs:

       - name: Build distro from config file
         run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/ramalama-stack/build.yaml
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/ramalama-stack/build.yaml

       - name: Start Llama Stack server in background
         if: ${{ matrix.image-type }} == 'venv'
4 .github/workflows/test-external.yml (vendored)

@@ -43,11 +43,11 @@ jobs:
       - name: Print distro dependencies
         run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/build.yaml --print-deps-only
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/build.yaml --print-deps-only

       - name: Build distro from config file
         run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/build.yaml
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/build.yaml

       - name: Start Llama Stack server in background
         if: ${{ matrix.image-type }} == 'venv'