From 0b02af792dc1c3b5eac98f48fab0998f06f77f3e Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Wed, 30 Jul 2025 12:37:50 -0700 Subject: [PATCH] feat(ci): add support for running vision inference tests --- .../actions/run-integration-tests/action.yml | 20 ++ .../actions/setup-vision-ollama/action.yml | 11 ++ .../workflows/integration-vision-tests.yml | 183 ++++++++++++++++++ tests/VisionContainerfile | 13 ++ 4 files changed, 227 insertions(+) create mode 100644 .github/actions/setup-vision-ollama/action.yml create mode 100644 .github/workflows/integration-vision-tests.yml create mode 100644 tests/VisionContainerfile diff --git a/.github/actions/run-integration-tests/action.yml b/.github/actions/run-integration-tests/action.yml index e2db846e4..ecf71d804 100644 --- a/.github/actions/run-integration-tests/action.yml +++ b/.github/actions/run-integration-tests/action.yml @@ -14,6 +14,10 @@ inputs: inference-mode: description: 'Inference mode: "record" or "replay"' required: true + run-vision-tests: + description: 'Run vision tests: "true" or "false"' + required: false + default: 'false' outputs: logs-path: @@ -46,6 +50,22 @@ runs: EXCLUDE_TESTS="${EXCLUDE_TESTS} or test_inference_store_tool_calls" fi + if [ "${{ inputs.run-vision-tests }}" == "true" ]; then + if uv run pytest -s -v tests/integration/inference/test_vision_inference.py --stack-config=${stack_config} \ + -k "not( ${EXCLUDE_TESTS} )" \ + --vision-model=ollama/llama3.2-vision:11b \ + --embedding-model=sentence-transformers/all-MiniLM-L6-v2 \ + --color=yes ${EXTRA_PARAMS} \ + --capture=tee-sys | tee pytest-${{ inputs.inference-mode }}-vision.log; then + echo "✅ Tests completed for vision" + else + echo "❌ Tests failed for vision" + exit 1 + fi + + exit 0 + fi + TEST_TYPES='${{ inputs.test-types }}' echo "Test types to run: $TEST_TYPES" diff --git a/.github/actions/setup-vision-ollama/action.yml b/.github/actions/setup-vision-ollama/action.yml new file mode 100644 index 000000000..c9b702fea --- /dev/null 
+++ b/.github/actions/setup-vision-ollama/action.yml @@ -0,0 +1,11 @@ +name: Setup Ollama for Vision Tests +description: Start Ollama with vision model +runs: + using: "composite" + steps: + - name: Start Ollama + shell: bash + run: | + docker run -d --name ollama -p 11434:11434 docker.io/llamastack/ollama-with-vision-model + echo "Verifying Ollama status..." + timeout 30 bash -c 'while ! curl -s -L http://127.0.0.1:11434; do sleep 1 && echo "."; done' diff --git a/.github/workflows/integration-vision-tests.yml b/.github/workflows/integration-vision-tests.yml new file mode 100644 index 000000000..dcc4d55ff --- /dev/null +++ b/.github/workflows/integration-vision-tests.yml @@ -0,0 +1,183 @@ +name: Vision Inference Integration Tests + +run-name: Run vision inference integration test suite from tests/integration/inference + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + types: [opened, synchronize, labeled] + paths: + - 'llama_stack/**' + - 'tests/**' + - 'uv.lock' + - 'pyproject.toml' + - 'requirements.txt' + - '.github/workflows/integration-vision-tests.yml' # This workflow + - '.github/actions/setup-vision-ollama/action.yml' + - '.github/actions/run-integration-tests/action.yml' + workflow_dispatch: + inputs: + test-all-client-versions: + description: 'Test against both the latest and published versions' + type: boolean + default: false + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ contains(github.event.pull_request.labels.*.name, 're-record-tests') && 'rerecord' || 'replay' }} + cancel-in-progress: true + +jobs: + discover-tests: + if: | + github.event_name != 'pull_request' || + github.event.action == 'opened' || github.event.action == 'synchronize' || + (github.event.action == 'labeled' && contains(github.event.pull_request.labels.*.name, 're-record-tests')) + runs-on: ubuntu-latest + outputs: + test-types: ${{ steps.generate-test-types.outputs.test-types }} + rerecord-tests: ${{ steps.check-rerecord-tests.outputs.rerecord-tests }} + + steps: + - 
name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Generate test types + id: generate-test-types + run: | + echo "test-types=[\"vision\"]" >> $GITHUB_OUTPUT + + - name: Check if re-record-tests label exists + id: check-rerecord-tests + run: | + if [[ "${{ contains(github.event.pull_request.labels.*.name, 're-record-tests') }}" == "true" ]]; then + echo "rerecord-tests=true" >> $GITHUB_OUTPUT + else + echo "rerecord-tests=false" >> $GITHUB_OUTPUT + fi + + record-tests: + # Sequential job for recording to avoid SQLite conflicts + if: ${{ needs.discover-tests.outputs.rerecord-tests == 'true' }} + needs: discover-tests + runs-on: ubuntu-latest + + permissions: + contents: write + pull-requests: write + + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + with: + python-version: "3.12" # Use single Python version for recording + client-version: "latest" + + - name: Setup ollama for vision tests + uses: ./.github/actions/setup-vision-ollama + + - name: Build Llama Stack + run: | + uv run llama stack build --template ci-tests --image-type venv + + - name: Configure git for commits + run: | + git config --local user.email "github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + + - name: Run Integration Tests for All Types (Recording Mode) + uses: ./.github/actions/run-integration-tests + with: + test-types: ${{ needs.discover-tests.outputs.test-types }} + stack-config: 'server:ci-tests' # re-recording must be done in server mode + provider: 'ollama' + inference-mode: 'record' + run-vision-tests: 'true' + + - name: Commit and push recordings + run: | + if ! 
git diff --quiet tests/integration/recordings/; then + echo "Committing recordings" + git add tests/integration/recordings/ + git commit -m "Update recordings" + echo "Pushing all recording commits to PR" + git push origin HEAD:${{ github.head_ref }} + else + echo "No recording changes" + fi + + - name: Write inference logs to file + if: ${{ always() }} + run: | + sudo docker logs ollama > ollama-recording.log || true + + - name: Upload recording logs + if: ${{ always() }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: recording-logs-${{ github.run_id }} + path: | + *.log + retention-days: 1 + + run-tests: + # Skip this job if we're in recording mode (handled by record-tests job) + if: ${{ needs.discover-tests.outputs.rerecord-tests != 'true' }} + needs: discover-tests + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + client-type: [library] + provider: [ollama] + python-version: ["3.12"] + client-version: ["latest"] + + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Install dependencies + uses: ./.github/actions/setup-runner + with: + python-version: ${{ matrix.python-version }} + client-version: ${{ matrix.client-version }} + + - name: Build Llama Stack + run: | + uv run llama stack build --template ci-tests --image-type venv + + - name: Check Storage and Memory Available Before Tests + if: ${{ always() }} + run: | + free -h + df -h + + - name: Run Integration Tests (Replay Mode) + uses: ./.github/actions/run-integration-tests + with: + test-types: ${{ needs.discover-tests.outputs.test-types }} + stack-config: ${{ matrix.client-type == 'library' && 'ci-tests' || 'server:ci-tests' }} + provider: ${{ matrix.provider }} + inference-mode: 'replay' + run-vision-tests: 'true' + + - name: Check Storage and Memory Available After Tests + if: ${{ always() }} + run: | + free -h + df -h + + - name: Upload test logs on failure + 
if: ${{ failure() }} + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: test-logs-${{ github.run_id }}-${{ github.run_attempt }}-${{ matrix.provider }}-${{ matrix.client-type }}-${{ matrix.python-version }}-${{ matrix.client-version }} + path: | + *.log + retention-days: 1 diff --git a/tests/VisionContainerfile b/tests/VisionContainerfile new file mode 100644 index 000000000..e69d0f3a0 --- /dev/null +++ b/tests/VisionContainerfile @@ -0,0 +1,13 @@ +# Containerfile used to build our all in one ollama image to run tests in CI +# podman build --platform linux/amd64 -f VisionContainerfile -t ollama-with-vision-model . +# +FROM --platform=linux/amd64 ollama/ollama:latest + +# Start ollama and pull models in a single layer +RUN ollama serve & \ + sleep 5 && \ + ollama pull llama3.2-vision:11b && \ + ollama pull all-minilm:l6-v2 + +# Set the entrypoint to start ollama serve +ENTRYPOINT ["ollama", "serve"]