From ffe9b3b278fa726dece81a1ea150db7ed1705754 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Tue, 18 Mar 2025 16:54:42 +0100
Subject: [PATCH] ci(ollama): run more integration tests (#1636)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# What does this PR do?
Run additional tests in a matrix to accelerate the process and clearly
identify failing providers.

Signed-off-by: Sébastien Han
---
 .github/workflows/integration-tests.yml | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index ec782c331..86adf8a15 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -15,8 +15,14 @@ on:
       - '.github/workflows/integration-tests.yml' # This workflow
 
 jobs:
-  ollama:
+  test-matrix:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # Listing tests manually since some of them currently fail
+        # TODO: generate matrix list from tests/integration when fixed
+        test-type: [inference, datasets, inspect, scoring, post_training, providers]
+      fail-fast: false # we want to run all tests regardless of failure
 
     steps:
       - name: Checkout repository
@@ -43,6 +49,8 @@ jobs:
         run: |
           uv sync --extra dev --extra test
           uv pip install ollama faiss-cpu
+          # always test against the latest version of the client
+          uv pip install git+https://github.com/meta-llama/llama-stack-client-python.git@main
           uv pip install -e .
 
       - name: Wait for Ollama to start
@@ -72,17 +80,17 @@ jobs:
          echo "Waiting for Llama Stack server..."
          for i in {1..30}; do
            if curl -s http://localhost:8321/v1/health | grep -q "OK"; then
-              echo " Llama Stack server is up!"
+              echo "Llama Stack server is up!"
              exit 0
            fi
            sleep 1
          done
-          echo " Llama Stack server failed to start"
+          echo "Llama Stack server failed to start"
          cat server.log
          exit 1
 
-      - name: Run Inference Integration Tests
+      - name: Run Integration Tests
         env:
           INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
         run: |
-          uv run pytest -v tests/integration/inference --stack-config=ollama --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2
+          uv run pytest -v tests/integration/${{ matrix.test-type }} --stack-config=ollama --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2