forked from phoenix-oss/llama-stack-mirror
ci(ollama): run more integration tests (#1636)
# What does this PR do?

Run additional tests in a matrix to accelerate the process and clearly identify failing providers.

Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
parent 168cbcbb92
commit ffe9b3b278
1 changed file with 13 additions and 5 deletions
.github/workflows/integration-tests.yml (vendored): 18 changes
```diff
@@ -15,8 +15,14 @@ on:
       - '.github/workflows/integration-tests.yml' # This workflow
 
 jobs:
-  ollama:
+  test-matrix:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # Listing tests manually since some of them currently fail
+        # TODO: generate matrix list from tests/integration when fixed
+        test-type: [inference, datasets, inspect, scoring, post_training, providers]
+      fail-fast: false # we want to run all tests regardless of failure
 
     steps:
       - name: Checkout repository
```
```diff
@@ -43,6 +49,8 @@ jobs:
         run: |
           uv sync --extra dev --extra test
           uv pip install ollama faiss-cpu
+          # always test against the latest version of the client
+          uv pip install git+https://github.com/meta-llama/llama-stack-client-python.git@main
           uv pip install -e .
 
       - name: Wait for Ollama to start
```
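Installing the client straight from the `main` branch means each CI run exercises the newest client code rather than the last tagged release. One way to confirm which client revision actually landed in the environment (a sanity check suggested here, not a step in the workflow):

```bash
# Show the resolved version and source of the client installed from git
uv pip show llama-stack-client
```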
```diff
@@ -72,17 +80,17 @@ jobs:
           echo "Waiting for Llama Stack server..."
           for i in {1..30}; do
             if curl -s http://localhost:8321/v1/health | grep -q "OK"; then
-              echo " Llama Stack server is up!"
+              echo "Llama Stack server is up!"
               exit 0
             fi
             sleep 1
           done
-          echo " Llama Stack server failed to start"
+          echo "Llama Stack server failed to start"
           cat server.log
           exit 1
 
-      - name: Run Inference Integration Tests
+      - name: Run Integration Tests
         env:
           INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
         run: |
-          uv run pytest -v tests/integration/inference --stack-config=ollama --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2
+          uv run pytest -v tests/integration/${{ matrix.test-type }} --stack-config=ollama --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2
```
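The wait step polls the health endpoint once per second for up to 30 seconds before giving up and dumping `server.log`. The same pattern generalizes to any HTTP readiness check; a minimal sketch following the workflow's curl/grep approach (the function name and arguments are illustrative, not part of the workflow):

```bash
# Generic readiness poll: wait_for_http URL NEEDLE [TRIES]
# Returns 0 as soon as NEEDLE appears in the response body.
wait_for_http() {
  local url="$1" needle="$2" tries="${3:-30}"
  for ((i = 1; i <= tries; i++)); do
    if curl -s "$url" | grep -q "$needle"; then
      echo "service at $url is up"
      return 0
    fi
    sleep 1
  done
  echo "service at $url failed to start after $tries attempts" >&2
  return 1
}

# Usage mirroring the workflow's check:
wait_for_http "http://localhost:8321/v1/health" "OK"
```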