ci: simplify external provider integration test (#2050)

Do not run Ollama; instead, only validate that the provider was loaded by the
server.
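
In practice, the job no longer installs, pulls, or starts Ollama and no longer
polls http://localhost:11434; the readiness step only waits until server.log
contains the provider registration line. A minimal sketch of the new check
(provider name and paths taken from the diff below; the exact wording of the
log line is an assumption based on the grep target used there):

    for i in {1..30}; do
      # Assumption: the server prints this line once the external provider
      # definition from /tmp/providers.d has been loaded.
      if grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
        echo "Provider loaded"
        exit 0
      fi
      sleep 1
    done
    echo "Provider failed to load"
    cat server.log
    exit 1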

Signed-off-by: Sébastien Han <seb@redhat.com>
Sébastien Han 2025-04-28 23:10:27 +02:00 committed by GitHub
parent 8dfce2f596
commit 7807a86358

@@ -30,18 +30,6 @@ jobs:
with:
python-version: "3.10"
- name: Install Ollama
run: |
curl -fsSL https://ollama.com/install.sh | sh
- name: Pull Ollama image
run: |
ollama pull llama3.2:3b-instruct-fp16
- name: Start Ollama in background
run: |
nohup ollama run llama3.2:3b-instruct-fp16 --keepalive=30m > ollama.log 2>&1 &
- name: Set Up Environment and Install Dependencies
run: |
uv sync --extra dev --extra test
@@ -66,21 +54,6 @@ jobs:
run: |
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml
- name: Wait for Ollama to start
run: |
echo "Waiting for Ollama..."
for i in {1..30}; do
if curl -s http://localhost:11434 | grep -q "Ollama is running"; then
echo "Ollama is running!"
exit 0
fi
sleep 1
done
echo "Ollama failed to start"
ollama ps
cat ollama.log
exit 1
- name: Start Llama Stack server in background
if: ${{ matrix.image-type == 'venv' }}
env:
@@ -92,24 +65,14 @@
- name: Wait for Llama Stack server to be ready
run: |
echo "Waiting for Llama Stack server..."
for i in {1..30}; do
if curl -s http://localhost:8321/v1/health | grep -q "OK"; then
echo "Llama Stack server is up!"
if grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
echo "Llama Stack server is using custom Ollama provider"
exit 0
else
echo "Llama Stack server is not using custom Ollama provider"
exit 1
fi
if ! grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
echo "Waiting for Llama Stack server to load the provider..."
sleep 1
else
echo "Provider loaded"
exit 0
fi
sleep 1
done
echo "Llama Stack server failed to start"
cat server.log
echo "Provider failed to load"
exit 1
- name: run inference tests
run: |
uv run pytest -v tests/integration/inference/test_text_inference.py --stack-config="http://localhost:8321" --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2