From 7807a8635896410519c0c37663471b14f0670a09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Mon, 28 Apr 2025 23:10:27 +0200
Subject: [PATCH] ci: simplify external provider integration test (#2050)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Do not run Ollama, but only validate that the provider was loaded by the
server.

Signed-off-by: Sébastien Han
---
 .github/workflows/test-external-providers.yml | 51 +++------------------
 1 file changed, 7 insertions(+), 44 deletions(-)

diff --git a/.github/workflows/test-external-providers.yml b/.github/workflows/test-external-providers.yml
index 37f5c45ab..4bbd32cb6 100644
--- a/.github/workflows/test-external-providers.yml
+++ b/.github/workflows/test-external-providers.yml
@@ -30,18 +30,6 @@ jobs:
         with:
           python-version: "3.10"
 
-      - name: Install Ollama
-        run: |
-          curl -fsSL https://ollama.com/install.sh | sh
-
-      - name: Pull Ollama image
-        run: |
-          ollama pull llama3.2:3b-instruct-fp16
-
-      - name: Start Ollama in background
-        run: |
-          nohup ollama run llama3.2:3b-instruct-fp16 --keepalive=30m > ollama.log 2>&1 &
-
       - name: Set Up Environment and Install Dependencies
         run: |
           uv sync --extra dev --extra test
@@ -66,21 +54,6 @@ jobs:
         run: |
           USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml
 
-      - name: Wait for Ollama to start
-        run: |
-          echo "Waiting for Ollama..."
-          for i in {1..30}; do
-            if curl -s http://localhost:11434 | grep -q "Ollama is running"; then
-              echo "Ollama is running!"
-              exit 0
-            fi
-            sleep 1
-          done
-          echo "Ollama failed to start"
-          ollama ps
-          ollama.log
-          exit 1
-
       - name: Start Llama Stack server in background
         if: ${{ matrix.image-type }} == 'venv'
         env:
@@ -92,24 +65,14 @@
 
       - name: Wait for Llama Stack server to be ready
         run: |
-          echo "Waiting for Llama Stack server..."
           for i in {1..30}; do
-            if curl -s http://localhost:8321/v1/health | grep -q "OK"; then
-              echo "Llama Stack server is up!"
-              if grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
-                echo "Llama Stack server is using custom Ollama provider"
-                exit 0
-              else
-                echo "Llama Stack server is not using custom Ollama provider"
-                exit 1
-              fi
+            if ! grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
+              echo "Waiting for Llama Stack server to load the provider..."
+              sleep 1
+            else
+              echo "Provider loaded"
+              exit 0
             fi
-            sleep 1
           done
-          echo "Llama Stack server failed to start"
-          cat server.log
+          echo "Provider failed to load"
           exit 1
-
-      - name: run inference tests
-        run: |
-          uv run pytest -v tests/integration/inference/test_text_inference.py --stack-config="http://localhost:8321" --text-model="meta-llama/Llama-3.2-3B-Instruct" --embedding-model=all-MiniLM-L6-v2
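
For reference, the readiness check this patch introduces can also be run as a standalone script
outside of CI. This is only a sketch under the patch's own assumptions: the server writes its log
to ./server.log in the current directory, and it announces the external provider with exactly the
line the workflow greps for.

    #!/usr/bin/env bash
    # Poll server.log for up to 30 seconds until the external provider
    # registration line appears, mirroring the simplified workflow step above.
    for i in {1..30}; do
      if grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
        echo "Provider loaded"
        exit 0
      fi
      echo "Waiting for Llama Stack server to load the provider..."
      sleep 1
    done
    echo "Provider failed to load"
    exit 1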