From 045f4b5a1ad1a6bb41d6dc96a465433acbd227cd Mon Sep 17 00:00:00 2001
From: Charlie Doern
Date: Wed, 23 Jul 2025 11:54:19 -0400
Subject: [PATCH] test: add integration test for external provider module

Add a test which installs ramalama-stack
(https://github.com/containers/ramalama-stack), one of our validated
external providers
(https://llama-stack.readthedocs.io/en/latest/providers/external.html).
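
The job mirrors what a user would do by hand. A rough local sketch of
the same flow (every command, path, and the venv name is taken from the
workflow and configs added below; nothing here is new API surface):

    uv pip install ramalama
    nohup ramalama serve llama3.2:3b-instruct-fp16 > ramalama_server.log 2>&1 &
    USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build \
        --config tests/external/ramalama-stack/build.yaml
    source ramalama-stack-test/bin/activate
    nohup llama stack run tests/external/ramalama-stack/run.yaml \
        --image-type venv > server.log 2>&1 &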

Signed-off-by: Charlie Doern
---
 .../test-external-provider-module.yml     | 72 +++++++++++++++++++
 tests/external/ramalama-stack/build.yaml  | 14 ++++
 tests/external/ramalama-stack/run.yaml    | 12 ++++
 3 files changed, 98 insertions(+)
 create mode 100644 .github/workflows/test-external-provider-module.yml
 create mode 100644 tests/external/ramalama-stack/build.yaml
 create mode 100644 tests/external/ramalama-stack/run.yaml

diff --git a/.github/workflows/test-external-provider-module.yml b/.github/workflows/test-external-provider-module.yml
new file mode 100644
index 000000000..30fddb981
--- /dev/null
+++ b/.github/workflows/test-external-provider-module.yml
@@ -0,0 +1,72 @@
+name: Test External Providers Installed via Module
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+    paths:
+      - 'llama_stack/**'
+      - 'tests/integration/**'
+      - 'uv.lock'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - '.github/workflows/test-external-provider-module.yml' # This workflow
+
+jobs:
+  test-external-providers-from-module:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        image-type: [venv]
+        # We don't do containers yet; it's tricky to install a package from the
+        # host into the container and point 'uv pip install' at the correct path.
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+      - name: Install dependencies
+        uses: ./.github/actions/setup-runner
+
+      - name: Install Ramalama
+        shell: bash
+        run: |
+          uv pip install ramalama
+
+      - name: Run Ramalama
+        shell: bash
+        run: |
+          nohup ramalama serve llama3.2:3b-instruct-fp16 > ramalama_server.log 2>&1 &
+      - name: Apply image type to config file
+        run: |
+          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
+          cat tests/external/ramalama-stack/run.yaml
+
+      - name: Build distro from config file
+        run: |
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/ramalama-stack/build.yaml
+
+      - name: Start Llama Stack server in background
+        if: ${{ matrix.image-type == 'venv' }}
+        env:
+          INFERENCE_MODEL: "llama3.2:3b-instruct-fp16"
+        run: |
+          # Use the virtual environment created by the build step (name comes from build config)
+          source ramalama-stack-test/bin/activate
+          uv pip list
+          nohup llama stack run tests/external/ramalama-stack/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &
+
+      - name: Wait for Llama Stack server to be ready
+        run: |
+          for i in {1..30}; do
+            if ! grep -q "successfully connected to Ramalama" server.log; then
+              echo "Waiting for Llama Stack server to load the provider..."
+              sleep 1
+            else
+              echo "Provider loaded"
+              exit 0
+            fi
+          done
+          echo "Provider failed to load"
+          cat server.log
+          exit 1
diff --git a/tests/external/ramalama-stack/build.yaml b/tests/external/ramalama-stack/build.yaml
new file mode 100644
index 000000000..c781e6537
--- /dev/null
+++ b/tests/external/ramalama-stack/build.yaml
@@ -0,0 +1,14 @@
+version: 2
+distribution_spec:
+  description: Use (an external) Ramalama server for running LLM inference
+  container_image: null
+  providers:
+    inference:
+      - provider_id: ramalama
+        provider_type: remote::ramalama
+        module: ramalama_stack==0.3.0a0
+image_type: venv
+image_name: ramalama-stack-test
+additional_pip_packages:
+- aiosqlite
+- sqlalchemy[asyncio]
diff --git a/tests/external/ramalama-stack/run.yaml b/tests/external/ramalama-stack/run.yaml
new file mode 100644
index 000000000..9d1d34df3
--- /dev/null
+++ b/tests/external/ramalama-stack/run.yaml
@@ -0,0 +1,12 @@
+version: 2
+image_name: ramalama
+apis:
+- inference
+providers:
+  inference:
+  - provider_id: ramalama
+    provider_type: remote::ramalama
+    module: ramalama_stack==0.3.0a0
+    config: {}
+server:
+  port: 8321
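
Note for reviewers: grepping server.log for a specific log line couples
the readiness check to Ramalama's log format. A possible alternative,
sketched under the assumption that the running stack exposes a
/v1/providers listing on the port configured in run.yaml (verify the
endpoint against the deployed Llama Stack API before adopting this):

    # Poll the HTTP API instead of the log file; endpoint and port are
    # assumptions taken from run.yaml, not confirmed by this patch.
    for i in {1..30}; do
      if curl -sf http://localhost:8321/v1/providers | grep -q ramalama; then
        echo "Provider loaded"
        exit 0
      fi
      sleep 1
    done
    echo "Provider failed to load"
    exit 1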