name: Test External Providers Installed via Module

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'llama_stack/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/test-external-providers-module.yml' # This workflow

jobs:
  test-external-providers-from-module:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image-type: [venv]
        # We don't do container yet, it's tricky to install a package from the host into the
        # container and point 'uv pip install' to the correct path...
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: Install Ramalama
        shell: bash
        run: |
          uv pip install ramalama

      - name: Run Ramalama
        shell: bash
        run: |
          # Serve the model in the background; output goes to ramalama_server.log
          nohup ramalama serve llama3.2:3b-instruct-fp16 > ramalama_server.log 2>&1 &

      - name: Apply image type to config file
        run: |
          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
          cat tests/external/ramalama-stack/run.yaml

      - name: Build distro from config file
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/ramalama-stack/build.yaml

      - name: Start Llama Stack server in background
        if: ${{ matrix.image-type == 'venv' }}
        env:
          INFERENCE_MODEL: "llama3.2:3b-instruct-fp16"
        run: |
          # Use the virtual environment created by the build step (name comes from build config)
          source ramalama-stack-test/bin/activate
          uv pip list
          nohup llama stack run tests/external/ramalama-stack/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &

      - name: Wait for Llama Stack server to be ready
        run: |
          # Poll server.log for up to 30 seconds until the provider reports a successful connection
          for i in {1..30}; do
            if ! grep -q "successfully connected to Ramalama" server.log; then
              echo "Waiting for Llama Stack server to load the provider..."
              sleep 1
            else
              echo "Provider loaded"
              exit 0
            fi
          done
          echo "Provider failed to load"
          cat server.log
          exit 1