test: add integration test for module
Add a test which installs ramalama-stack (https://github.com/containers/ramalama-stack), one of our validated external providers: https://llama-stack.readthedocs.io/en/latest/providers/external.html

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in: parent 776fabed9e, commit 045f4b5a1a
3 changed files with 98 additions and 0 deletions
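In practice, "installed via module" means the build step resolves the provider's `module:` pin into a package install inside the distro's venv, and the stack then loads the provider from that module at startup. A condensed local equivalent of the CI flow below, as a hedged sketch (commands, paths, and the model tag are taken from the workflow; it assumes `uv`, `ramalama`, and the `llama` CLI are on PATH):

```bash
# Hedged sketch: condensed local equivalent of the CI job below.
uv pip install ramalama
nohup ramalama serve llama3.2:3b-instruct-fp16 > ramalama_server.log 2>&1 &

# Build a venv distro from the test config; the external provider module
# (ramalama_stack==0.3.0a0) is installed as part of this step.
USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/ramalama-stack/build.yaml

# Run the stack against the config that wires in the ramalama provider.
source ramalama-stack-test/bin/activate
nohup llama stack run tests/external/ramalama-stack/run.yaml --image-type venv > server.log 2>&1 &
```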
.github/workflows/test-external-provider-module.yml (vendored, new file, +72)

@@ -0,0 +1,72 @@
name: Test External Providers Installed via Module

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'llama_stack/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/test-external-provider-module.yml' # This workflow

jobs:
  test-external-providers-from-module:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image-type: [venv]
        # We don't do container yet; it's tricky to install a package from the host into the
        # container and point 'uv pip install' to the correct path...
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: Install Ramalama
        shell: bash
        run: |
          uv pip install ramalama

      - name: Run Ramalama
        shell: bash
        run: |
          nohup ramalama serve llama3.2:3b-instruct-fp16 > ramalama_server.log 2>&1 &
      - name: Apply image type to config file
        run: |
          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
          cat tests/external/ramalama-stack/run.yaml

      - name: Build distro from config file
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external/ramalama-stack/build.yaml

      - name: Start Llama Stack server in background
        if: ${{ matrix.image-type == 'venv' }}
        env:
          INFERENCE_MODEL: "llama3.2:3b-instruct-fp16"
        run: |
          # Use the virtual environment created by the build step (name comes from build config)
          source ramalama-stack-test/bin/activate
          uv pip list
          nohup llama stack run tests/external/ramalama-stack/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &

      - name: Wait for Llama Stack server to be ready
        run: |
          for i in {1..30}; do
            if ! grep -q "successfully connected to Ramalama" server.log; then
              echo "Waiting for Llama Stack server to load the provider..."
              sleep 1
            else
              echo "Provider loaded"
              exit 0
            fi
          done
          echo "Provider failed to load"
          cat server.log
          exit 1
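The readiness check above greps the server log for the provider's connection message. An alternative that does not depend on log wording is to poll the server's HTTP API; a minimal sketch, assuming a health endpoint served at `/v1/health` on the configured port 8321 (the exact path may vary across Llama Stack versions):

```bash
# Hedged sketch: poll the Llama Stack server over HTTP instead of scraping server.log.
for i in {1..30}; do
  if curl -sf http://localhost:8321/v1/health > /dev/null; then
    echo "Server is ready"
    exit 0
  fi
  echo "Waiting for Llama Stack server..."
  sleep 1
done
echo "Server failed to become ready"
cat server.log
exit 1
```

The log grep remains the stricter check here, since it confirms the external provider specifically connected to Ramalama, not just that the HTTP listener is up.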
tests/external/ramalama-stack/build.yaml (vendored, new file, +14)

@@ -0,0 +1,14 @@
version: 2
distribution_spec:
  description: Use (an external) Ramalama server for running LLM inference
  container_image: null
  providers:
    inference:
      - provider_id: ramalama
        provider_type: remote::ramalama
        module: ramalama_stack==0.3.0a0
image_type: venv
image_name: ramalama-stack-test
additional_pip_packages:
  - aiosqlite
  - sqlalchemy[asyncio]
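The `module: ramalama_stack==0.3.0a0` entry is what makes this provider external: at build time the pinned package is installed into the `ramalama-stack-test` venv alongside `additional_pip_packages`. A quick manual verification, as a hedged sketch (it assumes the package exposes a `ramalama_stack` module, as the pin's name suggests):

```bash
# Hedged sketch: confirm the external provider module landed in the build venv.
source ramalama-stack-test/bin/activate
uv pip show ramalama-stack          # expect version 0.3.0a0
python -c "import ramalama_stack"   # assumption: the package exposes this module name
```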
tests/external/ramalama-stack/run.yaml (vendored, new file, +12)

@@ -0,0 +1,12 @@
version: 2
image_name: ramalama
apis:
  - inference
providers:
  inference:
    - provider_id: ramalama
      provider_type: remote::ramalama
      module: ramalama_stack==0.3.0a0
      config: {}
server:
  port: 8321
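With the server listening on port 8321, the provider can be exercised end to end. A smoke-test sketch, assuming the OpenAI-compatible chat completions route at `/v1/openai/v1/chat/completions` (the exact path depends on the Llama Stack version) and the model tag the workflow serves:

```bash
# Hedged sketch: one chat completion routed through the ramalama inference provider.
curl -s http://localhost:8321/v1/openai/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "llama3.2:3b-instruct-fp16",
        "messages": [{"role": "user", "content": "Reply with one word."}]
      }'
```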