name: Test External Providers
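# Runs on pushes to main, and on PRs against main that touch provider code,
# integration tests, dependency pins, or this workflow itself.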
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'llama_stack/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/test-external-providers.yml' # This workflow

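# A single job: build a distro that pulls the in-tree Ollama provider in as an
# external (out-of-tree) provider, then verify the server actually loads it.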
jobs:
  test-external-providers:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image-type: [venv]
        # We don't do container yet, it's tricky to install a package from the host into the
        # container and point 'uv pip install' to the correct path...
    steps:
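      # checkout is pinned to a full commit SHA (v4.2.2) so a moved or
      # re-tagged release can't silently change what runs here.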
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

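      # Local composite action; presumably sets up uv and installs the
      # project's Python dependencies on the runner.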
      - name: Install dependencies
        uses: ./.github/actions/setup-runner

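      # yq -i rewrites the distro config in place with the matrix's image
      # type; the cat just surfaces the resulting file in the job log.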
      - name: Apply image type to config file
        run: |
          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml
          cat tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml

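      # Vendor the in-tree Ollama provider source into the test package's
      # src/ layout so it can be installed as an out-of-tree provider.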
      - name: Setup directory for Ollama custom provider
        run: |
          mkdir -p tests/external-provider/llama-stack-provider-ollama/src/
          cp -a llama_stack/providers/remote/inference/ollama/ tests/external-provider/llama-stack-provider-ollama/src/llama_stack_provider_ollama

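      # ~/.llama/providers.d is where the stack discovers external provider
      # specs; this path must match the one the server reports loading below.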
      - name: Create provider configuration
        run: |
          mkdir -p /home/runner/.llama/providers.d/remote/inference
          cp tests/external-provider/llama-stack-provider-ollama/custom_ollama.yaml /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml

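      # USE_COPY_NOT_MOUNT=true and LLAMA_STACK_DIR=. appear to make the build
      # use this checkout (copied in rather than mounted) instead of a
      # released llama-stack package.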
      - name: Build distro from config file
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml

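      # nohup + & backgrounds the server so the job can proceed to polling;
      # stdout/stderr go to server.log for the readiness check below.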
      - name: Start Llama Stack server in background
        if: ${{ matrix.image-type == 'venv' }}
        env:
          INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
        run: |
          uv run pip list
          nohup uv run --active llama stack run tests/external-provider/llama-stack-provider-ollama/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &

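      # Poll server.log for up to ~30s for the line the server prints once the
      # external provider is loaded; dump the log and fail if it never shows.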
      - name: Wait for Llama Stack server to be ready
        run: |
          for i in {1..30}; do
            if ! grep -q "remote::custom_ollama from /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml" server.log; then
              echo "Waiting for Llama Stack server to load the provider..."
              sleep 1
            else
              echo "Provider loaded"
              exit 0
            fi
          done
          echo "Provider failed to load"
          cat server.log
          exit 1