Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-23 21:04:29 +00:00)
# What does this PR do?

We are dropping configuration via CLI flags almost entirely. If any server configuration has to be tweaked, it must be done through the `server` section in run.yaml. This is unfortunately a breaking change for whoever was using:

* `--tls-*`
* `--disable_ipv6`

`--port` stays around and gets special treatment, since we believe it is common for developers to change the port for quick experimentation.

Closes: https://github.com/meta-llama/llama-stack/issues/1076

## Test Plan

Simply run `llama stack run <config>`; nothing should break :)

Signed-off-by: Sébastien Han <seb@redhat.com>
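For illustration, the kind of `server` block this change points users to might look like the minimal sketch below; the TLS field names (`tls_certfile`, `tls_keyfile`) are assumptions for the sake of the example, not values taken from this PR.

```yaml
# Hypothetical run.yaml excerpt: server settings now live here instead of CLI flags.
# The TLS field names below are illustrative assumptions.
server:
  port: 8321                        # still overridable with --port for quick experiments
  tls_certfile: /path/to/cert.pem   # replaces the former --tls-* flags
  tls_keyfile: /path/to/key.pem
```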
`.github/workflows/test-external-providers.yml` (79 lines, 2.8 KiB, YAML):
```yaml
name: Test External Providers

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'llama_stack/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/test-external-providers.yml' # This workflow

jobs:
  test-external-providers:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        image-type: [venv]
        # We don't do container yet, it's tricky to install a package from the host into the
        # container and point 'uv pip install' to the correct path...
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          python-version: "3.10"

      - name: Set Up Environment and Install Dependencies
        run: |
          uv sync --extra dev --extra test
          uv pip install -e .

      - name: Apply image type to config file
        run: |
          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml
          cat tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml

      - name: Setup directory for Ollama custom provider
        run: |
          mkdir -p tests/external-provider/llama-stack-provider-ollama/src/
          cp -a llama_stack/providers/remote/inference/ollama/ tests/external-provider/llama-stack-provider-ollama/src/llama_stack_provider_ollama

      - name: Create provider configuration
        run: |
          mkdir -p /tmp/providers.d/remote/inference
          cp tests/external-provider/llama-stack-provider-ollama/custom_ollama.yaml /tmp/providers.d/remote/inference/custom_ollama.yaml

      - name: Build distro from config file
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml
      - name: Start Llama Stack server in background
        if: ${{ matrix.image-type == 'venv' }}
        env:
          INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
        run: |
          source ci-test/bin/activate
          uv run pip list
          nohup uv run --active llama stack run tests/external-provider/llama-stack-provider-ollama/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &

      - name: Wait for Llama Stack server to be ready
        run: |
          for i in {1..30}; do
            if ! grep -q "remote::custom_ollama from /tmp/providers.d/remote/inference/custom_ollama.yaml" server.log; then
              echo "Waiting for Llama Stack server to load the provider..."
              sleep 1
            else
              echo "Provider loaded"
              exit 0
            fi
          done
          echo "Provider failed to load"
          cat server.log
          exit 1
```
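For context on the "Create provider configuration" step above: the spec file copied into `/tmp/providers.d/remote/inference/` is what lets the server discover the external provider, and the wait loop greps `server.log` for the line announcing that it was loaded from that path. The sketch below shows the rough shape such a remote-provider spec might take; the module, package, and config-class names are illustrative assumptions, not a copy of `custom_ollama.yaml`.

```yaml
# Illustrative sketch of an external remote-provider spec (shape assumed, not copied from the repo).
adapter:
  adapter_type: custom_ollama
  pip_packages: ["ollama", "aiohttp"]                                 # assumed package list
  config_class: llama_stack_provider_ollama.config.OllamaImplConfig   # assumed class path
  module: llama_stack_provider_ollama
api_dependencies: []
optional_api_dependencies: []
```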
|