**This PR changes configurations in a backward incompatible way.**

Run configs today repeat full SQLite/Postgres snippets everywhere a store is needed, which means duplicated credentials, extra connection pools, and lots of drift between files. This PR introduces named storage backends so the stack and providers can share a single catalog and reference those backends by name.

## Key Changes

- Add `storage.backends` to `StackRunConfig`, register each KV/SQL backend once at startup, and validate that references point to the right family.
- Move server stores under `storage.stores` with lightweight references (backend + namespace/table) instead of full configs.
- Update every provider/config/doc to use the new reference style; docs/codegen now surface the simplified YAML.

## Migration

Before:

```yaml
metadata_store:
  type: sqlite
  db_path: ~/.llama/distributions/foo/registry.db
inference_store:
  type: postgres
  host: ${env.POSTGRES_HOST}
  port: ${env.POSTGRES_PORT}
  db: ${env.POSTGRES_DB}
  user: ${env.POSTGRES_USER}
  password: ${env.POSTGRES_PASSWORD}
conversations_store:
  type: postgres
  host: ${env.POSTGRES_HOST}
  port: ${env.POSTGRES_PORT}
  db: ${env.POSTGRES_DB}
  user: ${env.POSTGRES_USER}
  password: ${env.POSTGRES_PASSWORD}
```

After:

```yaml
storage:
  backends:
    kv_default:
      type: kv_sqlite
      db_path: ~/.llama/distributions/foo/kvstore.db
    sql_default:
      type: sql_postgres
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
  stores:
    metadata:
      backend: kv_default
      namespace: registry
    inference:
      backend: sql_default
      table_name: inference_store
      max_write_queue_size: 10000
      num_writers: 4
    conversations:
      backend: sql_default
      table_name: openai_conversations
```

Provider configs follow the same pattern. For example, a Chroma vector adapter switches from:

```yaml
providers:
  vector_io:
  - provider_id: chromadb
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL}
      kvstore:
        type: sqlite
        db_path: ~/.llama/distributions/foo/chroma.db
```

to:

```yaml
providers:
  vector_io:
  - provider_id: chromadb
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL}
      persistence:
        backend: kv_default
        namespace: vector_io::chroma_remote
```

Once the backends are declared, everything else just points at them, so rotating credentials or swapping to Postgres happens in one place and the stack reuses a single connection pool.
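As a sketch of that single point of change (assuming a hypothetical `kv_postgres` backend type mirroring the `sql_postgres` fields above), moving the KV backend onto Postgres touches only the backend definition; every store reference stays as-is:

```yaml
storage:
  backends:
    kv_default:
      type: kv_postgres   # hypothetical type, mirroring sql_postgres above
      host: ${env.POSTGRES_HOST}
      port: ${env.POSTGRES_PORT}
      db: ${env.POSTGRES_DB}
      user: ${env.POSTGRES_USER}
      password: ${env.POSTGRES_PASSWORD}
  stores:
    metadata:
      backend: kv_default   # unchanged; now resolves to Postgres
      namespace: registry
```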
.github/workflows/integration-auth-tests.yml (156 lines, 6.2 KiB, YAML)
name: Integration Auth Tests

run-name: Run the integration test suite with Kubernetes authentication

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
    paths:
      - 'distributions/**'
      - 'llama_stack/**'
      - '!llama_stack/ui/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/integration-auth-tests.yml' # This workflow

concurrency:
  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
  cancel-in-progress: true

jobs:
  test-matrix:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        auth-provider: [oauth2_token]
      fail-fast: false # we want to run all tests regardless of failure

    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: Install minikube
        if: ${{ matrix.auth-provider == 'kubernetes' }}
        uses: medyagh/setup-minikube@e3c7f79eb1e997eabccc536a6cf318a2b0fe19d9 # v0.0.20

      - name: Start minikube
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          minikube start
          kubectl get pods -A

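      # Create a dedicated namespace and service account, then mint a token; the
      # token is saved to a file and reused below as the client bearer token.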
      - name: Configure Kube Auth
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          kubectl create namespace llama-stack
          kubectl create serviceaccount llama-stack-auth -n llama-stack
          kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token

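      # Derive the OAuth2 settings from the cluster's OIDC discovery document:
      # jwks_uri and issuer come from /.well-known/openid-configuration, and the
      # audience is read out of a freshly minted token's decoded JWT payload.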
      - name: Set Kubernetes Config
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          echo "KUBERNETES_API_SERVER_URL=$(kubectl get --raw /.well-known/openid-configuration | jq -r .jwks_uri)" >> $GITHUB_ENV
          echo "KUBERNETES_CA_CERT_PATH=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}')" >> $GITHUB_ENV
          echo "KUBERNETES_ISSUER=$(kubectl get --raw /.well-known/openid-configuration | jq -r .issuer)" >> $GITHUB_ENV
          echo "KUBERNETES_AUDIENCE=$(kubectl create token llama-stack-auth -n llama-stack --duration=1h | cut -d. -f2 | base64 -d | jq -r '.aud[0]')" >> $GITHUB_ENV
          echo "TOKEN=$(cat llama-stack-auth-token)" >> $GITHUB_ENV

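      # Write a minimal run config that exercises the named storage backends,
      # patch the auth provider settings in with yq, then start the server in
      # the background.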
      - name: Set Kube Auth Config and run server
        env:
          INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          run_dir=$(mktemp -d)
          # Unquoted delimiter so $run_dir expands into the db_path values below.
          cat <<EOF > $run_dir/run.yaml
          version: '2'
          image_name: kube
          apis: []
          providers: {}
          storage:
            backends:
              kv_default:
                type: kv_sqlite
                db_path: $run_dir/kvstore.db
              sql_default:
                type: sql_sqlite
                db_path: $run_dir/sql_store.db
            stores:
              metadata:
                namespace: registry
                backend: kv_default
              inference:
                table_name: inference_store
                backend: sql_default
              conversations:
                table_name: openai_conversations
                backend: sql_default
          server:
            port: 8321
          EOF
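          # Inject the auth settings gathered above; jwks.token is the service
          # account token, which the server presents when fetching signing keys
          # (the API server's JWKS endpoint rejects anonymous requests by default).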
          yq eval '.server.auth.provider_config.type = "${{ matrix.auth-provider }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.tls_cafile = "${{ env.KUBERNETES_CA_CERT_PATH }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.issuer = "${{ env.KUBERNETES_ISSUER }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.audience = "${{ env.KUBERNETES_AUDIENCE }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.jwks.uri = "${{ env.KUBERNETES_API_SERVER_URL }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.jwks.token = "${{ env.TOKEN }}"' -i $run_dir/run.yaml
          cat $run_dir/run.yaml

          # avoid line breaks in the server log, especially because we grep it below.
          export LLAMA_STACK_LOG_WIDTH=200
          nohup uv run llama stack run $run_dir/run.yaml > server.log 2>&1 &

      - name: Wait for Llama Stack server to be ready
        run: |
          echo "Waiting for Llama Stack server..."
          for i in {1..30}; do
            # Note: /v1/health does not require authentication
            if curl -s -L http://localhost:8321/v1/health | grep -q "OK"; then
              echo "Llama Stack server is up!"
              if grep -q "Enabling authentication with provider: ${{ matrix.auth-provider }}" server.log; then
                echo "Llama Stack server is configured to use ${{ matrix.auth-provider }} auth"
                exit 0
              else
                echo "Llama Stack server is not configured to use ${{ matrix.auth-provider }} auth"
                cat server.log
                exit 1
              fi
            fi
            sleep 1
          done
          echo "Llama Stack server failed to start"
          cat server.log
          exit 1

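      # Three checks: a public endpoint stays open, a protected endpoint is
      # rejected without a token, and the same endpoint succeeds with the
      # service-account token minted earlier.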
      - name: Test auth
        run: |
          echo "Testing /v1/version without token (should succeed)..."
          if curl -s -L -o /dev/null -w "%{http_code}" http://127.0.0.1:8321/v1/version | grep -q "200"; then
            echo "/v1/version accessible without token (200)"
          else
            echo "/v1/version returned non-200 status without token"
            exit 1
          fi

          echo "Testing /v1/providers without token (should fail with 401)..."
          if curl -s -L -o /dev/null -w "%{http_code}" http://127.0.0.1:8321/v1/providers | grep -q "401"; then
            echo "/v1/providers blocked without token (401)"
          else
            echo "/v1/providers did not return 401 without token"
            exit 1
          fi

          echo "Testing /v1/providers with valid token (should succeed)..."
          # Check the HTTP status directly; the previous `curl | jq` pipeline only
          # reflected jq's exit code, not whether the request was authorized.
          status=$(curl -s -L -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers)
          if [ "$status" = "200" ]; then
            echo "/v1/providers accessible with valid token (200)"
          else
            echo "/v1/providers returned $status with valid token"
            exit 1
          fi