Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-12 12:06:04 +00:00
Moved package code from llama_stack/ to src/llama_stack/ following Python packaging best practices. Updated pyproject.toml, MANIFEST.in, and tool configurations accordingly. Public API and import paths remain unchanged. Developers will need to reinstall in editable mode after pulling this change. Also updated paths in pre-commit config, scripts, and GitHub workflows.
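For illustration, a minimal sketch of the src-layout wiring this kind of move implies in pyproject.toml; the package-discovery table below assumes a setuptools build backend, which may differ from what the repo actually uses:

    [tool.setuptools.packages.find]
    where = ["src"]              # packages now live under src/ instead of the repo root
    include = ["llama_stack*"]   # import path is unchanged: `import llama_stack`

Because the on-disk location changed, an existing editable install points at the old path; refreshing it (e.g. `pip install -e .`, or `uv sync` in a uv-managed checkout) picks up the new location.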
159 lines · 6.3 KiB · YAML

name: Integration Auth Tests

run-name: Run the integration test suite with Kubernetes authentication

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
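    # PR runs only trigger when files relevant to this suite change; the '!'
    # entry exempts UI-only changes from triggering it.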
    paths:
      - 'distributions/**'
      - 'src/llama_stack/**'
      - '!src/llama_stack/ui/**'
      - 'tests/integration/**'
      - 'uv.lock'
      - 'pyproject.toml'
      - 'requirements.txt'
      - '.github/workflows/integration-auth-tests.yml' # This workflow

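# On main, the group key uses github.run_id, so every push gets its own group
# and nothing is cancelled; on any other ref (e.g. a PR) the key is shared, so
# cancel-in-progress supersedes older runs when a newer push arrives.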
concurrency:
  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
  cancel-in-progress: true

jobs:
  test-matrix:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        auth-provider: [oauth2_token]
      fail-fast: false # we want to run all tests regardless of failure

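    # Provider-specific steps below are gated on matrix.auth-provider, which
    # keeps the setup for each provider separable if more are added later.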
    steps:
      - name: Checkout repository
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0

      - name: Install dependencies
        uses: ./.github/actions/setup-runner

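      # Note: this install step is gated on a 'kubernetes' provider that is not
      # currently in the matrix; the oauth2_token path below appears to rely on
      # the minikube binary preinstalled on GitHub's ubuntu-latest runner images.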
      - name: Install minikube
        if: ${{ matrix.auth-provider == 'kubernetes' }}
        uses: medyagh/setup-minikube@e3c7f79eb1e997eabccc536a6cf318a2b0fe19d9 # v0.0.20

      - name: Start minikube
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          minikube start
          kubectl get pods -A

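      # Create a service account and mint a short-lived JWT for it; the token
      # is saved to a file and used as the Bearer token in the requests below.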
      - name: Configure Kube Auth
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          kubectl create namespace llama-stack
          kubectl create serviceaccount llama-stack-auth -n llama-stack
          kubectl create token llama-stack-auth -n llama-stack > llama-stack-auth-token

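      # The cluster's OIDC discovery document advertises the JWKS URL and the
      # issuer; the expected audience is read out of a freshly minted token's
      # payload (second dot-separated JWT segment, base64-decoded).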
      - name: Set Kubernetes Config
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          echo "KUBERNETES_API_SERVER_URL=$(kubectl get --raw /.well-known/openid-configuration | jq -r .jwks_uri)" >> $GITHUB_ENV
          echo "KUBERNETES_CA_CERT_PATH=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.certificate-authority}')" >> $GITHUB_ENV
          echo "KUBERNETES_ISSUER=$(kubectl get --raw /.well-known/openid-configuration | jq -r .issuer)" >> $GITHUB_ENV
          echo "KUBERNETES_AUDIENCE=$(kubectl create token llama-stack-auth -n llama-stack --duration=1h | cut -d. -f2 | base64 -d | jq -r '.aud[0]')" >> $GITHUB_ENV
          echo "TOKEN=$(cat llama-stack-auth-token)" >> $GITHUB_ENV

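      # The server is started from a minimal run.yaml (no APIs, no providers)
      # since only the auth middleware is under test; the heredoc is unquoted
      # so that $run_dir expands into the SQLite paths.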
      - name: Set Kube Auth Config and run server
        env:
          INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
        if: ${{ matrix.auth-provider == 'oauth2_token' }}
        run: |
          run_dir=$(mktemp -d)
          cat <<EOF > $run_dir/run.yaml
          version: '2'
          image_name: kube
          apis: []
          providers: {}
          storage:
            backends:
              kv_default:
                type: kv_sqlite
                db_path: $run_dir/kvstore.db
              sql_default:
                type: sql_sqlite
                db_path: $run_dir/sql_store.db
            stores:
              metadata:
                namespace: registry
                backend: kv_default
              inference:
                table_name: inference_store
                backend: sql_default
              conversations:
                table_name: openai_conversations
                backend: sql_default
              prompts:
                namespace: prompts
                backend: kv_default
          server:
            port: 8321
          EOF
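          # Wire the auth settings gathered above into run.yaml. The jwks.token
          # lets the server authenticate when fetching the JWKS document, in
          # case the cluster does not expose that endpoint anonymously.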
          yq eval '.server.auth.provider_config.type = "${{ matrix.auth-provider }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.tls_cafile = "${{ env.KUBERNETES_CA_CERT_PATH }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.issuer = "${{ env.KUBERNETES_ISSUER }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.audience = "${{ env.KUBERNETES_AUDIENCE }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.jwks.uri = "${{ env.KUBERNETES_API_SERVER_URL }}"' -i $run_dir/run.yaml
          yq eval '.server.auth.provider_config.jwks.token = "${{ env.TOKEN }}"' -i $run_dir/run.yaml
          cat $run_dir/run.yaml

          # avoid line breaks in the server log, especially because we grep it below.
          export LLAMA_STACK_LOG_WIDTH=200
          nohup uv run llama stack run $run_dir/run.yaml > server.log 2>&1 &

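      # Poll the unauthenticated /v1/health endpoint until the server responds,
      # then confirm from the log that the expected auth provider was enabled.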
      - name: Wait for Llama Stack server to be ready
        run: |
          echo "Waiting for Llama Stack server..."
          for i in {1..30}; do
            # Note: /v1/health does not require authentication
            if curl -s -L http://localhost:8321/v1/health | grep -q "OK"; then
              echo "Llama Stack server is up!"
              if grep -q "Enabling authentication with provider: ${{ matrix.auth-provider }}" server.log; then
                echo "Llama Stack server is configured to use ${{ matrix.auth-provider }} auth"
                exit 0
              else
                echo "Llama Stack server is not configured to use ${{ matrix.auth-provider }} auth"
                cat server.log
                exit 1
              fi
            fi
            sleep 1
          done
          echo "Llama Stack server failed to start"
          cat server.log
          exit 1

      - name: Test auth
        run: |
          echo "Testing /v1/version without token (should succeed)..."
          if curl -s -L -o /dev/null -w "%{http_code}" http://127.0.0.1:8321/v1/version | grep -q "200"; then
            echo "/v1/version accessible without token (200)"
          else
            echo "/v1/version returned non-200 status without token"
            exit 1
          fi

          echo "Testing /v1/providers without token (should fail with 401)..."
          if curl -s -L -o /dev/null -w "%{http_code}" http://127.0.0.1:8321/v1/providers | grep -q "401"; then
            echo "/v1/providers blocked without token (401)"
          else
            echo "/v1/providers did not return 401 without token"
            exit 1
          fi

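          # Caveat: $? below reflects jq, the last command in the pipeline, so
          # this check verifies the response parses as JSON rather than curl's
          # own exit status.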
echo "Testing /v1/providers with valid token (should succeed)..."
|
|
curl -s -L -H "Authorization: Bearer $(cat llama-stack-auth-token)" http://127.0.0.1:8321/v1/providers | jq
|
|
if [ $? -eq 0 ]; then
|
|
echo "/v1/providers accessible with valid token"
|
|
else
|
|
echo "/v1/providers failed with valid token"
|
|
exit 1
|
|
fi
|