mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 18:00:36 +00:00
Fixes issues in the storage system by guaranteeing immediate durability for responses and ensuring background writers stay alive. Three related fixes: * Responses to the OpenAI-compatible API now write directly to Postgres/SQLite inside the request instead of detouring through an async queue that might never drain; this restores the expected read-after-write behavior and removes the "response not found" races reported by users. * The access-control shim was stamping owner_principal/access_attributes as SQL NULL, which Postgres interprets as non-public rows; fixing it to use the empty-string/JSON-null pattern means conversations and responses stored without an authenticated user stay queryable (matching SQLite). * The inference-store queue remains for batching, but its worker tasks now start lazily on the live event loop so server startup doesn't cancel them — writes keep flowing even when the stack is launched via `llama stack run`. Closes #4115.

### Test Plan

Added a matrix entry to test our "base" suite against Postgres as the store.
81 lines
2.5 KiB
YAML
# Composite action: common setup for llama-stack integration tests.
# Installs dependencies via setup-runner, provisions the requested inference
# provider (ollama/vllm) when recording, optionally starts a Postgres
# container for suites that use it as the store, builds the stack with uv,
# and configures a git identity for steps that commit (e.g. recordings).
name: 'Setup Test Environment'
description: 'Common setup steps for integration tests including dependencies, providers, and build'

inputs:
  python-version:
    description: 'Python version to use'
    required: true
  client-version:
    description: 'Client version (latest or published)'
    required: true
  setup:
    description: 'Setup to configure (ollama, vllm, gpt, etc.)'
    required: false
    default: 'ollama'
  suite:
    description: 'Test suite to use: base, responses, vision, etc.'
    required: false
    default: ''
  inference-mode:
    description: 'Inference mode (record or replay)'
    required: true

runs:
  using: 'composite'
  steps:
    - name: Install dependencies
      uses: ./.github/actions/setup-runner
      with:
        python-version: ${{ inputs.python-version }}
        client-version: ${{ inputs.client-version }}

    # Provider containers are only needed when recording; replay mode uses
    # stored recordings and skips them.
    - name: Setup ollama
      if: ${{ (inputs.setup == 'ollama' || inputs.setup == 'ollama-vision') && inputs.inference-mode == 'record' }}
      uses: ./.github/actions/setup-ollama
      with:
        suite: ${{ inputs.suite }}

    - name: Setup vllm
      if: ${{ inputs.setup == 'vllm' && inputs.inference-mode == 'record' }}
      uses: ./.github/actions/setup-vllm

    # Runs for any setup whose name contains "postgres" (e.g. a matrix entry
    # exercising the base suite against Postgres as the store).
    - name: Start Postgres service
      if: ${{ contains(inputs.setup, 'postgres') }}
      shell: bash
      run: |
        # Remove any stale container from a previous run on this runner.
        sudo docker rm -f postgres-ci || true
        sudo docker run -d --name postgres-ci \
          -e POSTGRES_USER=llamastack \
          -e POSTGRES_PASSWORD=llamastack \
          -e POSTGRES_DB=llamastack \
          -p 5432:5432 \
          postgres:16

        # Poll readiness for up to 60s (30 attempts x 2s) before giving up.
        echo "Waiting for Postgres to become ready..."
        for i in {1..30}; do
          if sudo docker exec postgres-ci pg_isready -U llamastack -d llamastack >/dev/null 2>&1; then
            echo "Postgres is ready"
            break
          fi
          if [ "$i" -eq 30 ]; then
            echo "Postgres failed to start in time"
            sudo docker logs postgres-ci || true
            exit 1
          fi
          sleep 2
        done

    - name: Build Llama Stack
      shell: bash
      run: |
        # Client is already installed by setup-runner (handles both main and release branches)
        echo "Building Llama Stack"

        LLAMA_STACK_DIR=. \
        uv run --no-sync llama stack list-deps ci-tests | xargs -L1 uv pip install

    # Identity used by later steps that commit (e.g. pushing new recordings).
    - name: Configure git for commits
      shell: bash
      run: |
        git config --local user.email "github-actions[bot]@users.noreply.github.com"
        git config --local user.name "github-actions[bot]"