Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-24 13:19:54 +00:00)
# What does this PR do?

Allow users to specify only the providers they want in the `llama stack build` command. If a user wants a non-interactive build but doesn't want to use a template, `--providers` lets them specify something like `--providers inference=remote::ollama` for a distro with just Ollama.

## Test Plan

`llama stack build --providers inference=remote::ollama --image-type venv`

<img width="1084" alt="Screenshot 2025-03-20 at 9 34 14 AM" src="https://github.com/user-attachments/assets/502b5fa2-edab-4267-a595-4f987204a6a9" />

`llama stack run --image-type venv /Users/charliedoern/projects/Documents/llama-stack/venv-run.yaml`

<img width="1149" alt="Screenshot 2025-03-20 at 9 35 19 AM" src="https://github.com/user-attachments/assets/433765f3-6b7f-4383-9241-dad085b69228" />

---------

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Signed-off-by: Sébastien Han <seb@redhat.com>
Co-authored-by: Sébastien Han <seb@redhat.com>
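For context, the sketch below shows roughly what a single-provider build config produced by `--providers inference=remote::ollama` might look like. It assumes the layout used by the repo's template `build.yaml` files; the keys and values are illustrative and are not taken from this PR.

```yaml
# Illustrative sketch only: a minimal build config for a distro with just the
# Ollama inference provider, assuming the template build.yaml layout.
version: '2'
distribution_spec:
  description: Custom distro with only the remote Ollama inference provider
  providers:
    inference:
      - remote::ollama
image_type: venv
```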
109 lines · 3.4 KiB · YAML
```yaml
name: Test Llama Stack Build

on:
  push:
    branches:
      - main
    paths:
      - 'llama_stack/cli/stack/build.py'
      - 'llama_stack/cli/stack/_build.py'
      - 'llama_stack/distribution/build.*'
      - 'llama_stack/distribution/*.sh'
      - '.github/workflows/providers-build.yml'
  pull_request:
    paths:
      - 'llama_stack/cli/stack/build.py'
      - 'llama_stack/cli/stack/_build.py'
      - 'llama_stack/distribution/build.*'
      - 'llama_stack/distribution/*.sh'
      - '.github/workflows/providers-build.yml'

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      templates: ${{ steps.set-matrix.outputs.templates }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Generate Template List
        id: set-matrix
        run: |
          templates=$(ls llama_stack/templates/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
          echo "templates=$templates" >> "$GITHUB_OUTPUT"

  build:
    needs: generate-matrix
    runs-on: ubuntu-latest
    strategy:
      matrix:
        template: ${{ fromJson(needs.generate-matrix.outputs.templates) }}
        image-type: [venv, container]
      fail-fast: false # We want to run all jobs even if some fail

    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

      - name: Set up Python
        uses: actions/setup-python@8d9ed9ac5c53483de85588cdf95a591a75ab9f55 # v5.5.0
        with:
          python-version: '3.10'

      - name: Install uv
        uses: astral-sh/setup-uv@0c5e2b8115b80b4c7c5ddf6ffdd634974642d182 # v5.4.1
        with:
          python-version: "3.10"

      - name: Install LlamaStack
        run: |
          uv venv
          source .venv/bin/activate
          uv pip install -e .

      - name: Print build dependencies
        run: |
          uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only

      - name: Run Llama Stack Build
        run: |
          # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
          # LLAMA_STACK_DIR is set to the current directory so we are building from the source
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test

      - name: Print dependencies in the image
        if: matrix.image-type == 'venv'
        run: |
          source test/bin/activate
          uv pip list

  build-single-provider:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Install uv
        uses: astral-sh/setup-uv@v5
        with:
          python-version: "3.10"

      - name: Install LlamaStack
        run: |
          uv venv
          source .venv/bin/activate
          uv pip install -e .

      - name: Build a single provider
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --image-type venv --image-name test --providers inference=remote::ollama
```
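If the `build-single-provider` job should also verify its output, a step along the lines of the sketch below could be appended. It simply mirrors the existing `Print dependencies in the image` step from the `build` job and assumes the venv build creates an environment named `test`; it is not part of the workflow above.

```yaml
      # Optional verification step (sketch): list what ended up in the venv
      # created by the single-provider build, assuming it is named "test".
      - name: Print dependencies in the image
        run: |
          source test/bin/activate
          uv pip list
```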