mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-25 05:39:47 +00:00
# What does this PR do? A PTY is unnecessary for interactive mode since `subprocess.run()` already inherits the calling terminal’s stdin, stdout, and stderr, allowing natural interaction. Using a PTY can introduce unwanted side effects like buffering issues and inconsistent signal handling. Standard input/output is sufficient for most interactive programs. This commit simplifies the command execution by: 1. Removing PTY-based execution in favor of direct subprocess handling 2. Consolidating command execution into a single run_command function 3. Improving error handling with specific subprocess error types 4. Adding proper type hints and documentation 5. Maintaining Ctrl+C handling for graceful interruption ## Test Plan ``` llama stack run ``` Signed-off-by: Sébastien Han <seb@redhat.com>
79 lines
2.4 KiB
YAML
# Workflow: build every Llama Stack template as both a venv and a container
# image, to catch regressions in the `llama stack build` CLI path.
name: Test Llama Stack Build

on:
  push:
    branches:
      - main
    paths:
      - 'llama_stack/cli/stack/build.py'
      - 'llama_stack/cli/stack/_build.py'
      - 'llama_stack/distribution/build.*'
      - 'llama_stack/distribution/*.sh'
      - '.github/workflows/providers-build.yml'
  pull_request:
    paths:
      - 'llama_stack/cli/stack/build.py'
      - 'llama_stack/cli/stack/_build.py'
      - 'llama_stack/distribution/build.*'
      - 'llama_stack/distribution/*.sh'
      - '.github/workflows/providers-build.yml'

jobs:
  # Discover the template names under llama_stack/templates/ and expose them
  # as a JSON array for the build job's matrix.
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      templates: ${{ steps.set-matrix.outputs.templates }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Generate Template List
        id: set-matrix
        run: |
          # Template name is the directory containing each *build.yaml;
          # jq -R -s -c turns the newline-separated list into a JSON array.
          templates=$(ls llama_stack/templates/*/*build.yaml | awk -F'/' '{print $(NF-1)}' | jq -R -s -c 'split("\n")[:-1]')
          echo "templates=$templates" >> "$GITHUB_OUTPUT"

  # Build each template with each image type (venv and container).
  build:
    needs: generate-matrix
    runs-on: ubuntu-latest
    strategy:
      matrix:
        template: ${{ fromJson(needs.generate-matrix.outputs.templates) }}
        image-type: [venv, container]
      fail-fast: false  # We want to run all jobs even if some fail

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Quoted: an unquoted 3.10 would parse as the float 3.1
          python-version: "3.10"

      - name: Install uv
        uses: astral-sh/setup-uv@v5
        with:
          python-version: "3.10"

      - name: Install LlamaStack
        run: |
          uv venv
          source .venv/bin/activate
          uv pip install -e .

      - name: Print build dependencies
        run: |
          uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test --print-deps-only

      - name: Run Llama Stack Build
        run: |
          # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
          # LLAMA_STACK_DIR is set to the current directory so we are building from the source
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test

      - name: Print dependencies in the image
        # Only the venv image type produces a local "test" venv we can inspect
        if: matrix.image-type == 'venv'
        run: |
          source test/bin/activate
          uv pip list