refactor: simplify command execution and remove PTY handling (#1641)
# What does this PR do?

A PTY is unnecessary for interactive mode since `subprocess.run()` already inherits the calling terminal's stdin, stdout, and stderr, allowing natural interaction. Using a PTY can introduce unwanted side effects like buffering issues and inconsistent signal handling. Standard input/output is sufficient for most interactive programs.

This commit simplifies the command execution by:

1. Removing PTY-based execution in favor of direct subprocess handling
2. Consolidating command execution into a single run_command function
3. Improving error handling with specific subprocess error types
4. Adding proper type hints and documentation
5. Maintaining Ctrl+C handling for graceful interruption

## Test Plan

```
llama stack run
```

Signed-off-by: Sébastien Han <seb@redhat.com>
parent 77ca09467f, commit 24fd06879e
6 changed files with 31 additions and 131 deletions
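Before the workflow diffs below, here is a minimal sketch of what the consolidated `run_command` helper described above could look like. It is based only on the PR description; the function name, signature, and exit codes are illustrative assumptions, not the actual Llama Stack implementation.

```python
# Hypothetical sketch of a consolidated run_command helper, based on the PR
# description above; names, signature, and exit codes are assumptions.
import subprocess
import sys


def run_command(command: list[str]) -> int:
    """Run `command` directly, inheriting the terminal's stdin/stdout/stderr.

    subprocess.run() inherits the caller's standard streams by default, so no
    PTY is needed for interactive programs.
    """
    try:
        # check=True raises CalledProcessError on a non-zero exit code,
        # giving us a specific subprocess error type to handle.
        subprocess.run(command, check=True)
        return 0
    except subprocess.CalledProcessError as e:
        print(f"Command failed with exit code {e.returncode}", file=sys.stderr)
        return e.returncode
    except KeyboardInterrupt:
        # Ctrl+C: interrupt gracefully instead of printing a traceback
        print("\nInterrupted by user", file=sys.stderr)
        return 130
```

A caller such as `llama stack run` could then invoke `run_command([...])` and propagate the return code, keeping interactive programs usable without any PTY plumbing.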
.github/workflows/integration-tests.yml (vendored): 3 changes
```diff
@@ -56,8 +56,7 @@ jobs:
           INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
         run: |
           source .venv/bin/activate
-          # TODO: use "llama stack run"
-          nohup uv run python -m llama_stack.distribution.server.server --yaml-config ./llama_stack/templates/ollama/run.yaml > server.log 2>&1 &
+          nohup uv run llama stack run ./llama_stack/templates/ollama/run.yaml --image-type venv > server.log 2>&1 &

       - name: Wait for Llama Stack server to be ready
         run: |
```
.github/workflows/providers-build.yml (vendored): 5 changes
```diff
@@ -40,6 +40,7 @@ jobs:
       matrix:
         template: ${{ fromJson(needs.generate-matrix.outputs.templates) }}
         image-type: [venv, container]
+      fail-fast: false # We want to run all jobs even if some fail

     steps:
       - name: Checkout repository
@@ -67,7 +68,9 @@

       - name: Run Llama Stack Build
         run: |
-          uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test
+          # USE_COPY_NOT_MOUNT is set to true since mounting is not supported by docker buildx, we use COPY instead
+          # LLAMA_STACK_DIR is set to the current directory so we are building from the source
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --template ${{ matrix.template }} --image-type ${{ matrix.image-type }} --image-name test

       - name: Print dependencies in the image
         if: matrix.image-type == 'venv'
```