test: use uv when starting server
# What does this PR do?

Start the llama stack server in tests via `uv run --with llama-stack llama stack build --template <config> --image-type venv --run` instead of invoking `llama stack run <config>` directly.

## Test Plan

This commit is contained in:
parent ffb6306fbd
commit 3067b29425

1 changed file with 3 additions and 2 deletions
@@ -6,6 +6,7 @@
 import inspect
 import os
+import shlex
 import signal
 import socket
 import subprocess

@@ -38,10 +39,10 @@ def is_port_available(port: int, host: str = "localhost") -> bool:

 def start_llama_stack_server(config_name: str) -> subprocess.Popen:
     """Start a llama stack server with the given config."""
-    cmd = ["llama", "stack", "run", config_name]
+    cmd = f"uv run --with llama-stack llama stack build --template {config_name} --image-type venv --run"
     devnull = open(os.devnull, "w")
     process = subprocess.Popen(
-        cmd,
+        shlex.split(cmd),
         stdout=devnull,  # redirect stdout to devnull to prevent deadlock
         stderr=subprocess.PIPE,  # keep stderr to see errors
         text=True,
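For context, below is a minimal sketch of the helper as it reads after this change, reassembled from the hunk above. The closing of the `subprocess.Popen` call, the `return process` line, and the usage block at the bottom (the `starter` template name, port 8321, the polling loop, and the SIGINT teardown) are assumptions added for illustration; they are not part of this commit.

```python
import os
import shlex
import signal
import socket
import subprocess
import time


def start_llama_stack_server(config_name: str) -> subprocess.Popen:
    """Start a llama stack server with the given config."""
    # After this change the server is built and launched through uv in a venv,
    # instead of invoking `llama stack run <config>` directly.
    cmd = f"uv run --with llama-stack llama stack build --template {config_name} --image-type venv --run"
    devnull = open(os.devnull, "w")
    process = subprocess.Popen(
        shlex.split(cmd),  # Popen expects an argv list, hence shlex.split on the command string
        stdout=devnull,  # redirect stdout to devnull to prevent deadlock
        stderr=subprocess.PIPE,  # keep stderr to see errors
        text=True,
    )
    return process  # assumed: the hunk is truncated before the function returns


if __name__ == "__main__":
    # Assumed usage pattern, not part of the diff: start the server, wait for
    # its port to come up, then shut it down.
    proc = start_llama_stack_server("starter")  # template name is a placeholder
    port = 8321  # assumed server port
    try:
        for _ in range(120):
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                if sock.connect_ex(("localhost", port)) == 0:
                    break  # something is listening, assume the server is up
            time.sleep(1)
    finally:
        proc.send_signal(signal.SIGINT)
        proc.wait(timeout=30)
```

Note that `shlex.split(cmd)` is needed because the command is now a single string rather than a list; passing the string straight to `Popen` without `shell=True` would fail.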