Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-13 04:22:35 +00:00)
feat(tests): allow cleanup of stray artifacts for local integration test runs
parent 05a62a6ffb · commit a0e6c8d383
3 changed files with 65 additions and 2 deletions
@@ -40,6 +40,16 @@ Model parameters can be influenced by the following options:
 Each of these is a comma-separated list and can be used to generate multiple parameter combinations. Note that tests will be skipped
 if no model is specified.

+### Database Cleanup
+
+Tests create persistent databases and files in `~/.llama/distributions/{distro_name}/`. When running locally, you can pass the `--clean-artifacts` flag to remove these artifacts once the test run finishes.
+
+```bash
+pytest tests/integration/ --stack-config=server:starter --clean-artifacts
+```
+
+Artifacts cleaned: `*.db`, `*.log`, and batch files in the `files/` directory.
+
 ### Suites and Setups

 - `--suite`: single named suite that narrows which tests are collected.
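As a side note, here is a minimal sketch of how one might preview which artifacts the flag targets for a given distribution before opting in. The `starter` name and the script itself are illustrative only, mirroring the docs example above; they are not part of this change:

```python
from pathlib import Path

# Example only: list the artifacts that --clean-artifacts would remove for the "starter" distro.
dist_dir = Path.home() / ".llama" / "distributions" / "starter"
artifacts = list(dist_dir.glob("*.db")) + list(dist_dir.glob("*.log"))

# Uploaded batch input/output files live under files/
files_dir = dist_dir / "files"
if files_dir.exists():
    artifacts.extend(files_dir.glob("*"))

for artifact in artifacts:
    print(artifact)
```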
@@ -138,6 +138,11 @@ def pytest_addoption(parser):
         ),
     )
     parser.addoption("--env", action="append", help="Set environment variables, e.g. --env KEY=value")
+    parser.addoption(
+        "--clean-artifacts",
+        action="store_true",
+        help="Clean up all artifacts (databases, logs, batch input/output files) after test execution",
+    )
     parser.addoption(
         "--text-model",
         help="comma-separated list of text models. Fixture name: text_model_id",
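For reference, a `store_true` option like this defaults to `False` and is read back through `request.config.getoption(...)`. A minimal, self-contained sketch of the pattern (the fixture name here is illustrative, not part of this change):

```python
# conftest.py (illustrative sketch)
import pytest


def pytest_addoption(parser):
    # Registers a boolean flag; absent -> False, passed -> True
    parser.addoption("--clean-artifacts", action="store_true", help="Remove test artifacts at teardown")


@pytest.fixture(scope="session")
def clean_artifacts_enabled(request):
    # Fixtures (and hooks) read the flag back via the config object
    return request.config.getoption("--clean-artifacts")
```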
@@ -4,14 +4,17 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

+import glob
 import inspect
 import os
 import shlex
+import shutil
 import signal
 import socket
 import subprocess
 import tempfile
 import time
+from pathlib import Path
 from urllib.parse import urlparse

 import pytest
@@ -61,7 +64,8 @@ def wait_for_server_ready(base_url: str, timeout: int = 30, process: subprocess.
     while time.time() - start_time < timeout:
         if process and process.poll() is not None:
             print(f"Server process terminated with return code: {process.returncode}")
-            print(f"Server stderr: {process.stderr.read()}")
+            if process.stderr:
+                print(f"Server stderr: {process.stderr.read()}")
             return False

         try:
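The new guard matters because `Popen.stderr` is only a readable stream when the process was started with `stderr=subprocess.PIPE`; otherwise it is `None`, and calling `.read()` on it raises `AttributeError`. A minimal sketch of the difference:

```python
import subprocess

# Without a pipe, the Popen object's stderr attribute is None.
plain = subprocess.Popen(["python", "-c", "pass"])
plain.wait()
print(plain.stderr)  # None

# With stderr=subprocess.PIPE, stderr is a readable stream.
piped = subprocess.Popen(["python", "-c", "pass"], stderr=subprocess.PIPE)
piped.wait()
print(piped.stderr.read())  # b'' here, but safe to read
```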
@@ -185,7 +189,51 @@ def llama_stack_client(request):
     start_time = time.time()
     client = instantiate_llama_stack_client(request.session)
     print(f"llama_stack_client instantiated in {time.time() - start_time:.3f}s")
-    return client
+
+    yield client
+
+    # Cleanup artifacts after all tests complete (if --clean-artifacts flag is set)
+    # removes databases, logs, and uploaded batch input/output files
+    if request.config.getoption("--clean-artifacts"):
+        config = request.config.getoption("--stack-config") or os.environ.get("LLAMA_STACK_CONFIG", "")
+        if not config:
+            return
+
+        image_names = []
+        if config.startswith("server:"):
+            image_names.append(config.split(":")[1])
+        elif config and not config.startswith("http") and "=" not in config:
+            from llama_stack.core.utils.config_resolution import resolve_config_or_distro, Mode
+
+            try:
+                config_path = resolve_config_or_distro(config, Mode.RUN)
+                with open(config_path) as f:
+                    config_data = yaml.safe_load(f)
+                image_names.append(config_data.get("image_name", Path(config).stem.replace("-run", "")))
+            except Exception:
+                if not config.endswith(".yaml"):
+                    image_names.append(config)
+
+        for image_name in image_names:
+            dist_dir = Path.home() / ".llama" / "distributions" / image_name
+            if not dist_dir.exists():
+                continue
+
+            artifacts = list(dist_dir.glob("*.db")) + list(dist_dir.glob("*.log"))
+
+            # Also clean uploaded files (batch input/output files)
+            files_dir = dist_dir / "files"
+            if files_dir.exists():
+                artifacts.extend(list(files_dir.glob("*")))
+
+            if artifacts:
+                print(f"\nCleaning {len(artifacts)} artifact(s) for '{image_name}'")
+                for artifact in artifacts:
+                    try:
+                        os.remove(artifact)
+                        print(f"  Removed: {artifact.name}")
+                    except Exception as e:
+                        print(f"  Warning: Could not remove {artifact}: {e}")


 def instantiate_llama_stack_client(session):
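Because the fixture now ends with `yield client` instead of `return client`, the cleanup block runs at fixture teardown, which per the in-code comment is after all dependent tests have completed. A minimal, self-contained sketch of that yield-fixture pattern (names are illustrative, not from this diff):

```python
import pytest


@pytest.fixture(scope="session")
def resource():
    handle = {"name": "demo"}  # setup: runs once, on first use
    yield handle               # tests execute while the fixture is live
    # teardown: everything after yield runs once the last dependent test finishes
    print("cleaning up", handle["name"])


def test_uses_resource(resource):
    assert resource["name"] == "demo"
```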