Fix report generation for url endpoints (#876)
Earlier, we had some unknown magic to identify the report path for remote endpoints when testing client_sdk/tests. That magic has been removed; you now have to pass a path explicitly.
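For illustration, here is the same workflow driven from Python rather than the shell. This is a hedged sketch: the endpoint URL and report destination are placeholder values, not anything mandated by this change.

```python
import os

import pytest

# Point the client-sdk tests at a remote endpoint. With a URL endpoint the
# report location can no longer be inferred, so it is passed explicitly.
os.environ["LLAMA_STACK_BASE_URL"] = "http://localhost:8089"  # placeholder endpoint

exit_code = pytest.main(
    ["-s", "-v", "tests/client-sdk/", "--report", "/tmp/client-sdk-report.md"]
)
```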
This commit is contained in:
parent 087a83f673
commit 632e60439a

3 changed files with 34 additions and 18 deletions
@@ -6,6 +6,11 @@ To test on a Llama Stack library with certain configuration, run
 LLAMA_STACK_CONFIG=./llama_stack/templates/cerebras/run.yaml
 pytest -s -v tests/client-sdk/inference/test_inference.py
 ```
+or just the template name
+```bash
+LLAMA_STACK_CONFIG=together
+pytest -s -v tests/client-sdk/inference/test_inference.py
+```

 To test on a Llama Stack endpoint, run
 ```bash
@@ -13,9 +18,17 @@ LLAMA_STACK_BASE_URL=http//localhost:8089
 pytest -s -v tests/client-sdk/inference/test_inference.py
 ```

+## Report Generation
+
+To generate a report, run with `--report` option
+```bash
+LLAMA_STACK_CONFIG=together pytest -s -v report.md tests/client-sdk/ --report
+```

 ## Common options
 Depending on the API, there are custom options enabled
 - For tests in `inference/` and `agents/`, we support `--inference-model` (to be used in text inference tests) and `--vision-inference-model` (only used in image inference tests) overrides
 - For tests in `vector_io/`, we support `--embedding-model` override
 - For tests in `safety/`, we support `--safety-shield` override
+- The param can be `--report` or `--report <path>`
+If path is not provided, we do a best effort to infer based on the config / template name. For url endpoints, path is required.
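The resolution rule described in the README hunk above can be sketched as follows. This is a simplified, hypothetical helper rather than the code in this commit, and the inferred location under the template directory is an assumption.

```python
from pathlib import Path
from typing import Optional


def resolve_report_path(report_path: Optional[str], config_or_url: str) -> Path:
    """Best-effort resolution of the report location (hypothetical helper).

    Mirrors the rule stated in the README: an explicit path always wins, a
    config/template name lets us infer a location, and a URL endpoint must
    come with an explicit path because nothing can be inferred from it.
    """
    if report_path is not None:
        return Path(report_path)
    if config_or_url.startswith("http"):
        raise ValueError("--report <path> is required for url endpoints")
    # Template name, e.g. "together", or a run.yaml inside a template directory.
    template = (
        Path(config_or_url).parent.name
        if config_or_url.endswith(".yaml")
        else config_or_url
    )
    # Assumed inferred location; the real code may differ.
    return Path("llama_stack/templates") / template / "report.md"
```

For example, `resolve_report_path(None, "together")` would yield `llama_stack/templates/together/report.md`, while `resolve_report_path(None, "http://localhost:8089")` raises, matching the "path is required for url endpoints" rule.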
@@ -16,8 +16,15 @@ from report import Report
 def pytest_configure(config):
     config.option.tbstyle = "short"
     config.option.disable_warnings = True
-    if config.getoption("--report"):
-        config.pluginmanager.register(Report())
+    # Note:
+    # if report_path is not provided (aka no option --report in the pytest command),
+    # it will be set to False
+    # if --report will give None ( in this case we infer report_path)
+    # if --report /a/b is provided, it will be set to the path provided
+    # We want to handle all these cases and hence explicitly check for False
+    report_path = config.getoption("--report")
+    if report_path is not False:
+        config.pluginmanager.register(Report(report_path))


 TEXT_MODEL = "meta-llama/Llama-3.1-8B-Instruct"
@@ -27,9 +34,11 @@ VISION_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
 def pytest_addoption(parser):
     parser.addoption(
         "--report",
-        action="store_true",
+        action="store",
         default=False,
+        nargs="?",
+        type=str,
-        help="Knob to determine if we should generate report, e.g. --output=True",
+        help="Path where the test report should be written, e.g. --report=/path/to/report.md",
     )
     parser.addoption(
         "--inference-model",
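The comment block in `pytest_configure` above relies on standard argparse semantics for `nargs="?"` combined with `default=False`, and pytest's `parser.addoption` accepts the same keyword arguments. A standalone sketch of the three cases:

```python
import argparse

# Same option shape as the pytest addoption above, reproduced with plain argparse.
parser = argparse.ArgumentParser()
parser.add_argument("--report", action="store", default=False, nargs="?", type=str)

print(parser.parse_args([]).report)                    # False -> reporting disabled
print(parser.parse_args(["--report"]).report)          # None  -> infer the report path
print(parser.parse_args(["--report", "/a/b"]).report)  # /a/b  -> use the explicit path
```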
@@ -9,6 +9,7 @@ import importlib
 import os
 from collections import defaultdict
 from pathlib import Path
+from typing import Optional
 from urllib.parse import urlparse

 import pytest
@@ -85,7 +86,7 @@ SUPPORTED_MODELS = {

 class Report:

-    def __init__(self):
+    def __init__(self, report_path: Optional[str] = None):
         if os.environ.get("LLAMA_STACK_CONFIG"):
             config_path_or_template_name = get_env_or_fail("LLAMA_STACK_CONFIG")
             if config_path_or_template_name.endswith(".yaml"):
@@ -107,23 +108,16 @@ class Report:
             self.image_name = self.client.async_client.config.image_name
         elif os.environ.get("LLAMA_STACK_BASE_URL"):
             url = get_env_or_fail("LLAMA_STACK_BASE_URL")
-            hostname = urlparse(url).netloc
-            domain = hostname.split(".")[-2]
-            self.image_name = domain
-
+            self.image_name = urlparse(url).netloc
+            if report_path is None:
+                raise ValueError(
+                    "Report path must be provided when LLAMA_STACK_BASE_URL is set"
+                )
+            self.output_path = Path(report_path)
             self.client = LlamaStackClient(
                 base_url=url,
                 provider_data=None,
             )
-            # We assume that the domain maps to a template
-            # i.e. https://llamastack-preview.fireworks.ai --> "fireworks" template
-            # and add report in that directory
-            output_dir = Path(
-                importlib.resources.files("llama_stack") / f"templates/{domain}/"
-            )
-            if not output_dir.exists():
-                raise ValueError(f"Output dir {output_dir} does not exist")
-            self.output_path = Path(output_dir / "remote-hosted-report.md")
         else:
             raise ValueError("LLAMA_STACK_CONFIG or LLAMA_STACK_BASE_URL must be set")
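The deleted heuristic took `urlparse(url).netloc.split(".")[-2]`, which only yields a template-like name when the hostname has at least two dot-separated labels. A quick check, using URLs taken from the examples above, shows why the explicit report path is the safer contract:

```python
from urllib.parse import urlparse

# Hosted endpoint from the removed comment: the heuristic happens to recover "fireworks".
netloc = urlparse("https://llamastack-preview.fireworks.ai").netloc
print(netloc)                 # llamastack-preview.fireworks.ai
print(netloc.split(".")[-2])  # fireworks

# Local endpoint from the README example: no dots in the hostname, so the
# heuristic cannot produce a template name at all.
netloc = urlparse("http://localhost:8089").netloc
print(netloc)  # localhost:8089
try:
    print(netloc.split(".")[-2])
except IndexError:
    print("no template can be inferred; pass --report <path> explicitly")
```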