Fix report generation for url endpoints (#876)

Earlier, we relied on some implicit magic to identify the report path for
remote endpoints when running the client-sdk tests.
That has been removed; you now have to pass the path explicitly.
Hardik Shah 2025-01-24 13:15:44 -08:00 committed by GitHub
parent 087a83f673
commit 632e60439a
3 changed files with 34 additions and 18 deletions

tests/client-sdk/README.md

@@ -6,6 +6,11 @@ To test on a Llama Stack library with certain configuration, run
 LLAMA_STACK_CONFIG=./llama_stack/templates/cerebras/run.yaml
 pytest -s -v tests/client-sdk/inference/test_inference.py
 ```
+or just the template name
+```bash
+LLAMA_STACK_CONFIG=together
+pytest -s -v tests/client-sdk/inference/test_inference.py
+```
 
 To test on a Llama Stack endpoint, run
 ```bash
@@ -13,9 +18,17 @@ LLAMA_STACK_BASE_URL=http://localhost:8089
 pytest -s -v tests/client-sdk/inference/test_inference.py
 ```
+## Report Generation
+To generate a report, run with the `--report` option:
+```bash
+LLAMA_STACK_CONFIG=together pytest -s -v tests/client-sdk/ --report
+```
 
 ## Common options
 Depending on the API, there are custom options enabled:
 - For tests in `inference/` and `agents/`, we support `--inference-model` (to be used in text inference tests) and `--vision-inference-model` (only used in image inference tests) overrides
 - For tests in `vector_io/`, we support `--embedding-model` override
 - For tests in `safety/`, we support `--safety-shield` override
+- The report flag can be `--report` or `--report <path>`.
+If a path is not provided, we make a best-effort attempt to infer it from the config / template name. For URL endpoints, a path is required.
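
Put together, a run against a URL endpoint needs an explicit report path, while overrides such as `--inference-model` stay optional. A hypothetical invocation via `pytest.main`, equivalent to the shell commands above; the endpoint URL and model name are reused from examples elsewhere in this commit:

```python
# Hypothetical example, not part of the commit: drive the client-sdk tests from Python.
import os

import pytest

os.environ["LLAMA_STACK_BASE_URL"] = "http://localhost:8089"  # assumed local endpoint

pytest.main([
    "-s", "-v",
    "tests/client-sdk/inference/test_inference.py",
    "--inference-model", "meta-llama/Llama-3.1-8B-Instruct",  # optional override
    "--report", "report.md",  # required because LLAMA_STACK_BASE_URL is set
])
```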

tests/client-sdk/conftest.py

@@ -16,8 +16,15 @@ from report import Report
 def pytest_configure(config):
     config.option.tbstyle = "short"
     config.option.disable_warnings = True
-    if config.getoption("--report"):
-        config.pluginmanager.register(Report())
+    # Note:
+    # if --report is not passed on the pytest command line, report_path will be False (the default)
+    # if --report is passed without a value, report_path will be None (in this case we infer report_path)
+    # if --report /a/b is passed, report_path will be set to the provided path
+    # We want to handle all these cases and hence explicitly check for False
+    report_path = config.getoption("--report")
+    if report_path is not False:
+        config.pluginmanager.register(Report(report_path))
 
 
 TEXT_MODEL = "meta-llama/Llama-3.1-8B-Instruct"
@@ -27,9 +34,11 @@ VISION_MODEL = "meta-llama/Llama-3.2-11B-Vision-Instruct"
 def pytest_addoption(parser):
     parser.addoption(
         "--report",
+        action="store",
         default=False,
-        action="store_true",
-        help="Knob to determine if we should generate report, e.g. --output=True",
+        nargs="?",
+        type=str,
+        help="Path where the test report should be written, e.g. --report=/path/to/report.md",
     )
     parser.addoption(
         "--inference-model",

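For reference, the three possible values of `--report` described in the comment above can be reproduced with plain `argparse`, since pytest's `addoption` passes these keyword arguments through to it. A minimal standalone sketch, not part of the commit:

```python
# Sketch only: mirrors the `--report` option definition above with plain argparse.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--report",
    action="store",
    default=False,  # option absent              -> False
    nargs="?",      # option given with no value -> None (argparse falls back to const, which is None)
    type=str,       # option given with a value  -> that string
)

print(parser.parse_args([]).report)                          # False: do not register the Report plugin
print(parser.parse_args(["--report"]).report)                # None: register the plugin and infer the path
print(parser.parse_args(["--report", "/tmp/r.md"]).report)   # /tmp/r.md: register and write to this path
```

This is also why `pytest_configure` checks `report_path is not False` rather than simple truthiness: both `None` and a non-empty path should trigger report generation.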
tests/client-sdk/report.py

@@ -9,6 +9,7 @@ import importlib
 import os
 from collections import defaultdict
 from pathlib import Path
+from typing import Optional
 from urllib.parse import urlparse
 
 import pytest
@@ -85,7 +86,7 @@ SUPPORTED_MODELS = {
 
 class Report:
-    def __init__(self):
+    def __init__(self, report_path: Optional[str] = None):
         if os.environ.get("LLAMA_STACK_CONFIG"):
             config_path_or_template_name = get_env_or_fail("LLAMA_STACK_CONFIG")
             if config_path_or_template_name.endswith(".yaml"):
@@ -107,23 +108,16 @@ class Report:
             self.image_name = self.client.async_client.config.image_name
         elif os.environ.get("LLAMA_STACK_BASE_URL"):
             url = get_env_or_fail("LLAMA_STACK_BASE_URL")
-            hostname = urlparse(url).netloc
-            domain = hostname.split(".")[-2]
-            self.image_name = domain
+            self.image_name = urlparse(url).netloc
+            if report_path is None:
+                raise ValueError(
+                    "Report path must be provided when LLAMA_STACK_BASE_URL is set"
+                )
+            self.output_path = Path(report_path)
             self.client = LlamaStackClient(
                 base_url=url,
                 provider_data=None,
             )
-            # We assume that the domain maps to a template
-            # i.e. https://llamastack-preview.fireworks.ai --> "fireworks" template
-            # and add report in that directory
-            output_dir = Path(
-                importlib.resources.files("llama_stack") / f"templates/{domain}/"
-            )
-            if not output_dir.exists():
-                raise ValueError(f"Output dir {output_dir} does not exist")
-            self.output_path = Path(output_dir / "remote-hosted-report.md")
         else:
             raise ValueError("LLAMA_STACK_CONFIG or LLAMA_STACK_BASE_URL must be set")
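
Taken together with the README change above, the new behaviour can be condensed as follows. This is a standalone sketch rather than code from the commit; `resolve_report_path` is a hypothetical helper, and the handling of an explicit path in the `LLAMA_STACK_CONFIG` branch is inferred from the README text rather than shown in this hunk.

```python
# Sketch of the report-path rule after this change (hypothetical helper).
import os
from pathlib import Path
from typing import Optional


def resolve_report_path(report_path: Optional[str]) -> Optional[Path]:
    """Return the report path to use, or None to fall back to template-based inference."""
    if os.environ.get("LLAMA_STACK_BASE_URL"):
        # URL endpoints: the hostname-to-template guess is gone; --report <path> is mandatory.
        if report_path is None:
            raise ValueError("Report path must be provided when LLAMA_STACK_BASE_URL is set")
        return Path(report_path)
    if os.environ.get("LLAMA_STACK_CONFIG"):
        # Library configs / templates: an explicit path wins; otherwise the existing
        # config/template-name inference (not shown in this hunk) picks a default.
        return Path(report_path) if report_path else None
    raise ValueError("LLAMA_STACK_CONFIG or LLAMA_STACK_BASE_URL must be set")
```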