chore: enable pyupgrade fixes (#1806)
# What does this PR do?

The goal of this PR is code base modernization. Schema reflection code needed a minor adjustment to handle UnionTypes and collections.abc.AsyncIterator. (Both are preferred in recent Python releases.)

Note to reviewers: almost all changes here were generated automatically by pyupgrade. Some additional unused imports were cleaned up. The only change worth noting is under `docs/openapi_generator` and `llama_stack/strong_typing/schema.py`, where reflection code was updated to deal with the "newer" types.

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
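For reviewers unfamiliar with those "newer" types, the sketch below shows the kind of distinction the reflection code now has to make. The helper name and structure are illustrative only and are not the actual `strong_typing` API.

```python
import collections.abc
import types
import typing


def unwrap_hint(hint):
    """Illustrative sketch: normalize a modern type hint for reflection.

    `int | None` (PEP 604) is a types.UnionType at runtime rather than a
    typing.Union, and async streaming endpoints are annotated with
    collections.abc.AsyncIterator instead of typing.AsyncIterator, so
    reflection code has to recognize both spellings.
    """
    origin = typing.get_origin(hint)
    if origin is typing.Union or isinstance(hint, types.UnionType):
        return typing.get_args(hint)  # X | Y -> (X, Y)
    if origin is collections.abc.AsyncIterator:
        return typing.get_args(hint)  # AsyncIterator[X] -> (X,)
    return (hint,)


assert unwrap_hint(int | None) == (int, type(None))
assert unwrap_hint(collections.abc.AsyncIterator[str]) == (str,)
```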
Parent: ffe3d0b2cd
Commit: 9e6561a1ec
319 changed files with 2843 additions and 3033 deletions
@@ -50,7 +50,7 @@ import subprocess
 import time
 from collections import defaultdict
 from pathlib import Path
-from typing import Any, DefaultDict, Dict, Set, Tuple
+from typing import Any
 
 from tests.verifications.openai_api.fixtures.fixtures import _load_all_verification_configs
 
@@ -106,7 +106,7 @@ def run_tests(provider, keyword=None):
 
     # Check if the JSON file was created
     if temp_json_file.exists():
-        with open(temp_json_file, "r") as f:
+        with open(temp_json_file) as f:
             test_results = json.load(f)
 
             test_results["run_timestamp"] = timestamp
 
@@ -141,7 +141,7 @@ def run_multiple_tests(providers_to_run: list[str], keyword: str | None):
 
 def parse_results(
     result_file,
-) -> Tuple[DefaultDict[str, DefaultDict[str, Dict[str, bool]]], DefaultDict[str, Set[str]], Set[str], str]:
+) -> tuple[defaultdict[str, defaultdict[str, dict[str, bool]]], defaultdict[str, set[str]], set[str], str]:
     """Parse a single test results file.
 
     Returns:
 
@@ -156,13 +156,13 @@ def parse_results(
         # Return empty defaultdicts/set matching the type hint
         return defaultdict(lambda: defaultdict(dict)), defaultdict(set), set(), ""
 
-    with open(result_file, "r") as f:
+    with open(result_file) as f:
         results = json.load(f)
 
     # Initialize results dictionary with specific types
-    parsed_results: DefaultDict[str, DefaultDict[str, Dict[str, bool]]] = defaultdict(lambda: defaultdict(dict))
-    providers_in_file: DefaultDict[str, Set[str]] = defaultdict(set)
-    tests_in_file: Set[str] = set()
+    parsed_results: defaultdict[str, defaultdict[str, dict[str, bool]]] = defaultdict(lambda: defaultdict(dict))
+    providers_in_file: defaultdict[str, set[str]] = defaultdict(set)
+    tests_in_file: set[str] = set()
     # Extract provider from filename (e.g., "openai.json" -> "openai")
     provider: str = result_file.stem
 
@@ -248,10 +248,10 @@ def parse_results(
 
 
 def generate_report(
-    results_dict: Dict[str, Any],
-    providers: Dict[str, Set[str]],
-    all_tests: Set[str],
-    provider_timestamps: Dict[str, str],
+    results_dict: dict[str, Any],
+    providers: dict[str, set[str]],
+    all_tests: set[str],
+    provider_timestamps: dict[str, str],
     output_file=None,
 ):
     """Generate the markdown report.
 
@@ -277,8 +277,8 @@ def generate_report(
     sorted_tests = sorted(all_tests)
 
     # Calculate counts for each base test name
-    base_test_case_counts: DefaultDict[str, int] = defaultdict(int)
-    base_test_name_map: Dict[str, str] = {}
+    base_test_case_counts: defaultdict[str, int] = defaultdict(int)
+    base_test_name_map: dict[str, str] = {}
     for test_name in sorted_tests:
         match = re.match(r"^(.*?)( \([^)]+\))?$", test_name)
         if match:
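Most of the hunks above are the mechanical part of the modernization: pyupgrade rewrites the `typing.Dict`/`Set`/`Tuple`/`DefaultDict` aliases to the built-in generics of PEP 585 and tuple-style unions to PEP 604 syntax. A minimal before/after on a hypothetical function, not taken from this diff:

```python
from collections import defaultdict

# Before (pre-3.9 typing aliases):
#   from typing import DefaultDict, Dict, List, Optional, Set
#   def summarize(results: Dict[str, List[int]], seen: Optional[Set[str]] = None) -> DefaultDict[str, int]:

# After pyupgrade (built-in generics plus a PEP 604 union):
def summarize(results: dict[str, list[int]], seen: set[str] | None = None) -> defaultdict[str, int]:
    counts: defaultdict[str, int] = defaultdict(int)
    for name, values in results.items():
        if seen is None or name in seen:
            counts[name] = len(values)
    return counts
```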
@@ -18,7 +18,7 @@ def pytest_generate_tests(metafunc):
 
     try:
         config_data = _load_all_verification_configs()
-    except (FileNotFoundError, IOError) as e:
+    except (OSError, FileNotFoundError) as e:
         print(f"ERROR loading verification configs: {e}")
         config_data = {"providers": {}}
 
@@ -33,7 +33,7 @@ def _load_all_verification_configs():
     for config_path in yaml_files:
         provider_name = config_path.stem
         try:
-            with open(config_path, "r") as f:
+            with open(config_path) as f:
                 provider_config = yaml.safe_load(f)
                 if provider_config:
                     all_provider_configs[provider_name] = provider_config
 
@@ -41,7 +41,7 @@ def _load_all_verification_configs():
                 # Log warning if possible, or just skip empty files silently
                 print(f"Warning: Config file {config_path} is empty or invalid.")
         except Exception as e:
-            raise IOError(f"Error loading config file {config_path}: {e}") from e
+            raise OSError(f"Error loading config file {config_path}: {e}") from e
 
     return {"providers": all_provider_configs}
 
@@ -49,7 +49,7 @@ def _load_all_verification_configs():
 def case_id_generator(case):
     """Generate a test ID from the case's 'case_id' field, or use a default."""
     case_id = case.get("case_id")
-    if isinstance(case_id, (str, int)):
+    if isinstance(case_id, str | int):
         return re.sub(r"\W|^(?=\d)", "_", str(case_id))
     return None
 
@@ -77,7 +77,7 @@ def verification_config():
     """Pytest fixture to provide the loaded verification config."""
     try:
         return _load_all_verification_configs()
-    except (FileNotFoundError, IOError) as e:
+    except (OSError, FileNotFoundError) as e:
         pytest.fail(str(e))  # Fail test collection if config loading fails
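The exception and `isinstance` rewrites above rely on two runtime facts: `IOError` has been an alias of `OSError` since Python 3.3, and `isinstance` accepts PEP 604 unions on Python 3.10+. A quick sanity check, for illustration only:

```python
# IOError is just another name for OSError, so catching OSError covers both.
assert IOError is OSError

# isinstance() accepts X | Y unions on 3.10+, equivalent to the (str, int) tuple form.
assert isinstance("case-1", str | int)
assert isinstance(7, str | int)
assert not isinstance(3.5, str | int)
```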
@@ -12,5 +12,5 @@ import yaml
 def load_test_cases(name: str):
     fixture_dir = Path(__file__).parent / "test_cases"
     yaml_path = fixture_dir / f"{name}.yaml"
-    with open(yaml_path, "r") as f:
+    with open(yaml_path) as f:
         return yaml.safe_load(f)