chore: enable pyupgrade fixes (#1806)

# What does this PR do?

The goal of this PR is code base modernization.

Schema reflection code needed a minor adjustment to handle UnionTypes
and collections.abc.AsyncIterator. (Both are preferred for latest Python
releases.)

Note to reviewers: almost all changes here are automatically generated
by pyupgrade. Some additional unused imports were cleaned up. The only
change worth noting can be found under `docs/openapi_generator` and
`llama_stack/strong_typing/schema.py`, where reflection code was updated
to deal with "newer" types.

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
This commit is contained in:
Ihar Hrachyshka 2025-05-01 17:23:50 -04:00 committed by GitHub
parent ffe3d0b2cd
commit 9e6561a1ec
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
319 changed files with 2843 additions and 3033 deletions

View file

@ -11,7 +11,8 @@
# LICENSE file in the root directory of this source tree.
import json
from typing import Any, Mapping
from collections.abc import Mapping
from typing import Any
from llama_stack.providers.utils.common.data_schema_validator import ColumnName

View file

@ -10,7 +10,8 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Mapping
from collections.abc import Mapping
from typing import Any
import numpy as np
from torch.utils.data import Dataset
@ -27,7 +28,7 @@ from llama_stack.providers.inline.post_training.torchtune.datasets.format_adapte
class SFTDataset(Dataset):
def __init__(
self,
rows: List[Dict[str, Any]],
rows: list[dict[str, Any]],
message_transform: Transform,
model_transform: Transform,
dataset_type: str,
@ -40,11 +41,11 @@ class SFTDataset(Dataset):
def __len__(self):
return len(self._rows)
def __getitem__(self, index: int) -> Dict[str, Any]:
def __getitem__(self, index: int) -> dict[str, Any]:
sample = self._rows[index]
return self._prepare_sample(sample)
def _prepare_sample(self, sample: Mapping[str, Any]) -> Dict[str, Any]:
def _prepare_sample(self, sample: Mapping[str, Any]) -> dict[str, Any]:
if self._dataset_type == "instruct":
sample = llama_stack_instruct_to_torchtune_instruct(sample)
elif self._dataset_type == "dialog":