chore: enable pyupgrade fixes (#1806)

# What does this PR do?

The goal of this PR is code base modernization.

Schema reflection code needed a minor adjustment to handle `types.UnionType`
and `collections.abc.AsyncIterator`. (Both are the preferred spellings in
recent Python releases.)
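
For context: `Optional[str]` and `str | None` describe the same type but reflect differently, and `collections.abc.AsyncIterator` supersedes `typing.AsyncIterator`. Below is a minimal sketch (illustrative only, not the actual `llama_stack/strong_typing/schema.py` logic) of the kind of branching reflection code needs to treat both union spellings uniformly:

```python
import collections.abc
import types
import typing


def describe(annotation: object) -> str:
    """Render an annotation, treating both union spellings the same."""
    origin = typing.get_origin(annotation)
    # typing.Optional[str] has origin typing.Union, while str | None has
    # origin types.UnionType. Reflection must accept both.
    if origin is typing.Union or origin is types.UnionType:
        args = ", ".join(describe(a) for a in typing.get_args(annotation))
        return f"union[{args}]"
    # collections.abc.AsyncIterator[T] is the modern typing.AsyncIterator[T].
    if origin is collections.abc.AsyncIterator:
        (item,) = typing.get_args(annotation)
        return f"async_iterator[{describe(item)}]"
    return getattr(annotation, "__name__", repr(annotation))


assert describe(str | None) == "union[str, NoneType]"
assert describe(typing.Optional[str]) == "union[str, NoneType]"
assert describe(collections.abc.AsyncIterator[int]) == "async_iterator[int]"
```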

Note to reviewers: almost all changes here are automatically generated
by pyupgrade; some additional unused imports were also cleaned up. The only
changes worthy of note are under `docs/openapi_generator` and
`llama_stack/strong_typing/schema.py`, where reflection code was updated
to deal with the "newer" types.
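
For reviewers unfamiliar with the tool: pyupgrade mechanically rewrites pre-3.10 typing spellings into PEP 585 builtin generics and PEP 604 unions. A made-up before/after (not a file from this PR) showing the shape of the bulk of the diff:

```python
# Before: typing aliases required on older Pythons.
#   from typing import Dict, List, Optional
#   def get_jobs(page: Optional[int] = 1) -> Dict[str, List[str]]: ...

# After: builtin generics (PEP 585) and union syntax (PEP 604), no imports.
def get_jobs(page: int | None = 1) -> dict[str, list[str]]: ...
```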

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
Ihar Hrachyshka, 2025-05-01 17:23:50 -04:00 (committed by GitHub)
commit 9e6561a1ec (parent ffe3d0b2cd)
319 changed files with 2843 additions and 3033 deletions

```diff
@@ -5,7 +5,7 @@
 # the root directory of this source tree.

 import os
-from typing import Any, Dict, Optional
+from typing import Any

 from pydantic import BaseModel, Field
@@ -15,23 +15,23 @@ from pydantic import BaseModel, Field
 class NvidiaPostTrainingConfig(BaseModel):
     """Configuration for NVIDIA Post Training implementation."""

-    api_key: Optional[str] = Field(
+    api_key: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_API_KEY"),
         description="The NVIDIA API key.",
     )

-    dataset_namespace: Optional[str] = Field(
+    dataset_namespace: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_DATASET_NAMESPACE", "default"),
         description="The NVIDIA dataset namespace.",
     )

-    project_id: Optional[str] = Field(
+    project_id: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_PROJECT_ID", "test-example-model@v1"),
         description="The NVIDIA project ID.",
     )

     # ToDO: validate this, add default value
-    customizer_url: Optional[str] = Field(
+    customizer_url: str | None = Field(
         default_factory=lambda: os.getenv("NVIDIA_CUSTOMIZER_URL"),
         description="Base URL for the NeMo Customizer API",
     )
@@ -53,7 +53,7 @@ class NvidiaPostTrainingConfig(BaseModel):
     )

     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def sample_run_config(cls, **kwargs) -> dict[str, Any]:
         return {
             "api_key": "${env.NVIDIA_API_KEY:}",
             "dataset_namespace": "${env.NVIDIA_DATASET_NAMESPACE:default}",
@@ -71,27 +71,27 @@ class SFTLoRADefaultConfig(BaseModel):
     n_epochs: int = 50

     # NeMo customizer specific parameters
-    log_every_n_steps: Optional[int] = None
+    log_every_n_steps: int | None = None
     val_check_interval: float = 0.25
     sequence_packing_enabled: bool = False
     weight_decay: float = 0.01
     lr: float = 0.0001

     # SFT specific parameters
-    hidden_dropout: Optional[float] = None
-    attention_dropout: Optional[float] = None
-    ffn_dropout: Optional[float] = None
+    hidden_dropout: float | None = None
+    attention_dropout: float | None = None
+    ffn_dropout: float | None = None

     # LoRA default parameters
     lora_adapter_dim: int = 8
-    lora_adapter_dropout: Optional[float] = None
+    lora_adapter_dropout: float | None = None
     lora_alpha: int = 16

     # Data config
     batch_size: int = 8

     @classmethod
-    def sample_config(cls) -> Dict[str, Any]:
+    def sample_config(cls) -> dict[str, Any]:
         """Return a sample configuration for NVIDIA training."""
         return {
             "n_epochs": 50,
```

```diff
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from typing import List

 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.utils.inference.model_registry import (
@@ -24,5 +23,5 @@ _MODEL_ENTRIES = [
 ]


-def get_model_entries() -> List[ProviderModelEntry]:
+def get_model_entries() -> list[ProviderModelEntry]:
     return _MODEL_ENTRIES
```

```diff
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 import warnings
 from datetime import datetime
-from typing import Any, Dict, List, Literal, Optional
+from typing import Any, Literal

 import aiohttp
 from pydantic import BaseModel, ConfigDict
@@ -50,7 +50,7 @@ class NvidiaPostTrainingJob(PostTrainingJob):

 class ListNvidiaPostTrainingJobs(BaseModel):
-    data: List[NvidiaPostTrainingJob]
+    data: list[NvidiaPostTrainingJob]


 class NvidiaPostTrainingJobStatusResponse(PostTrainingJobStatusResponse):
@@ -83,11 +83,11 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         self,
         method: str,
         path: str,
-        headers: Optional[Dict[str, Any]] = None,
-        params: Optional[Dict[str, Any]] = None,
-        json: Optional[Dict[str, Any]] = None,
+        headers: dict[str, Any] | None = None,
+        params: dict[str, Any] | None = None,
+        json: dict[str, Any] | None = None,
         **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Helper method to make HTTP requests to the Customizer API."""
         url = f"{self.customizer_url}{path}"
         request_headers = self.headers.copy()
@@ -109,9 +109,9 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
     async def get_training_jobs(
         self,
-        page: Optional[int] = 1,
-        page_size: Optional[int] = 10,
-        sort: Optional[Literal["created_at", "-created_at"]] = "created_at",
+        page: int | None = 1,
+        page_size: int | None = 10,
+        sort: Literal["created_at", "-created_at"] | None = "created_at",
     ) -> ListNvidiaPostTrainingJobs:
         """Get all customization jobs.

         Updated the base class return type from ListPostTrainingJobsResponse to ListNvidiaPostTrainingJobs.
@@ -207,12 +207,12 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
     async def supervised_fine_tune(
         self,
         job_uuid: str,
-        training_config: Dict[str, Any],
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        training_config: dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
         model: str,
-        checkpoint_dir: Optional[str],
-        algorithm_config: Optional[AlgorithmConfig] = None,
+        checkpoint_dir: str | None,
+        algorithm_config: AlgorithmConfig | None = None,
     ) -> NvidiaPostTrainingJob:
         """
         Fine-tunes a model on a dataset.
@@ -423,8 +423,8 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper):
         finetuned_model: str,
         algorithm_config: DPOAlignmentConfig,
         training_config: TrainingConfig,
-        hyperparam_search_config: Dict[str, Any],
-        logger_config: Dict[str, Any],
+        hyperparam_search_config: dict[str, Any],
+        logger_config: dict[str, Any],
     ) -> PostTrainingJob:
         """Optimize a model based on preference data."""
         raise NotImplementedError("Preference optimization is not implemented yet")
```
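
One rewrite above worth a second look: `Optional[Literal[...]]` becomes `Literal[...] | None`. The two spellings produce the same runtime type, as this standalone check (not repo code) shows:

```python
import typing

old_style = typing.Optional[typing.Literal["created_at", "-created_at"]]
new_style = typing.Literal["created_at", "-created_at"] | None

# Both normalize to a Union of the Literal and NoneType.
assert old_style == new_style
assert typing.get_args(old_style) == typing.get_args(new_style)
```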

```diff
@@ -6,7 +6,7 @@
 import logging
 import warnings
-from typing import Any, Dict, Set, Tuple
+from typing import Any

 from pydantic import BaseModel
@@ -18,7 +18,7 @@ from .config import NvidiaPostTrainingConfig
 logger = logging.getLogger(__name__)


-def warn_unsupported_params(config_dict: Any, supported_keys: Set[str], config_name: str) -> None:
+def warn_unsupported_params(config_dict: Any, supported_keys: set[str], config_name: str) -> None:
     keys = set(config_dict.__annotations__.keys()) if isinstance(config_dict, BaseModel) else config_dict.keys()
     unsupported_params = [k for k in keys if k not in supported_keys]
     if unsupported_params:
@@ -28,7 +28,7 @@ def warn_unsupported_params(config_dict: Any, supported_keys: Set[str], config_n

 def validate_training_params(
-    training_config: Dict[str, Any], supported_keys: Set[str], config_name: str = "TrainingConfig"
+    training_config: dict[str, Any], supported_keys: set[str], config_name: str = "TrainingConfig"
 ) -> None:
     """
     Validates training parameters against supported keys.
@@ -57,7 +57,7 @@ def validate_training_params(
 # ToDo: implement post health checks for customizer are enabled
-async def _get_health(url: str) -> Tuple[bool, bool]: ...
+async def _get_health(url: str) -> tuple[bool, bool]: ...


 async def check_health(config: NvidiaPostTrainingConfig) -> None: ...
```
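
Finally, the `Dict`/`Set`/`Tuple` removals in this file work because PEP 585 builtin generics are ordinary runtime objects on Python 3.9+. A small illustration using a stub that mirrors the `_get_health` signature (the stub body is hypothetical):

```python
from typing import get_type_hints


async def _get_health(url: str) -> tuple[bool, bool]:
    # Hypothetical stub; only the signature mirrors the real function.
    return (True, True)


# The annotation resolves without importing typing.Tuple.
assert get_type_hints(_get_health)["return"] == tuple[bool, bool]
```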