chore: enable pyupgrade fixes (#1806)

# What does this PR do?

The goal of this PR is code base modernization.

Schema reflection code needed a minor adjustment to handle union types written
with the `X | Y` syntax (`types.UnionType`) and `collections.abc.AsyncIterator`.
(Both are the preferred spellings in recent Python releases.)
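
For context, this is the kind of adjustment that implies, as a minimal sketch (not the actual `schema.py` change; `is_union_type` and `is_async_iterator_type` are hypothetical helpers used only for illustration): reflection that only checks for `typing.Union` misses unions written as `X | Y`, which are `types.UnionType` instances, and `collections.abc.AsyncIterator[...]` has to be accepted alongside its `typing` counterpart.

```python
# Minimal sketch only; these helpers are hypothetical, not the real strong_typing API.
import collections.abc
import types
import typing
from typing import get_args, get_origin


def is_union_type(tp) -> bool:
    # Optional[int] / Union[int, None] -> origin is typing.Union,
    # int | None (PEP 604)             -> origin is types.UnionType.
    return get_origin(tp) in (typing.Union, types.UnionType)


def is_async_iterator_type(tp) -> bool:
    # typing.AsyncIterator[str] and collections.abc.AsyncIterator[str]
    # both normalize to collections.abc.AsyncIterator via get_origin().
    return get_origin(tp) is collections.abc.AsyncIterator


assert is_union_type(typing.Optional[int]) and is_union_type(int | None)
assert get_args(int | None) == (int, type(None))
assert is_async_iterator_type(collections.abc.AsyncIterator[str])
```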

Note to reviewers: almost all changes here were generated automatically by
pyupgrade, and some additional unused imports were cleaned up along the way.
The only change worth noting is under `docs/openapi_generator` and
`llama_stack/strong_typing/schema.py`, where the reflection code was updated
to deal with the newer types.
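
For reviewers skimming the diff below, the mechanical rewrites all follow the same shape, roughly like this (illustrative only, with a stand-in `Model` class; the exact rules are whatever pyupgrade applies for the project's minimum Python version):

```python
from dataclasses import dataclass


@dataclass
class Model:  # stand-in for the real Model type, for illustration only
    descriptor: str


# Before: from typing import List, Optional
#   def all_registered_models() -> List[Model]: ...
#   def resolve_model(descriptor: str) -> Optional[Model]: ...
# After: builtin generics (PEP 585) and union syntax (PEP 604), no typing import:
def all_registered_models() -> list[Model]:
    return [Model("Llama-2-7b")]


def resolve_model(descriptor: str) -> Model | None:
    for m in all_registered_models():
        if m.descriptor == descriptor:
            return m
    return None
```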

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
Authored by Ihar Hrachyshka on 2025-05-01 17:23:50 -04:00; committed by GitHub
parent ffe3d0b2cd
commit 9e6561a1ec
319 changed files with 2843 additions and 3033 deletions

@@ -6,7 +6,6 @@
 from dataclasses import dataclass
 from functools import lru_cache
-from typing import List, Optional
 from .sku_types import (
     CheckpointQuantizationFormat,
@@ -19,14 +18,14 @@ LLAMA2_VOCAB_SIZE = 32000
 LLAMA3_VOCAB_SIZE = 128256
-def resolve_model(descriptor: str) -> Optional[Model]:
+def resolve_model(descriptor: str) -> Model | None:
     for m in all_registered_models():
         if descriptor in (m.descriptor(), m.huggingface_repo):
             return m
     return None
-def all_registered_models() -> List[Model]:
+def all_registered_models() -> list[Model]:
     return (
         llama2_family()
         + llama3_family()
@@ -38,48 +37,48 @@ def all_registered_models() -> List[Model]:
     )
-def llama2_family() -> List[Model]:
+def llama2_family() -> list[Model]:
     return [
         *llama2_base_models(),
         *llama2_instruct_models(),
     ]
-def llama3_family() -> List[Model]:
+def llama3_family() -> list[Model]:
     return [
         *llama3_base_models(),
         *llama3_instruct_models(),
     ]
-def llama3_1_family() -> List[Model]:
+def llama3_1_family() -> list[Model]:
     return [
         *llama3_1_base_models(),
         *llama3_1_instruct_models(),
     ]
-def llama3_2_family() -> List[Model]:
+def llama3_2_family() -> list[Model]:
     return [
         *llama3_2_base_models(),
         *llama3_2_instruct_models(),
     ]
-def llama3_3_family() -> List[Model]:
+def llama3_3_family() -> list[Model]:
     return [
         *llama3_3_instruct_models(),
     ]
-def llama4_family() -> List[Model]:
+def llama4_family() -> list[Model]:
     return [
         *llama4_base_models(),
         *llama4_instruct_models(),
     ]
-def llama4_base_models() -> List[Model]:
+def llama4_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama4_scout_17b_16e,
@@ -98,7 +97,7 @@ def llama4_base_models() -> List[Model]:
     ]
-def llama4_instruct_models() -> List[Model]:
+def llama4_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama4_scout_17b_16e_instruct,
@@ -126,7 +125,7 @@ def llama4_instruct_models() -> List[Model]:
     ]
-def llama2_base_models() -> List[Model]:
+def llama2_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama2_7b,
@@ -185,7 +184,7 @@ def llama2_base_models() -> List[Model]:
     ]
-def llama3_base_models() -> List[Model]:
+def llama3_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_8b,
@@ -226,7 +225,7 @@ def llama3_base_models() -> List[Model]:
     ]
-def llama3_1_base_models() -> List[Model]:
+def llama3_1_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_1_8b,
@@ -324,7 +323,7 @@ def llama3_1_base_models() -> List[Model]:
     ]
-def llama3_2_base_models() -> List[Model]:
+def llama3_2_base_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_2_1b,
@@ -407,7 +406,7 @@ def llama3_2_base_models() -> List[Model]:
     ]
-def llama2_instruct_models() -> List[Model]:
+def llama2_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama2_7b_chat,
@@ -466,7 +465,7 @@ def llama2_instruct_models() -> List[Model]:
     ]
-def llama3_instruct_models() -> List[Model]:
+def llama3_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_8b_instruct,
@@ -507,7 +506,7 @@ def llama3_instruct_models() -> List[Model]:
     ]
-def llama3_1_instruct_models() -> List[Model]:
+def llama3_1_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_1_8b_instruct,
@@ -635,7 +634,7 @@ def arch_args_3b() -> dict:
     }
-def llama3_2_quantized_models() -> List[Model]:
+def llama3_2_quantized_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_2_1b_instruct,
@@ -704,7 +703,7 @@ def llama3_2_quantized_models() -> List[Model]:
     ]
-def llama3_2_instruct_models() -> List[Model]:
+def llama3_2_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_2_1b_instruct,
@@ -766,7 +765,7 @@ def llama3_2_instruct_models() -> List[Model]:
     ]
-def llama3_3_instruct_models() -> List[Model]:
+def llama3_3_instruct_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama3_3_70b_instruct,
@@ -790,7 +789,7 @@ def llama3_3_instruct_models() -> List[Model]:
 @lru_cache
-def safety_models() -> List[Model]:
+def safety_models() -> list[Model]:
     return [
         Model(
             core_model_id=CoreModelId.llama_guard_4_12b,
@@ -919,7 +918,7 @@ def safety_models() -> List[Model]:
 @dataclass
 class LlamaDownloadInfo:
     folder: str
-    files: List[str]
+    files: list[str]
     pth_size: int