chore: remove nested imports (#2515)

# What does this PR do?

* Given that our API packages use `import *` in `__init__.py`, we don't
need to do `from llama_stack.apis.models.models import ...` but simply
`from llama_stack.apis.models import ...` (see the sketch below). The
decision to use `import *` is debatable and should probably be revisited
at some point.

* Remove the unneeded Ruff F401 rule
* Consolidate the Ruff F403 rule in the pyproject
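
For illustration, a minimal sketch of the re-export pattern this relies on (it assumes `llama_stack` is importable; `ModelType` is one of the symbols touched in this diff):

```python
# llama_stack/apis/models/__init__.py wildcard-imports the nested module:
#
#     from .models import *  # noqa: F403
#
# so every public name defined in llama_stack/apis/models/models.py is
# re-exported at the package level.

# Before: call sites reached into the nested module directly.
# from llama_stack.apis.models.models import ModelType

# After: the package-level import resolves to the very same object.
from llama_stack.apis.models import ModelType

print(ModelType)
```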

Signed-off-by: Sébastien Han <seb@redhat.com>
Sébastien Han 2025-06-26 04:31:05 +02:00 committed by GitHub
parent 2d9fd041eb
commit ac5fd57387
82 changed files with 143 additions and 164 deletions

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .agents import *  # noqa: F401 F403
+from .agents import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .batch_inference import *  # noqa: F401 F403
+from .batch_inference import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .benchmarks import *  # noqa: F401 F403
+from .benchmarks import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .datasetio import *  # noqa: F401 F403
+from .datasetio import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .datasets import *  # noqa: F401 F403
+from .datasets import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .eval import *  # noqa: F401 F403
+from .eval import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .files import *  # noqa: F401 F403
+from .files import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .inference import *  # noqa: F401 F403
+from .inference import *

@@ -20,7 +20,7 @@ from typing_extensions import TypedDict
 from llama_stack.apis.common.content_types import ContentDelta, InterleavedContent, InterleavedContentItem
 from llama_stack.apis.common.responses import Order
 from llama_stack.apis.models import Model
-from llama_stack.apis.telemetry.telemetry import MetricResponseMixin
+from llama_stack.apis.telemetry import MetricResponseMixin
 from llama_stack.models.llama.datatypes import (
     BuiltinTool,
     StopReason,

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .inspect import *  # noqa: F401 F403
+from .inspect import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .models import *  # noqa: F401 F403
+from .models import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .post_training import *  # noqa: F401 F403
+from .post_training import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .providers import *  # noqa: F401 F403
+from .providers import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .safety import *  # noqa: F401 F403
+from .safety import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .scoring import *  # noqa: F401 F403
+from .scoring import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .scoring_functions import *  # noqa: F401 F403
+from .scoring_functions import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .shields import *  # noqa: F401 F403
+from .shields import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .synthetic_data_generation import *  # noqa: F401 F403
+from .synthetic_data_generation import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .telemetry import *  # noqa: F401 F403
+from .telemetry import *

@@ -4,5 +4,5 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .rag_tool import *  # noqa: F401 F403
-from .tools import *  # noqa: F401 F403
+from .rag_tool import *
+from .tools import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .vector_dbs import *  # noqa: F401 F403
+from .vector_dbs import *

@@ -4,4 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .vector_io import *  # noqa: F401 F403
+from .vector_io import *

@@ -30,7 +30,13 @@ from llama_stack.apis.inference import (
     ListOpenAIChatCompletionResponse,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
     OpenAICompletionWithInputMessages,
+    OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     Order,
     ResponseFormat,
     SamplingParams,
@@ -41,14 +47,6 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIEmbeddingsResponse,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
 from llama_stack.apis.models import Model, ModelType
 from llama_stack.apis.telemetry import MetricEvent, MetricInResponse, Telemetry
 from llama_stack.log import get_logger

@@ -16,17 +16,15 @@ from llama_stack.apis.vector_io import (
     QueryChunksResponse,
     SearchRankingOptions,
     VectorIO,
-    VectorStoreDeleteResponse,
-    VectorStoreListResponse,
-    VectorStoreObject,
-    VectorStoreSearchResponsePage,
-)
-from llama_stack.apis.vector_io.vector_io import (
     VectorStoreChunkingStrategy,
+    VectorStoreDeleteResponse,
     VectorStoreFileContentsResponse,
     VectorStoreFileDeleteResponse,
     VectorStoreFileObject,
     VectorStoreFileStatus,
+    VectorStoreListResponse,
+    VectorStoreObject,
+    VectorStoreSearchResponsePage,
 )
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import HealthResponse, HealthStatus, RoutingTable

@@ -25,7 +25,7 @@ class LlamaStackApi:
     def run_scoring(self, row, scoring_function_ids: list[str], scoring_params: dict | None):
         """Run scoring on a single row"""
         if not scoring_params:
-            scoring_params = {fn_id: None for fn_id in scoring_function_ids}
+            scoring_params = dict.fromkeys(scoring_function_ids)
         return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
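
This hunk, and several later ones, swap constant-value dict comprehensions for `dict.fromkeys`, which builds the same mapping more directly; a quick self-contained illustration (the IDs are made up):

```python
scoring_function_ids = ["fn-a", "fn-b"]  # made-up IDs, for illustration only

# dict.fromkeys(keys) maps every key to None; dict.fromkeys(keys, value)
# maps every key to the given constant value.
assert dict.fromkeys(scoring_function_ids) == {fn_id: None for fn_id in scoring_function_ids}
assert dict.fromkeys(scoring_function_ids, 1.0) == {fn_id: 1.0 for fn_id in scoring_function_ids}
```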

@@ -33,7 +33,7 @@ CATEGORIES = [
 ]
 # Initialize category levels with default level
-_category_levels: dict[str, int] = {category: DEFAULT_LOG_LEVEL for category in CATEGORIES}
+_category_levels: dict[str, int] = dict.fromkeys(CATEGORIES, DEFAULT_LOG_LEVEL)
 def config_to_category_levels(category: str, level: str):

@@ -45,7 +45,7 @@ from llama_stack.apis.agents.openai_responses import (
     WebSearchToolTypes,
 )
 from llama_stack.apis.common.content_types import TextContentItem
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     Inference,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
@@ -584,7 +584,7 @@ class OpenAIResponsesImpl:
         from llama_stack.apis.agents.openai_responses import (
             MCPListToolsTool,
         )
-        from llama_stack.apis.tools.tools import Tool
+        from llama_stack.apis.tools import Tool
         mcp_tool_to_server = {}

@@ -208,7 +208,7 @@ class MetaReferenceEvalImpl(
                 for scoring_fn_id in scoring_functions
             }
         else:
-            scoring_functions_dict = {scoring_fn_id: None for scoring_fn_id in scoring_functions}
+            scoring_functions_dict = dict.fromkeys(scoring_functions)
         score_response = await self.scoring_api.score(
             input_rows=score_input_rows, scoring_functions=scoring_functions_dict

@@ -7,7 +7,7 @@ from typing import Any
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
-from llama_stack.apis.inference.inference import Inference
+from llama_stack.apis.inference import Inference
 from llama_stack.apis.scoring import (
     ScoreBatchResponse,
     ScoreResponse,

@@ -6,7 +6,7 @@
 import re
 from typing import Any
-from llama_stack.apis.inference.inference import Inference, UserMessage
+from llama_stack.apis.inference import Inference, UserMessage
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

@@ -16,8 +16,7 @@ import numpy as np
 from numpy.typing import NDArray
 from llama_stack.apis.files import Files
-from llama_stack.apis.inference import InterleavedContent
-from llama_stack.apis.inference.inference import Inference
+from llama_stack.apis.inference import Inference, InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import (
     Chunk,

@@ -15,8 +15,8 @@ import numpy as np
 import sqlite_vec
 from numpy.typing import NDArray
-from llama_stack.apis.files.files import Files
-from llama_stack.apis.inference.inference import Inference
+from llama_stack.apis.files import Files
+from llama_stack.apis.inference import Inference
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import (
     Chunk,
@@ -64,7 +64,7 @@ def _normalize_scores(scores: dict[str, float]) -> dict[str, float]:
     score_range = max_score - min_score
     if score_range > 0:
         return {doc_id: (score - min_score) / score_range for doc_id, score in scores.items()}
-    return {doc_id: 1.0 for doc_id in scores}
+    return dict.fromkeys(scores, 1.0)
 def _weighted_rerank(

@@ -66,7 +66,7 @@ class NvidiaDatasetIOAdapter:
         Returns:
             Dataset
         """
-        ## add warnings for unsupported params
+        # add warnings for unsupported params
         request_body = {
             "name": dataset_def.identifier,
             "namespace": self.config.dataset_namespace,

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,
 )

@@ -24,6 +24,12 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
+    OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
@@ -33,14 +39,6 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIEmbeddingsResponse,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import (

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,
 )

@@ -9,7 +9,7 @@ from typing import Any
 from openai import AsyncOpenAI
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAIChoiceDelta,

@@ -29,20 +29,18 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
     OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
     ToolChoice,
     ToolConfig,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
 from llama_stack.apis.models import Model, ModelType
 from llama_stack.models.llama.datatypes import ToolDefinition, ToolPromptFormat
 from llama_stack.providers.utils.inference import (

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,

@@ -32,15 +32,6 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
-    ResponseFormat,
-    SamplingParams,
-    TextTruncation,
-    ToolChoice,
-    ToolConfig,
-    ToolDefinition,
-    ToolPromptFormat,
-)
-from llama_stack.apis.inference.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAICompletion,
@@ -48,6 +39,13 @@ from llama_stack.apis.inference.inference import (
     OpenAIEmbeddingUsage,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
+    ResponseFormat,
+    SamplingParams,
+    TextTruncation,
+    ToolChoice,
+    ToolConfig,
+    ToolDefinition,
+    ToolPromptFormat,
 )
 from llama_stack.apis.models import Model, ModelType
 from llama_stack.log import get_logger

@@ -6,7 +6,7 @@
 from dataclasses import dataclass
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,
 )

@@ -10,7 +10,7 @@ from typing import Any
 from openai import AsyncOpenAI
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
     OpenAICompletion,

@@ -19,7 +19,12 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
     OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -28,13 +33,6 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
 from llama_stack.apis.models import Model
 from llama_stack.distribution.library_client import convert_pydantic_to_json_value, convert_to_pydantic
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper

@@ -8,7 +8,7 @@ from collections.abc import AsyncGenerator
 from openai import OpenAI
 from llama_stack.apis.inference import *  # noqa: F403
-from llama_stack.apis.inference.inference import OpenAIEmbeddingsResponse
+from llama_stack.apis.inference import OpenAIEmbeddingsResponse
 # from llama_stack.providers.datatypes import ModelsProtocolPrivate
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.utils.inference.model_registry import (
     ProviderModelEntry,

@@ -23,7 +23,12 @@ from llama_stack.apis.inference import (
     Inference,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
     OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
@@ -33,13 +38,6 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper

@@ -38,9 +38,13 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAICompletion,
     OpenAIEmbeddingData,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -49,12 +53,6 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAICompletion,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
 from llama_stack.apis.models import Model, ModelType
 from llama_stack.models.llama.datatypes import BuiltinTool, StopReason, ToolCall
 from llama_stack.models.llama.sku_list import all_registered_models

@@ -18,10 +18,16 @@ from llama_stack.apis.inference import (
     CompletionRequest,
     EmbeddingsResponse,
     EmbeddingTaskType,
+    GreedySamplingStrategy,
     Inference,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
     OpenAIEmbeddingsResponse,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -29,14 +35,6 @@ from llama_stack.apis.inference import (
     ToolConfig,
     ToolDefinition,
     ToolPromptFormat,
-)
-from llama_stack.apis.inference.inference import (
-    GreedySamplingStrategy,
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
     TopKSamplingStrategy,
     TopPSamplingStrategy,
 )

@@ -23,6 +23,13 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     LogProbConfig,
     Message,
+    OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
+    OpenAICompletion,
+    OpenAIEmbeddingsResponse,
+    OpenAIEmbeddingUsage,
+    OpenAIMessageParam,
+    OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -31,16 +38,7 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
-from llama_stack.apis.inference.inference import (
-    OpenAIChatCompletion,
-    OpenAIChatCompletionChunk,
-    OpenAICompletion,
-    OpenAIEmbeddingsResponse,
-    OpenAIEmbeddingUsage,
-    OpenAIMessageParam,
-    OpenAIResponseFormatParam,
-)
-from llama_stack.apis.models.models import Model
+from llama_stack.apis.models import Model
 from llama_stack.distribution.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper

@@ -8,7 +8,7 @@ from typing import Any
 from pydantic import BaseModel, Field
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
 from llama_stack.providers.utils.inference import (

@@ -95,27 +95,25 @@ from llama_stack.apis.inference import (
     CompletionResponse,
     CompletionResponseStreamChunk,
     GreedySamplingStrategy,
-    Message,
-    SamplingParams,
-    SystemMessage,
-    TokenLogProbs,
-    ToolChoice,
-    ToolResponseMessage,
-    TopKSamplingStrategy,
-    TopPSamplingStrategy,
-    UserMessage,
-)
-from llama_stack.apis.inference.inference import (
     JsonSchemaResponseFormat,
+    Message,
     OpenAIChatCompletion,
     OpenAICompletion,
     OpenAICompletionChoice,
     OpenAIEmbeddingData,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
+    SamplingParams,
+    SystemMessage,
+    TokenLogProbs,
+    ToolChoice,
     ToolConfig,
+    ToolResponseMessage,
+    TopKSamplingStrategy,
+    TopPSamplingStrategy,
+    UserMessage,
 )
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     OpenAIChoice as OpenAIChatCompletionChoice,
 )
 from llama_stack.models.llama.datatypes import (

@@ -12,8 +12,7 @@ import uuid
 from abc import ABC, abstractmethod
 from typing import Any
-from llama_stack.apis.files import Files
-from llama_stack.apis.files.files import OpenAIFileObject
+from llama_stack.apis.files import Files, OpenAIFileObject
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import (
     Chunk,

@@ -180,7 +180,7 @@ async def start_trace(name: str, attributes: dict[str, Any] = None) -> TraceCont
     trace_id = generate_trace_id()
     context = TraceContext(BACKGROUND_LOGGER, trace_id)
-    attributes = {marker: True for marker in ROOT_SPAN_MARKERS} | (attributes or {})
+    attributes = dict.fromkeys(ROOT_SPAN_MARKERS, True) | (attributes or {})
     context.push_span(name, attributes)
     CURRENT_TRACE_CONTEXT.set(context)

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,

@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from llama_stack.apis.datasets import DatasetPurpose, URIDataSource
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     BenchmarkInput,
     DatasetInput,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -13,7 +13,7 @@ import yaml
 from pydantic import BaseModel, Field
 from llama_stack.apis.datasets import DatasetPurpose
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     Api,
     BenchmarkInput,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import (
     ModelInput,
     Provider,

@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import ModelInput, Provider
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,

@@ -6,7 +6,7 @@
 from pathlib import Path
-from llama_stack.apis.models.models import ModelType
+from llama_stack.apis.models import ModelType
 from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,

@@ -204,6 +204,9 @@ unfixable = [
     "RUF001",
     "PLE2515",
 ]
+"llama_stack/apis/**/__init__.py" = [
+    "F403",
+]  # Using import * is acceptable (or at least tolerated) in an __init__.py of a package API
 [tool.mypy]
 mypy_path = ["llama_stack"]
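
The hunk above does not include the table header these added lines land under; presumably it is Ruff's `per-file-ignores` table, but that is an assumption since the header sits outside the diff context. A sketch of the resulting configuration:

```toml
# Assumed table header -- not visible in the hunk above.
[tool.ruff.lint.per-file-ignores]
"llama_stack/apis/**/__init__.py" = [
    "F403",  # `from module import *` used; the re-export is intentional in these files
]
```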

@@ -13,7 +13,7 @@ import pytest
 from llama_stack.apis.common.type_system import NumberType
 from llama_stack.apis.datasets.datasets import Dataset, DatasetPurpose, URIDataSource
 from llama_stack.apis.datatypes import Api
-from llama_stack.apis.models.models import Model, ModelType
+from llama_stack.apis.models import Model, ModelType
 from llama_stack.apis.shields.shields import Shield
 from llama_stack.apis.tools import ListToolDefsResponse, ToolDef, ToolGroup, ToolParameter
 from llama_stack.apis.vector_dbs.vector_dbs import VectorDB

@@ -8,7 +8,7 @@ import os
 import yaml
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     OpenAIChatCompletion,
 )

@@ -29,7 +29,7 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseTextFormat,
     WebSearchToolTypes,
 )
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletionContentPartTextParam,
     OpenAIDeveloperMessageParam,

@@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import pytest
-from llama_stack.apis.inference.inference import CompletionMessage, UserMessage
+from llama_stack.apis.inference import CompletionMessage, UserMessage
 from llama_stack.apis.safety import RunShieldResponse, ViolationLevel
 from llama_stack.apis.shields import Shield
 from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig

@@ -7,7 +7,7 @@
 import pytest
 from llama_stack.apis.common.content_types import TextContentItem
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     CompletionMessage,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletionContentPartTextParam,

@@ -35,7 +35,7 @@
 import pytest
-from llama_stack.apis.models.models import Model
+from llama_stack.apis.models import Model
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry