chore: remove nested imports

* Since our API packages use import * in __init__.py, we can import
  directly from llama_stack.apis.models instead of
  llama_stack.apis.models.models.  However, the choice to use import *
  is debatable and may need to be reconsidered in the future.

* Remove the unnecessary Ruff F401 suppression.

* Consolidate the Ruff F403 rule configuration in
pyproject.toml.

Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
Sébastien Han 2025-06-25 13:07:15 +02:00
parent cfee63bd0d
commit c245cb580c
No known key found for this signature in database
82 changed files with 143 additions and 164 deletions

View file

@@ -13,7 +13,7 @@ import pytest
 from llama_stack.apis.common.type_system import NumberType
 from llama_stack.apis.datasets.datasets import Dataset, DatasetPurpose, URIDataSource
 from llama_stack.apis.datatypes import Api
-from llama_stack.apis.models.models import Model, ModelType
+from llama_stack.apis.models import Model, ModelType
 from llama_stack.apis.shields.shields import Shield
 from llama_stack.apis.tools import ListToolDefsResponse, ToolDef, ToolGroup, ToolParameter
 from llama_stack.apis.vector_dbs.vector_dbs import VectorDB

View file

@@ -8,7 +8,7 @@ import os
 import yaml
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
 OpenAIChatCompletion,
 )

View file

@@ -28,7 +28,7 @@ from llama_stack.apis.agents.openai_responses import (
 OpenAIResponseText,
 OpenAIResponseTextFormat,
 )
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
 OpenAIAssistantMessageParam,
 OpenAIChatCompletionContentPartTextParam,
 OpenAIDeveloperMessageParam,

View file

@@ -11,7 +11,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import pytest
-from llama_stack.apis.inference.inference import CompletionMessage, UserMessage
+from llama_stack.apis.inference import CompletionMessage, UserMessage
 from llama_stack.apis.safety import RunShieldResponse, ViolationLevel
 from llama_stack.apis.shields import Shield
 from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig

View file

@@ -7,7 +7,7 @@
 import pytest
 from llama_stack.apis.common.content_types import TextContentItem
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
 CompletionMessage,
 OpenAIAssistantMessageParam,
 OpenAIChatCompletionContentPartTextParam,

View file

@@ -35,7 +35,7 @@
 import pytest
-from llama_stack.apis.models.models import Model
+from llama_stack.apis.models import Model
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry