Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-15 01:26:10 +00:00.
chore: remove nested imports
* Since our API packages use `import *` in `__init__.py`, we can import directly from `llama_stack.apis.models` instead of `llama_stack.apis.models.models`. However, the choice to use `import *` is debatable and may need to be reconsidered in the future.
* Remove the unnecessary Ruff F401 suppression.
* Consolidate the Ruff F403 rule configuration in pyproject.toml.

Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
parent
cfee63bd0d
commit
c245cb580c
82 changed files with 143 additions and 164 deletions
@@ -44,7 +44,7 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseTextFormat,
 )
 from llama_stack.apis.common.content_types import TextContentItem
-from llama_stack.apis.inference.inference import (
+from llama_stack.apis.inference import (
     Inference,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
@@ -583,7 +583,7 @@ class OpenAIResponsesImpl:
         from llama_stack.apis.agents.openai_responses import (
             MCPListToolsTool,
         )
-        from llama_stack.apis.tools.tools import Tool
+        from llama_stack.apis.tools import Tool

         mcp_tool_to_server = {}

Loading…
Add table
Add a link
Reference in a new issue