fix: linter fixes

This commit is contained in:
Mike Sager 2025-11-14 14:56:24 -05:00
parent b768f73bc3
commit f573baeec0
5 changed files with 50 additions and 50 deletions

View file

@@ -15,7 +15,27 @@ from openai.types.chat.chat_completion_chunk import (
ChoiceDeltaToolCallFunction,
)
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
OpenAIResponsesImpl,
)
from llama_stack.providers.utils.responses.responses_store import (
ResponsesStore,
_OpenAIResponseObjectWithInputAndMessages,
)
from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api.agents import Order
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionRequestWithExtraBody,
OpenAIDeveloperMessageParam,
OpenAIJSONSchema,
OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema,
OpenAIUserMessageParam,
)
from llama_stack_api.openai_responses import (
ListOpenAIResponseInputItem,
OpenAIResponseInputMessageContentText,
@@ -31,27 +51,7 @@ from llama_stack_api.openai_responses import (
OpenAIResponseTextFormat,
WebSearchToolTypes,
)
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionRequestWithExtraBody,
OpenAIDeveloperMessageParam,
OpenAIJSONSchema,
OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema,
OpenAIUserMessageParam,
)
from llama_stack_api.tools import ListToolDefsResponse, ToolDef, ToolGroups, ToolInvocationResult, ToolRuntime
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
OpenAIResponsesImpl,
)
from llama_stack.providers.utils.responses.responses_store import (
ResponsesStore,
_OpenAIResponseObjectWithInputAndMessages,
)
from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
from tests.unit.providers.agents.meta_reference.fixtures import load_chat_completion_fixture

View file

@@ -7,13 +7,6 @@
import pytest
from llama_stack_api.openai_responses import (
OpenAIResponseMessage,
OpenAIResponseObject,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseOutputItemDone,
OpenAIResponseOutputMessageContentOutputText,
)
from llama_stack_api.common.errors import (
ConversationNotFoundError,
InvalidConversationIdError,
@@ -21,6 +14,13 @@ from llama_stack_api.common.errors import (
from llama_stack_api.conversations import (
ConversationItemList,
)
from llama_stack_api.openai_responses import (
OpenAIResponseMessage,
OpenAIResponseObject,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseOutputItemDone,
OpenAIResponseOutputMessageContentOutputText,
)
# Import existing fixtures from the main responses test file
pytest_plugins = ["tests.unit.providers.agents.meta_reference.test_openai_responses"]

View file

@@ -7,18 +7,14 @@
import pytest
from llama_stack_api.openai_responses import ( from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
OpenAIResponseAnnotationFileCitation, _extract_citations_from_text,
OpenAIResponseInputFunctionToolCallOutput, convert_chat_choice_to_response_message,
OpenAIResponseInputMessageContentImage, convert_response_content_to_chat_content,
OpenAIResponseInputMessageContentText, convert_response_input_to_chat_messages,
OpenAIResponseInputToolFunction, convert_response_text_to_chat_response_format,
OpenAIResponseInputToolWebSearch, get_message_type_by_role,
OpenAIResponseMessage, is_function_tool_call,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseText,
OpenAIResponseTextFormat,
) )
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
@@ -35,14 +31,18 @@ from llama_stack_api.inference import (
OpenAIToolMessageParam,
OpenAIUserMessageParam,
)
from llama_stack.providers.inline.agents.meta_reference.responses.utils import ( from llama_stack_api.openai_responses import (
_extract_citations_from_text, OpenAIResponseAnnotationFileCitation,
convert_chat_choice_to_response_message, OpenAIResponseInputFunctionToolCallOutput,
convert_response_content_to_chat_content, OpenAIResponseInputMessageContentImage,
convert_response_input_to_chat_messages, OpenAIResponseInputMessageContentText,
convert_response_text_to_chat_response_format, OpenAIResponseInputToolFunction,
get_message_type_by_role, OpenAIResponseInputToolWebSearch,
is_function_tool_call, OpenAIResponseMessage,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseText,
OpenAIResponseTextFormat,
) )

View file

@@ -5,6 +5,7 @@
# the root directory of this source tree.
from llama_stack.providers.inline.agents.meta_reference.responses.types import ToolContext
from llama_stack_api.openai_responses import (
MCPListToolsTool,
OpenAIResponseInputToolFileSearch,
@@ -15,7 +16,6 @@ from llama_stack_api.openai_responses import (
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseToolMCP,
)
from llama_stack.providers.inline.agents.meta_reference.responses.types import ToolContext
class TestToolContext:

View file

@@ -8,8 +8,6 @@ from unittest.mock import AsyncMock
import pytest
from llama_stack_api.agents import ResponseGuardrailSpec
from llama_stack_api.safety import ModerationObject, ModerationObjectResults
from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
OpenAIResponsesImpl,
)
@@ -17,6 +15,8 @@ from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
extract_guardrail_ids,
run_guardrails,
)
from llama_stack_api.agents import ResponseGuardrailSpec
from llama_stack_api.safety import ModerationObject, ModerationObjectResults
@pytest.fixture