Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 09:53:45 +00:00)
fix: Update responses unit tests to reflect llama_stack_api structure changes

commit 62ea40cb85 (parent ff60bb31e6)
6 changed files with 13 additions and 13 deletions
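Every visible hunk makes the same mechanical change: the tests now import the API models from the top-level llama_stack_api package instead of the old llama_stack.apis.* namespaces, while imports from llama_stack.core and llama_stack.providers are untouched. A minimal before/after sketch of that migration, using symbol names taken from the diff below; the constructed message and its field name are illustrative assumptions, not lines from the tests themselves:

# Before this commit the tests imported from the llama_stack.apis.* namespaces:
#   from llama_stack.apis.agents import Order
#   from llama_stack.apis.inference import OpenAIUserMessageParam
# After this commit the same symbols come from the llama_stack_api package:
from llama_stack_api.agents import Order
from llama_stack_api.inference import OpenAIUserMessageParam

# The classes themselves are unchanged; only the import path moved.
# The "content" field name is assumed from the OpenAI-compatible pydantic model.
message = OpenAIUserMessageParam(content="ping")
print(type(message).__module__, list(Order))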
@@ -8,7 +8,7 @@ import os

 import yaml

-from llama_stack.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIChatCompletion,
 )

@@ -15,8 +15,8 @@ from openai.types.chat.chat_completion_chunk import (
     ChoiceDeltaToolCallFunction,
 )

-from llama_stack.apis.agents import Order
-from llama_stack.apis.agents.openai_responses import (
+from llama_stack_api.agents import Order
+from llama_stack_api.openai_responses import (
     ListOpenAIResponseInputItem,
     OpenAIResponseInputMessageContentText,
     OpenAIResponseInputToolFunction,
@@ -31,7 +31,7 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseTextFormat,
     WebSearchToolTypes,
 )
-from llama_stack.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletionContentPartTextParam,
     OpenAIChatCompletionRequestWithExtraBody,
@@ -41,7 +41,7 @@ from llama_stack.apis.inference import (
     OpenAIResponseFormatJSONSchema,
     OpenAIUserMessageParam,
 )
-from llama_stack.apis.tools.tools import ListToolDefsResponse, ToolDef, ToolGroups, ToolInvocationResult, ToolRuntime
+from llama_stack_api.tools import ListToolDefsResponse, ToolDef, ToolGroups, ToolInvocationResult, ToolRuntime
 from llama_stack.core.access_control.access_control import default_policy
 from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
 from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (

@@ -7,18 +7,18 @@

 import pytest

-from llama_stack.apis.agents.openai_responses import (
+from llama_stack_api.openai_responses import (
     OpenAIResponseMessage,
     OpenAIResponseObject,
     OpenAIResponseObjectStreamResponseCompleted,
     OpenAIResponseObjectStreamResponseOutputItemDone,
     OpenAIResponseOutputMessageContentOutputText,
 )
-from llama_stack.apis.common.errors import (
+from llama_stack_api.common.errors import (
     ConversationNotFoundError,
     InvalidConversationIdError,
 )
-from llama_stack.apis.conversations.conversations import (
+from llama_stack_api.conversations import (
     ConversationItemList,
 )

@@ -7,7 +7,7 @@

 import pytest

-from llama_stack.apis.agents.openai_responses import (
+from llama_stack_api.openai_responses import (
     OpenAIResponseAnnotationFileCitation,
     OpenAIResponseInputFunctionToolCallOutput,
     OpenAIResponseInputMessageContentImage,
@@ -20,7 +20,7 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseText,
     OpenAIResponseTextFormat,
 )
-from llama_stack.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletionContentPartImageParam,
     OpenAIChatCompletionContentPartTextParam,

@@ -5,7 +5,7 @@
 # the root directory of this source tree.


-from llama_stack.apis.agents.openai_responses import (
+from llama_stack_api.openai_responses import (
     MCPListToolsTool,
     OpenAIResponseInputToolFileSearch,
     OpenAIResponseInputToolFunction,

@@ -8,8 +8,8 @@ from unittest.mock import AsyncMock

 import pytest

-from llama_stack.apis.agents.agents import ResponseGuardrailSpec
-from llama_stack.apis.safety import ModerationObject, ModerationObjectResults
+from llama_stack_api.agents import ResponseGuardrailSpec
+from llama_stack_api.safety import ModerationObject, ModerationObjectResults
 from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
     OpenAIResponsesImpl,
 )