feat: refactor llama-stack-api structure

Move everything under llama_stack_api.apis to the top-level llama_stack_api package.

Merge the provider datatypes (llama_stack_api.providers.datatypes) and the existing apis.datatypes into a common llama_stack_api.datatypes module.

Update all usages of these packages throughout LLS.
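
As a minimal before/after sketch (import paths and symbols taken from the hunks below):

    # Before: APIs were nested under llama_stack_api.apis, and provider
    # datatypes lived in llama_stack_api.providers.datatypes.
    from llama_stack_api.apis.models import Model, ModelType
    from llama_stack_api.providers.datatypes import Api, ProviderSpec

    # After: the same symbols import from the flattened top-level package,
    # with API and provider datatypes merged into llama_stack_api.datatypes.
    from llama_stack_api.models import Model, ModelType
    from llama_stack_api.datatypes import Api, ProviderSpec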

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Charlie Doern 2025-11-12 15:59:34 -05:00
parent d6b915ce0a
commit b7480e9c88
296 changed files with 906 additions and 1109 deletions
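
For out-of-tree code that has to straddle this refactor, a hypothetical compatibility shim (not part of this commit, just a migration aid) might look like:

    # Hypothetical shim: prefer the new flattened module and fall back to
    # the pre-refactor nested path if it is not available.
    try:
        from llama_stack_api.datatypes import Api, ProviderSpec
    except ImportError:
        from llama_stack_api.providers.datatypes import Api, ProviderSpec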


@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-from llama_stack_api.apis.conversations.conversations import (
+from llama_stack_api.conversations import (
     Conversation,
     ConversationItem,
     ConversationItemList,


@@ -8,7 +8,7 @@ import tempfile
 from pathlib import Path
 import pytest
-from llama_stack_api.apis.agents.openai_responses import (
+from llama_stack_api.openai_responses import (
     OpenAIResponseInputMessageContentText,
     OpenAIResponseMessage,
 )


@@ -6,8 +6,8 @@
 from unittest.mock import AsyncMock
-from llama_stack_api.apis.safety.safety import ModerationObject, ModerationObjectResults
-from llama_stack_api.apis.shields import ListShieldsResponse, Shield
+from llama_stack_api.safety import ModerationObject, ModerationObjectResults
+from llama_stack_api.shields import ListShieldsResponse, Shield
 from llama_stack.core.datatypes import SafetyConfig
 from llama_stack.core.routers.safety import SafetyRouter


@@ -7,7 +7,7 @@
 from unittest.mock import AsyncMock, Mock
 import pytest
-from llama_stack_api.apis.vector_io import OpenAICreateVectorStoreRequestWithExtraBody
+from llama_stack_api.vector_io import OpenAICreateVectorStoreRequestWithExtraBody
 from llama_stack.core.routers.vector_io import VectorIORouter


@@ -9,9 +9,9 @@
 from unittest.mock import AsyncMock
 import pytest
-from llama_stack_api.apis.models import ListModelsResponse, Model, ModelType
-from llama_stack_api.apis.shields import ListShieldsResponse, Shield
-from llama_stack_api.providers.datatypes import Api
+from llama_stack_api.datatypes import Api
+from llama_stack_api.models import ListModelsResponse, Model, ModelType
+from llama_stack_api.shields import ListShieldsResponse, Shield
 from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig
 from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config


@@ -9,14 +9,14 @@
 from unittest.mock import AsyncMock
 import pytest
-from llama_stack_api.apis.common.content_types import URL
-from llama_stack_api.apis.common.errors import ModelNotFoundError
-from llama_stack_api.apis.common.type_system import NumberType
-from llama_stack_api.apis.datasets.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack_api.apis.datatypes import Api
-from llama_stack_api.apis.models import Model, ModelType
-from llama_stack_api.apis.shields.shields import Shield
-from llama_stack_api.apis.tools import ListToolDefsResponse, ToolDef, ToolGroup
+from llama_stack_api.common.content_types import URL
+from llama_stack_api.common.errors import ModelNotFoundError
+from llama_stack_api.common.type_system import NumberType
+from llama_stack_api.datasets import Dataset, DatasetPurpose, URIDataSource
+from llama_stack_api.datatypes import Api
+from llama_stack_api.models import Model, ModelType
+from llama_stack_api.shields import Shield
+from llama_stack_api.tools import ListToolDefsResponse, ToolDef, ToolGroup
 from llama_stack.core.datatypes import RegistryEntrySource
 from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable


@@ -11,7 +11,7 @@ from unittest.mock import patch
 import pytest
 # Import the real Pydantic response types instead of using Mocks
-from llama_stack_api.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChoice,


@@ -9,7 +9,7 @@ from unittest.mock import patch
 import pytest
 import yaml
-from llama_stack_api.providers.datatypes import ProviderSpec
+from llama_stack_api.datatypes import ProviderSpec
 from pydantic import BaseModel, Field, ValidationError
 from llama_stack.core.datatypes import Api, Provider, StackRunConfig
@@ -312,7 +312,7 @@ pip_packages:
         """Test loading an external provider from a module (success path)."""
         from types import SimpleNamespace
-        from llama_stack_api.providers.datatypes import Api, ProviderSpec
+        from llama_stack_api.datatypes import Api, ProviderSpec
         # Simulate a provider module with get_provider_spec
         fake_spec = ProviderSpec(
@@ -395,7 +395,7 @@ pip_packages:
     def test_external_provider_from_module_building(self, mock_providers):
         """Test loading an external provider from a module during build (building=True, partial spec)."""
-        from llama_stack_api.providers.datatypes import Api
+        from llama_stack_api.datatypes import Api
         from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
@@ -457,7 +457,7 @@ class TestGetExternalProvidersFromModule:
         """Test provider with module containing version spec (e.g., package==1.0.0)."""
         from types import SimpleNamespace
-        from llama_stack_api.providers.datatypes import ProviderSpec
+        from llama_stack_api.datatypes import ProviderSpec
         from llama_stack.core.distribution import get_external_providers_from_module
@@ -595,7 +595,7 @@ class TestGetExternalProvidersFromModule:
         """Test when get_provider_spec returns a list of specs."""
         from types import SimpleNamespace
-        from llama_stack_api.providers.datatypes import ProviderSpec
+        from llama_stack_api.datatypes import ProviderSpec
         from llama_stack.core.distribution import get_external_providers_from_module
@@ -644,7 +644,7 @@ class TestGetExternalProvidersFromModule:
         """Test that list return filters specs by provider_type."""
         from types import SimpleNamespace
-        from llama_stack_api.providers.datatypes import ProviderSpec
+        from llama_stack_api.datatypes import ProviderSpec
         from llama_stack.core.distribution import get_external_providers_from_module
@@ -693,7 +693,7 @@ class TestGetExternalProvidersFromModule:
         """Test that list return adds multiple different provider_types when config requests them."""
         from types import SimpleNamespace
-        from llama_stack_api.providers.datatypes import ProviderSpec
+        from llama_stack_api.datatypes import ProviderSpec
         from llama_stack.core.distribution import get_external_providers_from_module
@@ -833,7 +833,7 @@ class TestGetExternalProvidersFromModule:
         """Test multiple APIs with providers."""
         from types import SimpleNamespace
-        from llama_stack_api.providers.datatypes import ProviderSpec
+        from llama_stack_api.datatypes import ProviderSpec
         from llama_stack.core.distribution import get_external_providers_from_module


@@ -6,9 +6,9 @@
 import pytest
-from llama_stack_api.apis.common.errors import ResourceNotFoundError
-from llama_stack_api.apis.common.responses import Order
-from llama_stack_api.apis.files import OpenAIFilePurpose
+from llama_stack_api.common.errors import ResourceNotFoundError
+from llama_stack_api.common.responses import Order
+from llama_stack_api.files import OpenAIFilePurpose
 from llama_stack.core.access_control.access_control import default_policy
 from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference


@@ -58,8 +58,8 @@ import json
 from unittest.mock import AsyncMock, MagicMock
 import pytest
-from llama_stack_api.apis.batches import BatchObject
-from llama_stack_api.apis.common.errors import ConflictError, ResourceNotFoundError
+from llama_stack_api.batches import BatchObject
+from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError
 class TestReferenceBatchesImpl:


@@ -43,7 +43,7 @@ Key Behaviors Tested:
 import asyncio
 import pytest
-from llama_stack_api.apis.common.errors import ConflictError
+from llama_stack_api.common.errors import ConflictError
 class TestReferenceBatchesIdempotency:


@@ -8,8 +8,8 @@ from unittest.mock import patch
 import pytest
 from botocore.exceptions import ClientError
-from llama_stack_api.apis.common.errors import ResourceNotFoundError
-from llama_stack_api.apis.files import OpenAIFilePurpose
+from llama_stack_api.common.errors import ResourceNotFoundError
+from llama_stack_api.files import OpenAIFilePurpose
 class TestS3FilesImpl:
@@ -227,7 +227,7 @@ class TestS3FilesImpl:
         mock_now.return_value = 0
-        from llama_stack_api.apis.files import ExpiresAfter
+        from llama_stack_api.files import ExpiresAfter
         sample_text_file.filename = "test_expired_file"
         uploaded = await s3_provider.openai_upload_file(
@@ -259,7 +259,7 @@ class TestS3FilesImpl:
     async def test_unsupported_expires_after_anchor(self, s3_provider, sample_text_file):
         """Unsupported anchor value should raise ValueError."""
-        from llama_stack_api.apis.files import ExpiresAfter
+        from llama_stack_api.files import ExpiresAfter
         sample_text_file.filename = "test_unsupported_expires_after_anchor"
@@ -272,7 +272,7 @@ class TestS3FilesImpl:
     async def test_nonint_expires_after_seconds(self, s3_provider, sample_text_file):
         """Non-integer seconds in expires_after should raise ValueError."""
-        from llama_stack_api.apis.files import ExpiresAfter
+        from llama_stack_api.files import ExpiresAfter
         sample_text_file.filename = "test_nonint_expires_after_seconds"
@@ -285,7 +285,7 @@ class TestS3FilesImpl:
     async def test_expires_after_seconds_out_of_bounds(self, s3_provider, sample_text_file):
         """Seconds outside allowed range should raise ValueError."""
-        from llama_stack_api.apis.files import ExpiresAfter
+        from llama_stack_api.files import ExpiresAfter
         with pytest.raises(ValueError, match="greater than or equal to 3600"):
             await s3_provider.openai_upload_file(


@@ -7,8 +7,8 @@
 from unittest.mock import patch
 import pytest
-from llama_stack_api.apis.common.errors import ResourceNotFoundError
-from llama_stack_api.apis.files import OpenAIFilePurpose
+from llama_stack_api.common.errors import ResourceNotFoundError
+from llama_stack_api.files import OpenAIFilePurpose
 from llama_stack.core.datatypes import User
 from llama_stack.providers.remote.files.s3.files import S3FilesImpl


@@ -8,7 +8,7 @@ from types import SimpleNamespace
 from unittest.mock import AsyncMock, MagicMock
 import pytest
-from llama_stack_api.apis.inference import OpenAIChatCompletionRequestWithExtraBody
+from llama_stack_api.inference import OpenAIChatCompletionRequestWithExtraBody
 from openai import AuthenticationError
 from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter


@@ -9,7 +9,8 @@ import time
 from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
 import pytest
-from llama_stack_api.apis.inference import (
+from llama_stack_api.datatypes import HealthStatus
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChatCompletionRequestWithExtraBody,
@@ -19,8 +20,7 @@ from llama_stack_api.apis.inference import (
     OpenAICompletionRequestWithExtraBody,
     ToolChoice,
 )
-from llama_stack_api.apis.models import Model
-from llama_stack_api.providers.datatypes import HealthStatus
+from llama_stack_api.models import Model
 from llama_stack.core.routers.inference import InferenceRouter
 from llama_stack.core.routing_tables.models import ModelsRoutingTable


@@ -7,7 +7,7 @@
 from unittest.mock import AsyncMock
 import pytest
-from llama_stack_api.apis.tools import ToolDef
+from llama_stack_api.tools import ToolDef
 from llama_stack.providers.inline.agents.meta_reference.responses.streaming import (
     convert_tooldef_to_chat_tool,


@@ -8,8 +8,8 @@ import os
 from unittest.mock import patch
 import pytest
-from llama_stack_api.apis.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack_api.apis.resource import ResourceType
+from llama_stack_api.datasets import Dataset, DatasetPurpose, URIDataSource
+from llama_stack_api.resource import ResourceType
 from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig
 from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter


@@ -8,11 +8,11 @@ import os
 from unittest.mock import MagicMock, patch
 import pytest
-from llama_stack_api.apis.benchmarks import Benchmark
-from llama_stack_api.apis.common.job_types import Job, JobStatus
-from llama_stack_api.apis.eval.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
-from llama_stack_api.apis.inference.inference import TopPSamplingStrategy
-from llama_stack_api.apis.resource import ResourceType
+from llama_stack_api.benchmarks import Benchmark
+from llama_stack_api.common.job_types import Job, JobStatus
+from llama_stack_api.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
+from llama_stack_api.inference import TopPSamplingStrategy
+from llama_stack_api.resource import ResourceType
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig


@@ -9,7 +9,7 @@ import warnings
 from unittest.mock import patch
 import pytest
-from llama_stack_api.apis.post_training.post_training import (
+from llama_stack_api.post_training import (
     DataConfig,
     DatasetFormat,
     EfficiencyConfig,


@@ -8,7 +8,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import aiohttp
 import pytest
-from llama_stack_api.apis.models import ModelType
+from llama_stack_api.models import ModelType
 from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter


@@ -9,13 +9,13 @@ from typing import Any
 from unittest.mock import AsyncMock, MagicMock, patch
 import pytest
-from llama_stack_api.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIUserMessageParam,
 )
-from llama_stack_api.apis.resource import ResourceType
-from llama_stack_api.apis.safety import RunShieldResponse, ViolationLevel
-from llama_stack_api.apis.shields import Shield
+from llama_stack_api.resource import ResourceType
+from llama_stack_api.safety import RunShieldResponse, ViolationLevel
+from llama_stack_api.shields import Shield
 from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
 from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter


@@ -9,7 +9,7 @@ import warnings
 from unittest.mock import patch
 import pytest
-from llama_stack_api.apis.post_training.post_training import (
+from llama_stack_api.post_training import (
     DataConfig,
     DatasetFormat,
     LoraFinetuningConfig,


@@ -7,7 +7,7 @@
 from types import SimpleNamespace
 from unittest.mock import AsyncMock, PropertyMock, patch
-from llama_stack_api.apis.inference import OpenAIChatCompletionRequestWithExtraBody
+from llama_stack_api.inference import OpenAIChatCompletionRequestWithExtraBody
 from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter
 from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig


@@ -10,8 +10,8 @@ from typing import Any
 from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
 import pytest
-from llama_stack_api.apis.inference import Model, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
-from llama_stack_api.apis.models import ModelType
+from llama_stack_api.inference import Model, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
+from llama_stack_api.models import ModelType
 from pydantic import BaseModel, Field
 from llama_stack.core.request_headers import request_provider_data_context


@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack_api.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIUserMessageParam,
 )


@@ -7,8 +7,8 @@
 from unittest.mock import AsyncMock, MagicMock, patch
 import pytest
-from llama_stack_api.apis.common.content_types import URL, TextContentItem
-from llama_stack_api.apis.tools import RAGDocument
+from llama_stack_api.common.content_types import URL, TextContentItem
+from llama_stack_api.rag_tool import RAGDocument
 from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, content_from_doc


@@ -34,7 +34,7 @@
 #
 import pytest
-from llama_stack_api.apis.models import Model
+from llama_stack_api.models import Model
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry


@@ -9,8 +9,8 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import numpy as np
 import pytest
-from llama_stack_api.apis.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
-from llama_stack_api.apis.vector_stores import VectorStore
+from llama_stack_api.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
+from llama_stack_api.vector_stores import VectorStore
 from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig


@@ -9,10 +9,10 @@ from unittest.mock import MagicMock, patch
 import numpy as np
 import pytest
-from llama_stack_api.apis.files import Files
-from llama_stack_api.apis.vector_io import Chunk, QueryChunksResponse
-from llama_stack_api.apis.vector_stores import VectorStore
-from llama_stack_api.providers.datatypes import HealthStatus
+from llama_stack_api.datatypes import HealthStatus
+from llama_stack_api.files import Files
+from llama_stack_api.vector_io import Chunk, QueryChunksResponse
+from llama_stack_api.vector_stores import VectorStore
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.inline.vector_io.faiss.faiss import (


@@ -8,7 +8,7 @@ import asyncio
 import numpy as np
 import pytest
-from llama_stack_api.apis.vector_io import Chunk, QueryChunksResponse
+from llama_stack_api.vector_io import Chunk, QueryChunksResponse
 from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import (
     SQLiteVecIndex,


@@ -10,8 +10,8 @@ from unittest.mock import AsyncMock, patch
 import numpy as np
 import pytest
-from llama_stack_api.apis.common.errors import VectorStoreNotFoundError
-from llama_stack_api.apis.vector_io import (
+from llama_stack_api.common.errors import VectorStoreNotFoundError
+from llama_stack_api.vector_io import (
     Chunk,
     OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
     OpenAICreateVectorStoreRequestWithExtraBody,
@@ -19,7 +19,7 @@ from llama_stack_api.apis.vector_io import (
     VectorStoreChunkingStrategyAuto,
     VectorStoreFileObject,
 )
-from llama_stack_api.apis.vector_stores import VectorStore
+from llama_stack_api.vector_stores import VectorStore
 from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
@@ -222,7 +222,7 @@ async def test_insert_chunks_missing_db_raises(vector_io_adapter):
 async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
     """Ensure no KeyError when document_id is missing or in different places."""
-    from llama_stack_api.apis.vector_io import Chunk, ChunkMetadata
+    from llama_stack_api.vector_io import Chunk, ChunkMetadata
     fake_index = AsyncMock()
     vector_io_adapter.cache["db1"] = fake_index
@@ -255,7 +255,7 @@ async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
 async def test_document_id_with_invalid_type_raises_error():
     """Ensure TypeError is raised when document_id is not a string."""
-    from llama_stack_api.apis.vector_io import Chunk
+    from llama_stack_api.vector_io import Chunk
     # Integer document_id should raise TypeError
     from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id


@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack_api.apis.vector_io import Chunk, ChunkMetadata
+from llama_stack_api.vector_io import Chunk, ChunkMetadata
 from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id


@@ -7,8 +7,8 @@
 from unittest.mock import AsyncMock, MagicMock
 import pytest
-from llama_stack_api.apis.tools.rag_tool import RAGQueryConfig
-from llama_stack_api.apis.vector_io import (
+from llama_stack_api.rag_tool import RAGQueryConfig
+from llama_stack_api.vector_io import (
     Chunk,
     ChunkMetadata,
     QueryChunksResponse,


@@ -12,12 +12,12 @@ from unittest.mock import AsyncMock, MagicMock
 import numpy as np
 import pytest
-from llama_stack_api.apis.inference.inference import (
+from llama_stack_api.inference import (
     OpenAIEmbeddingData,
     OpenAIEmbeddingsRequestWithExtraBody,
 )
-from llama_stack_api.apis.tools import RAGDocument
-from llama_stack_api.apis.vector_io import Chunk
+from llama_stack_api.rag_tool import RAGDocument
+from llama_stack_api.vector_io import Chunk
 from llama_stack.providers.utils.memory.vector_store import (
     URL,


@@ -6,8 +6,8 @@
 import pytest
-from llama_stack_api.apis.inference import Model
-from llama_stack_api.apis.vector_stores import VectorStore
+from llama_stack_api.inference import Model
+from llama_stack_api.vector_stores import VectorStore
 from llama_stack.core.datatypes import VectorStoreWithOwner
 from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
@@ -304,7 +304,7 @@ async def test_double_registration_different_objects(disk_dist_registry):
 async def test_double_registration_with_cache(cached_disk_dist_registry):
     """Test double registration behavior with caching enabled."""
-    from llama_stack_api.apis.models import ModelType
+    from llama_stack_api.models import ModelType
     from llama_stack.core.datatypes import ModelWithOwner


@@ -5,7 +5,7 @@
 # the root directory of this source tree.
-from llama_stack_api.apis.models import ModelType
+from llama_stack_api.models import ModelType
 from llama_stack.core.datatypes import ModelWithOwner, User
 from llama_stack.core.store.registry import CachedDiskDistributionRegistry


@@ -8,8 +8,8 @@ from unittest.mock import MagicMock, Mock, patch
 import pytest
 import yaml
-from llama_stack_api.apis.datatypes import Api
-from llama_stack_api.apis.models import ModelType
+from llama_stack_api.datatypes import Api
+from llama_stack_api.models import ModelType
 from pydantic import TypeAdapter, ValidationError
 from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed


@@ -9,8 +9,8 @@ import sys
 from typing import Any, Protocol
 from unittest.mock import AsyncMock, MagicMock
-from llama_stack_api.apis.inference import Inference
-from llama_stack_api.providers.datatypes import InlineProviderSpec, ProviderSpec
+from llama_stack_api.datatypes import InlineProviderSpec, ProviderSpec
+from llama_stack_api.inference import Inference
 from pydantic import BaseModel, Field
 from llama_stack.core.datatypes import Api, Provider, StackRunConfig


@@ -9,7 +9,7 @@ import logging # allow-direct-logging
 from unittest.mock import AsyncMock, MagicMock
 import pytest
-from llama_stack_api.apis.common.responses import PaginatedResponse
+from llama_stack_api.common.responses import PaginatedResponse
 from llama_stack.core.server.server import create_dynamic_typed_route, create_sse_event, sse_generator


@@ -9,7 +9,7 @@ Unit tests for JSON Schema-based tool definitions.
 Tests the new input_schema and output_schema fields.
 """
-from llama_stack_api.apis.tools import ToolDef
+from llama_stack_api.tools import ToolDef
 from pydantic import ValidationError
 from llama_stack.models.llama.datatypes import BuiltinTool, ToolDefinition


@@ -7,7 +7,7 @@
 import time
 import pytest
-from llama_stack_api.apis.inference import (
+from llama_stack_api.inference import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChoice,


@@ -9,12 +9,12 @@ from tempfile import TemporaryDirectory
 from uuid import uuid4
 import pytest
-from llama_stack_api.apis.agents import Order
-from llama_stack_api.apis.agents.openai_responses import (
+from llama_stack_api.agents import Order
+from llama_stack_api.inference import OpenAIMessageParam, OpenAIUserMessageParam
+from llama_stack_api.openai_responses import (
     OpenAIResponseInput,
     OpenAIResponseObject,
 )
-from llama_stack_api.apis.inference import OpenAIMessageParam, OpenAIUserMessageParam
 from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
 from llama_stack.providers.utils.responses.responses_store import ResponsesStore
@@ -46,7 +46,7 @@ def create_test_response_object(
 def create_test_response_input(content: str, input_id: str) -> OpenAIResponseInput:
     """Helper to create a test response input."""
-    from llama_stack_api.apis.agents.openai_responses import OpenAIResponseMessage
+    from llama_stack_api.openai_responses import OpenAIResponseMessage
     return OpenAIResponseMessage(
         id=input_id,