refactor: enforce top-level imports for llama-stack-api

Enforce that all imports from llama-stack-api use the form:

from llama_stack_api import <symbol>

 This prevents external code from accessing internal package structure
 (e.g., llama_stack_api.agents, llama_stack_api.common.*) and establishes
 a clear public API boundary.

 Changes:
 - Export 400+ symbols from llama_stack_api/__init__.py
 - Include all API types, common utilities, and strong_typing helpers
 - Update files across src/llama_stack, docs/, tests/, scripts/
 - Convert all submodule imports to top-level imports
 - Ensure docs use the top-level import form

 Addresses PR review feedback requiring explicit __all__ definition to
 prevent "peeking inside" the API package.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-13 14:03:30 -05:00
parent b7480e9c88
commit 2e5d1c8881
270 changed files with 1587 additions and 750 deletions

View file

@ -6,9 +6,7 @@
from typing import Protocol
from llama_stack_api.datatypes import Api, ProviderSpec, RemoteProviderSpec
from llama_stack_api.schema_utils import webmethod
from llama_stack_api.version import LLAMA_STACK_API_V1
from llama_stack_api import LLAMA_STACK_API_V1, Api, ProviderSpec, RemoteProviderSpec, webmethod
def available_providers() -> list[ProviderSpec]:

View file

@ -13,7 +13,7 @@ from contextlib import contextmanager
from io import BytesIO
import pytest
from llama_stack_api.files import OpenAIFilePurpose
from llama_stack_api import OpenAIFilePurpose
class BatchHelper:

View file

@ -9,7 +9,7 @@ from unittest.mock import patch
import pytest
import requests
from llama_stack_api.files import OpenAIFilePurpose
from llama_stack_api import OpenAIFilePurpose
from llama_stack.core.datatypes import User

View file

@ -15,8 +15,8 @@ that enables routing based on provider_data alone.
from unittest.mock import AsyncMock, patch
import pytest
from llama_stack_api.datatypes import Api
from llama_stack_api.inference import (
from llama_stack_api import (
Api,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionUsage,

View file

@ -9,7 +9,7 @@ import time
import uuid
import pytest
from llama_stack_api.post_training import (
from llama_stack_api import (
DataConfig,
DatasetFormat,
DPOAlignmentConfig,

View file

@ -12,7 +12,7 @@ import warnings
from collections.abc import Generator
import pytest
from llama_stack_api.safety import ViolationLevel
from llama_stack_api import ViolationLevel
from llama_stack.models.llama.sku_types import CoreModelId

View file

@ -7,7 +7,7 @@ import base64
import mimetypes
import pytest
from llama_stack_api.safety import ViolationLevel
from llama_stack_api import ViolationLevel
CODE_SCANNER_ENABLED_PROVIDERS = {"ollama", "together", "fireworks"}

View file

@ -9,7 +9,7 @@ import mimetypes
import os
import pytest
from llama_stack_api.safety import ViolationLevel
from llama_stack_api import ViolationLevel
VISION_SHIELD_ENABLED_PROVIDERS = {"together"}

View file

@ -7,7 +7,7 @@
import re
import pytest
from llama_stack_api.common.errors import ToolGroupNotFoundError
from llama_stack_api import ToolGroupNotFoundError
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server

View file

@ -8,8 +8,7 @@ import time
from io import BytesIO
import pytest
from llama_stack_api.files import ExpiresAfter
from llama_stack_api.vector_io import Chunk
from llama_stack_api import Chunk, ExpiresAfter
from llama_stack_client import BadRequestError
from openai import BadRequestError as OpenAIBadRequestError
@ -646,7 +645,7 @@ def test_openai_vector_store_attach_file(
):
"""Test OpenAI vector store attach file."""
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
compat_client = compat_client_with_empty_stores
@ -710,7 +709,7 @@ def test_openai_vector_store_attach_files_on_creation(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
# Create some files and attach them to the vector store
valid_file_ids = []
@ -775,7 +774,7 @@ def test_openai_vector_store_list_files(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@ -867,7 +866,7 @@ def test_openai_vector_store_retrieve_file_contents(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@ -928,7 +927,7 @@ def test_openai_vector_store_delete_file(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@ -994,7 +993,7 @@ def test_openai_vector_store_delete_file_removes_from_vector_store(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@ -1046,7 +1045,7 @@ def test_openai_vector_store_update_file(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@ -1103,7 +1102,7 @@ def test_create_vector_store_files_duplicate_vector_store_name(
This test confirms that client.vector_stores.create() creates a unique ID
"""
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
compat_client = compat_client_with_empty_stores

View file

@ -5,7 +5,7 @@
# the root directory of this source tree.
import pytest
from llama_stack_api.vector_io import Chunk
from llama_stack_api import Chunk
from ..conftest import vector_provider_wrapper

View file

@ -5,11 +5,7 @@
# the root directory of this source tree.
from llama_stack_api.conversations import (
Conversation,
ConversationItem,
ConversationItemList,
)
from llama_stack_api import Conversation, ConversationItem, ConversationItemList
def test_conversation_model_defaults():

View file

@ -8,10 +8,7 @@ import tempfile
from pathlib import Path
import pytest
from llama_stack_api.openai_responses import (
OpenAIResponseInputMessageContentText,
OpenAIResponseMessage,
)
from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage
from openai.types.conversations.conversation import Conversation as OpenAIConversation
from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
from pydantic import TypeAdapter

View file

@ -6,8 +6,7 @@
from unittest.mock import AsyncMock
from llama_stack_api.safety import ModerationObject, ModerationObjectResults
from llama_stack_api.shields import ListShieldsResponse, Shield
from llama_stack_api import ListShieldsResponse, ModerationObject, ModerationObjectResults, Shield
from llama_stack.core.datatypes import SafetyConfig
from llama_stack.core.routers.safety import SafetyRouter

View file

@ -7,7 +7,7 @@
from unittest.mock import AsyncMock, Mock
import pytest
from llama_stack_api.vector_io import OpenAICreateVectorStoreRequestWithExtraBody
from llama_stack_api import OpenAICreateVectorStoreRequestWithExtraBody
from llama_stack.core.routers.vector_io import VectorIORouter

View file

@ -9,9 +9,7 @@
from unittest.mock import AsyncMock
import pytest
from llama_stack_api.datatypes import Api
from llama_stack_api.models import ListModelsResponse, Model, ModelType
from llama_stack_api.shields import ListShieldsResponse, Shield
from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig
from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config

View file

@ -9,14 +9,21 @@
from unittest.mock import AsyncMock
import pytest
from llama_stack_api.common.content_types import URL
from llama_stack_api.common.errors import ModelNotFoundError
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.datasets import Dataset, DatasetPurpose, URIDataSource
from llama_stack_api.datatypes import Api
from llama_stack_api.models import Model, ModelType
from llama_stack_api.shields import Shield
from llama_stack_api.tools import ListToolDefsResponse, ToolDef, ToolGroup
from llama_stack_api import (
URL,
Api,
Dataset,
DatasetPurpose,
ListToolDefsResponse,
Model,
ModelNotFoundError,
ModelType,
NumberType,
Shield,
ToolDef,
ToolGroup,
URIDataSource,
)
from llama_stack.core.datatypes import RegistryEntrySource
from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable

View file

@ -11,7 +11,7 @@ from unittest.mock import patch
import pytest
# Import the real Pydantic response types instead of using Mocks
from llama_stack_api.inference import (
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,

View file

@ -9,7 +9,7 @@ from unittest.mock import patch
import pytest
import yaml
from llama_stack_api.datatypes import ProviderSpec
from llama_stack_api import ProviderSpec
from pydantic import BaseModel, Field, ValidationError
from llama_stack.core.datatypes import Api, Provider, StackRunConfig
@ -312,7 +312,7 @@ pip_packages:
"""Test loading an external provider from a module (success path)."""
from types import SimpleNamespace
from llama_stack_api.datatypes import Api, ProviderSpec
from llama_stack_api import Api, ProviderSpec
# Simulate a provider module with get_provider_spec
fake_spec = ProviderSpec(
@ -395,7 +395,7 @@ pip_packages:
def test_external_provider_from_module_building(self, mock_providers):
"""Test loading an external provider from a module during build (building=True, partial spec)."""
from llama_stack_api.datatypes import Api
from llama_stack_api import Api
from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
@ -457,7 +457,7 @@ class TestGetExternalProvidersFromModule:
"""Test provider with module containing version spec (e.g., package==1.0.0)."""
from types import SimpleNamespace
from llama_stack_api.datatypes import ProviderSpec
from llama_stack_api import ProviderSpec
from llama_stack.core.distribution import get_external_providers_from_module
@ -595,7 +595,7 @@ class TestGetExternalProvidersFromModule:
"""Test when get_provider_spec returns a list of specs."""
from types import SimpleNamespace
from llama_stack_api.datatypes import ProviderSpec
from llama_stack_api import ProviderSpec
from llama_stack.core.distribution import get_external_providers_from_module
@ -644,7 +644,7 @@ class TestGetExternalProvidersFromModule:
"""Test that list return filters specs by provider_type."""
from types import SimpleNamespace
from llama_stack_api.datatypes import ProviderSpec
from llama_stack_api import ProviderSpec
from llama_stack.core.distribution import get_external_providers_from_module
@ -693,7 +693,7 @@ class TestGetExternalProvidersFromModule:
"""Test that list return adds multiple different provider_types when config requests them."""
from types import SimpleNamespace
from llama_stack_api.datatypes import ProviderSpec
from llama_stack_api import ProviderSpec
from llama_stack.core.distribution import get_external_providers_from_module
@ -833,7 +833,7 @@ class TestGetExternalProvidersFromModule:
"""Test multiple APIs with providers."""
from types import SimpleNamespace
from llama_stack_api.datatypes import ProviderSpec
from llama_stack_api import ProviderSpec
from llama_stack.core.distribution import get_external_providers_from_module

View file

@ -6,9 +6,7 @@
import pytest
from llama_stack_api.common.errors import ResourceNotFoundError
from llama_stack_api.common.responses import Order
from llama_stack_api.files import OpenAIFilePurpose
from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference

View file

@ -58,8 +58,7 @@ import json
from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_stack_api.batches import BatchObject
from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError
from llama_stack_api import BatchObject, ConflictError, ResourceNotFoundError
class TestReferenceBatchesImpl:

View file

@ -43,7 +43,7 @@ Key Behaviors Tested:
import asyncio
import pytest
from llama_stack_api.common.errors import ConflictError
from llama_stack_api import ConflictError
class TestReferenceBatchesIdempotency:

View file

@ -8,8 +8,7 @@ from unittest.mock import patch
import pytest
from botocore.exceptions import ClientError
from llama_stack_api.common.errors import ResourceNotFoundError
from llama_stack_api.files import OpenAIFilePurpose
from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError
class TestS3FilesImpl:
@ -227,7 +226,7 @@ class TestS3FilesImpl:
mock_now.return_value = 0
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
sample_text_file.filename = "test_expired_file"
uploaded = await s3_provider.openai_upload_file(
@ -259,7 +258,7 @@ class TestS3FilesImpl:
async def test_unsupported_expires_after_anchor(self, s3_provider, sample_text_file):
"""Unsupported anchor value should raise ValueError."""
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
sample_text_file.filename = "test_unsupported_expires_after_anchor"
@ -272,7 +271,7 @@ class TestS3FilesImpl:
async def test_nonint_expires_after_seconds(self, s3_provider, sample_text_file):
"""Non-integer seconds in expires_after should raise ValueError."""
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
sample_text_file.filename = "test_nonint_expires_after_seconds"
@ -285,7 +284,7 @@ class TestS3FilesImpl:
async def test_expires_after_seconds_out_of_bounds(self, s3_provider, sample_text_file):
"""Seconds outside allowed range should raise ValueError."""
from llama_stack_api.files import ExpiresAfter
from llama_stack_api import ExpiresAfter
with pytest.raises(ValueError, match="greater than or equal to 3600"):
await s3_provider.openai_upload_file(

View file

@ -7,8 +7,7 @@
from unittest.mock import patch
import pytest
from llama_stack_api.common.errors import ResourceNotFoundError
from llama_stack_api.files import OpenAIFilePurpose
from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError
from llama_stack.core.datatypes import User
from llama_stack.providers.remote.files.s3.files import S3FilesImpl

View file

@ -8,7 +8,7 @@ from types import SimpleNamespace
from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_stack_api.inference import OpenAIChatCompletionRequestWithExtraBody
from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody
from openai import AuthenticationError
from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter

View file

@ -9,8 +9,9 @@ import time
from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
from llama_stack_api.datatypes import HealthStatus
from llama_stack_api.inference import (
from llama_stack_api import (
HealthStatus,
Model,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionRequestWithExtraBody,
@ -20,7 +21,6 @@ from llama_stack_api.inference import (
OpenAICompletionRequestWithExtraBody,
ToolChoice,
)
from llama_stack_api.models import Model
from llama_stack.core.routers.inference import InferenceRouter
from llama_stack.core.routing_tables.models import ModelsRoutingTable

View file

@ -7,7 +7,7 @@
from unittest.mock import AsyncMock
import pytest
from llama_stack_api.tools import ToolDef
from llama_stack_api import ToolDef
from llama_stack.providers.inline.agents.meta_reference.responses.streaming import (
convert_tooldef_to_chat_tool,

View file

@ -8,8 +8,7 @@ import os
from unittest.mock import patch
import pytest
from llama_stack_api.datasets import Dataset, DatasetPurpose, URIDataSource
from llama_stack_api.resource import ResourceType
from llama_stack_api import Dataset, DatasetPurpose, ResourceType, URIDataSource
from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig
from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter

View file

@ -8,11 +8,17 @@ import os
from unittest.mock import MagicMock, patch
import pytest
from llama_stack_api.benchmarks import Benchmark
from llama_stack_api.common.job_types import Job, JobStatus
from llama_stack_api.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
from llama_stack_api.inference import TopPSamplingStrategy
from llama_stack_api.resource import ResourceType
from llama_stack_api import (
Benchmark,
BenchmarkConfig,
EvaluateResponse,
Job,
JobStatus,
ModelCandidate,
ResourceType,
SamplingParams,
TopPSamplingStrategy,
)
from llama_stack.models.llama.sku_types import CoreModelId
from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig

View file

@ -9,7 +9,7 @@ import warnings
from unittest.mock import patch
import pytest
from llama_stack_api.post_training import (
from llama_stack_api import (
DataConfig,
DatasetFormat,
EfficiencyConfig,

View file

@ -8,7 +8,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
import aiohttp
import pytest
from llama_stack_api.models import ModelType
from llama_stack_api import ModelType
from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig
from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter

View file

@ -9,13 +9,14 @@ from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from llama_stack_api.inference import (
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIUserMessageParam,
ResourceType,
RunShieldResponse,
Shield,
ViolationLevel,
)
from llama_stack_api.resource import ResourceType
from llama_stack_api.safety import RunShieldResponse, ViolationLevel
from llama_stack_api.shields import Shield
from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter

View file

@ -9,7 +9,7 @@ import warnings
from unittest.mock import patch
import pytest
from llama_stack_api.post_training import (
from llama_stack_api import (
DataConfig,
DatasetFormat,
LoraFinetuningConfig,

View file

@ -7,7 +7,7 @@
from types import SimpleNamespace
from unittest.mock import AsyncMock, PropertyMock, patch
from llama_stack_api.inference import OpenAIChatCompletionRequestWithExtraBody
from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody
from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter
from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig

View file

@ -10,8 +10,7 @@ from typing import Any
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
import pytest
from llama_stack_api.inference import Model, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
from llama_stack_api.models import ModelType
from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
from pydantic import BaseModel, Field
from llama_stack.core.request_headers import request_provider_data_context

View file

@ -4,10 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
OpenAIUserMessageParam,
)
from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam
from llama_stack.models.llama.datatypes import RawTextItem
from llama_stack.providers.utils.inference.prompt_adapter import (

View file

@ -7,8 +7,7 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from llama_stack_api.common.content_types import URL, TextContentItem
from llama_stack_api.rag_tool import RAGDocument
from llama_stack_api import URL, RAGDocument, TextContentItem
from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, content_from_doc

View file

@ -34,7 +34,7 @@
#
import pytest
from llama_stack_api.models import Model
from llama_stack_api import Model
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry

View file

@ -9,8 +9,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
import numpy as np
import pytest
from llama_stack_api.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
from llama_stack_api.vector_stores import VectorStore
from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig

View file

@ -9,10 +9,7 @@ from unittest.mock import MagicMock, patch
import numpy as np
import pytest
from llama_stack_api.datatypes import HealthStatus
from llama_stack_api.files import Files
from llama_stack_api.vector_io import Chunk, QueryChunksResponse
from llama_stack_api.vector_stores import VectorStore
from llama_stack_api import Chunk, Files, HealthStatus, QueryChunksResponse, VectorStore
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.inline.vector_io.faiss.faiss import (

View file

@ -8,7 +8,7 @@ import asyncio
import numpy as np
import pytest
from llama_stack_api.vector_io import Chunk, QueryChunksResponse
from llama_stack_api import Chunk, QueryChunksResponse
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import (
SQLiteVecIndex,

View file

@ -10,16 +10,16 @@ from unittest.mock import AsyncMock, patch
import numpy as np
import pytest
from llama_stack_api.common.errors import VectorStoreNotFoundError
from llama_stack_api.vector_io import (
from llama_stack_api import (
Chunk,
OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
OpenAICreateVectorStoreRequestWithExtraBody,
QueryChunksResponse,
VectorStore,
VectorStoreChunkingStrategyAuto,
VectorStoreFileObject,
VectorStoreNotFoundError,
)
from llama_stack_api.vector_stores import VectorStore
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
@ -222,7 +222,7 @@ async def test_insert_chunks_missing_db_raises(vector_io_adapter):
async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
"""Ensure no KeyError when document_id is missing or in different places."""
from llama_stack_api.vector_io import Chunk, ChunkMetadata
from llama_stack_api import Chunk, ChunkMetadata
fake_index = AsyncMock()
vector_io_adapter.cache["db1"] = fake_index
@ -255,7 +255,7 @@ async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
async def test_document_id_with_invalid_type_raises_error():
"""Ensure TypeError is raised when document_id is not a string."""
from llama_stack_api.vector_io import Chunk
from llama_stack_api import Chunk
# Integer document_id should raise TypeError
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id

View file

@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.vector_io import Chunk, ChunkMetadata
from llama_stack_api import Chunk, ChunkMetadata
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id

View file

@ -7,12 +7,7 @@
from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_stack_api.rag_tool import RAGQueryConfig
from llama_stack_api.vector_io import (
Chunk,
ChunkMetadata,
QueryChunksResponse,
)
from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, RAGQueryConfig
from llama_stack.providers.inline.tool_runtime.rag.memory import MemoryToolRuntimeImpl

View file

@ -12,12 +12,7 @@ from unittest.mock import AsyncMock, MagicMock
import numpy as np
import pytest
from llama_stack_api.inference import (
OpenAIEmbeddingData,
OpenAIEmbeddingsRequestWithExtraBody,
)
from llama_stack_api.rag_tool import RAGDocument
from llama_stack_api.vector_io import Chunk
from llama_stack_api import Chunk, OpenAIEmbeddingData, OpenAIEmbeddingsRequestWithExtraBody, RAGDocument
from llama_stack.providers.utils.memory.vector_store import (
URL,

View file

@ -6,8 +6,7 @@
import pytest
from llama_stack_api.inference import Model
from llama_stack_api.vector_stores import VectorStore
from llama_stack_api import Model, VectorStore
from llama_stack.core.datatypes import VectorStoreWithOwner
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
@ -304,7 +303,7 @@ async def test_double_registration_different_objects(disk_dist_registry):
async def test_double_registration_with_cache(cached_disk_dist_registry):
"""Test double registration behavior with caching enabled."""
from llama_stack_api.models import ModelType
from llama_stack_api import ModelType
from llama_stack.core.datatypes import ModelWithOwner

View file

@ -5,7 +5,7 @@
# the root directory of this source tree.
from llama_stack_api.models import ModelType
from llama_stack_api import ModelType
from llama_stack.core.datatypes import ModelWithOwner, User
from llama_stack.core.store.registry import CachedDiskDistributionRegistry

View file

@ -8,8 +8,7 @@ from unittest.mock import MagicMock, Mock, patch
import pytest
import yaml
from llama_stack_api.datatypes import Api
from llama_stack_api.models import ModelType
from llama_stack_api import Api, ModelType
from pydantic import TypeAdapter, ValidationError
from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed

View file

@ -144,7 +144,7 @@ def middleware_with_mocks(mock_auth_endpoint):
middleware = AuthenticationMiddleware(mock_app, auth_config, {})
# Mock the route_impls to simulate finding routes with required scopes
from llama_stack_api.schema_utils import WebMethod
from llama_stack_api import WebMethod
routes = {
("POST", "/test/scoped"): WebMethod(route="/test/scoped", method="POST", required_scope="test.read"),

View file

@ -9,8 +9,7 @@ import sys
from typing import Any, Protocol
from unittest.mock import AsyncMock, MagicMock
from llama_stack_api.datatypes import InlineProviderSpec, ProviderSpec
from llama_stack_api.inference import Inference
from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec
from pydantic import BaseModel, Field
from llama_stack.core.datatypes import Api, Provider, StackRunConfig

View file

@ -9,7 +9,7 @@ import logging # allow-direct-logging
from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_stack_api.common.responses import PaginatedResponse
from llama_stack_api import PaginatedResponse
from llama_stack.core.server.server import create_dynamic_typed_route, create_sse_event, sse_generator

View file

@ -9,7 +9,7 @@ Unit tests for JSON Schema-based tool definitions.
Tests the new input_schema and output_schema fields.
"""
from llama_stack_api.tools import ToolDef
from llama_stack_api import ToolDef
from pydantic import ValidationError
from llama_stack.models.llama.datatypes import BuiltinTool, ToolDefinition

View file

@ -7,7 +7,7 @@
import time
import pytest
from llama_stack_api.inference import (
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,

View file

@ -9,12 +9,7 @@ from tempfile import TemporaryDirectory
from uuid import uuid4
import pytest
from llama_stack_api.agents import Order
from llama_stack_api.inference import OpenAIMessageParam, OpenAIUserMessageParam
from llama_stack_api.openai_responses import (
OpenAIResponseInput,
OpenAIResponseObject,
)
from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
@ -46,7 +41,7 @@ def create_test_response_object(
def create_test_response_input(content: str, input_id: str) -> OpenAIResponseInput:
"""Helper to create a test response input."""
from llama_stack_api.openai_responses import OpenAIResponseMessage
from llama_stack_api import OpenAIResponseMessage
return OpenAIResponseMessage(
id=input_id,