Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-04 10:10:36 +00:00)

Merge branch 'main' into feat/add-dana-agent-provider-stub
Commit 3b3a2d0ceb
418 changed files with 24245 additions and 1794 deletions
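Most hunks below are the same mechanical migration: test imports move from scattered internal modules (`llama_stack.apis.*`, `llama_stack.providers.datatypes`, `llama_stack.schema_utils`) to the flat `llama_stack_api` package. A minimal before/after sketch of the pattern, using names that appear in the hunks below (the smoke check itself is hypothetical, not part of the diff):

    # Before (removed across these hunks): one internal submodule per symbol.
    #   from llama_stack.apis.common.errors import ResourceNotFoundError
    #   from llama_stack.apis.files import OpenAIFilePurpose
    # After (added across these hunks): a single flat namespace re-exporting them.
    from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError

    # Hypothetical smoke check: the names are unchanged, so existing call sites
    # (e.g. pytest.raises(ResourceNotFoundError)) keep working; only import
    # statements change. Assumes ResourceNotFoundError is an exception type,
    # as its use with pytest.raises in these tests suggests.
    assert issubclass(ResourceNotFoundError, Exception)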
@@ -5,11 +5,7 @@
 # the root directory of this source tree.
 
-from llama_stack.apis.conversations.conversations import (
-    Conversation,
-    ConversationItem,
-    ConversationItemList,
-)
+from llama_stack_api import Conversation, ConversationItem, ConversationItemList
 
 
 def test_conversation_model_defaults():
@@ -12,10 +12,6 @@ from openai.types.conversations.conversation import Conversation as OpenAIConversation
 from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
 from pydantic import TypeAdapter
 
-from llama_stack.apis.agents.openai_responses import (
-    OpenAIResponseInputMessageContentText,
-    OpenAIResponseMessage,
-)
 from llama_stack.core.conversations.conversations import (
     ConversationServiceConfig,
     ConversationServiceImpl,

@@ -28,6 +24,7 @@ from llama_stack.core.storage.datatypes import (
     StorageConfig,
 )
 from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage
 
 
 @pytest.fixture
@@ -6,10 +6,9 @@
 
 from unittest.mock import AsyncMock
 
-from llama_stack.apis.safety.safety import ModerationObject, ModerationObjectResults
-from llama_stack.apis.shields import ListShieldsResponse, Shield
 from llama_stack.core.datatypes import SafetyConfig
 from llama_stack.core.routers.safety import SafetyRouter
+from llama_stack_api import ListShieldsResponse, ModerationObject, ModerationObjectResults, Shield
 
 
 async def test_run_moderation_uses_default_shield_when_model_missing():
@@ -8,8 +8,13 @@ from unittest.mock import AsyncMock, Mock
 
 import pytest
 
-from llama_stack.apis.vector_io import OpenAICreateVectorStoreRequestWithExtraBody
 from llama_stack.core.routers.vector_io import VectorIORouter
+from llama_stack_api import (
+    ModelNotFoundError,
+    ModelType,
+    ModelTypeError,
+    OpenAICreateVectorStoreRequestWithExtraBody,
+)
 
 
 async def test_single_provider_auto_selection():

@@ -21,6 +26,7 @@ async def test_single_provider_auto_selection():
             Mock(identifier="all-MiniLM-L6-v2", model_type="embedding", metadata={"embedding_dimension": 384})
         ]
     )
+    mock_routing_table.get_object_by_identifier = AsyncMock(return_value=Mock(model_type=ModelType.embedding))
     mock_routing_table.register_vector_store = AsyncMock(
         return_value=Mock(identifier="vs_123", provider_id="inline::faiss", provider_resource_id="vs_123")
     )

@@ -48,6 +54,7 @@ async def test_create_vector_stores_multiple_providers_missing_provider_id_error
             Mock(identifier="all-MiniLM-L6-v2", model_type="embedding", metadata={"embedding_dimension": 384})
         ]
     )
+    mock_routing_table.get_object_by_identifier = AsyncMock(return_value=Mock(model_type=ModelType.embedding))
     router = VectorIORouter(mock_routing_table)
     request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
         {"name": "test_store", "embedding_model": "all-MiniLM-L6-v2"}

@@ -117,3 +124,32 @@ async def test_update_vector_store_same_provider_id_succeeds():
     provider.openai_update_vector_store.assert_called_once_with(
         vector_store_id="vs_123", name="updated_name", expires_after=None, metadata={"provider_id": "inline::faiss"}
     )
+
+
+async def test_create_vector_store_with_unknown_embedding_model_raises_error():
+    """Test that creating a vector store with an unknown embedding model raises ModelNotFoundError."""
+    mock_routing_table = Mock(impls_by_provider_id={"provider": "mock"})
+    mock_routing_table.get_object_by_identifier = AsyncMock(return_value=None)
+
+    router = VectorIORouter(mock_routing_table)
+    request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
+        {"embedding_model": "unknown-model", "embedding_dimension": 384}
+    )
+
+    with pytest.raises(ModelNotFoundError, match="Model 'unknown-model' not found"):
+        await router.openai_create_vector_store(request)
+
+
+async def test_create_vector_store_with_wrong_model_type_raises_error():
+    """Test that creating a vector store with a non-embedding model raises ModelTypeError."""
+    mock_routing_table = Mock(impls_by_provider_id={"provider": "mock"})
+    mock_routing_table.get_object_by_identifier = AsyncMock(return_value=Mock(model_type=ModelType.llm))
+
+    router = VectorIORouter(mock_routing_table)
+    request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
+        {"embedding_model": "text-model", "embedding_dimension": 384}
+    )
+
+    with pytest.raises(ModelTypeError, match="Model 'text-model' is of type"):
+        await router.openai_create_vector_store(request)
@@ -10,11 +10,9 @@ from unittest.mock import AsyncMock
 
 import pytest
 
-from llama_stack.apis.models import ListModelsResponse, Model, ModelType
-from llama_stack.apis.shields import ListShieldsResponse, Shield
 from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig
 from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config
-from llama_stack.providers.datatypes import Api
+from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
 
 
 class TestVectorStoresValidation:
@@ -10,14 +10,6 @@ from unittest.mock import AsyncMock
 
 import pytest
 
-from llama_stack.apis.common.content_types import URL
-from llama_stack.apis.common.errors import ModelNotFoundError
-from llama_stack.apis.common.type_system import NumberType
-from llama_stack.apis.datasets.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack.apis.datatypes import Api
-from llama_stack.apis.models import Model, ModelType
-from llama_stack.apis.shields.shields import Shield
-from llama_stack.apis.tools import ListToolDefsResponse, ToolDef, ToolGroup
 from llama_stack.core.datatypes import RegistryEntrySource
 from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable
 from llama_stack.core.routing_tables.datasets import DatasetsRoutingTable

@@ -25,6 +17,21 @@ from llama_stack.core.routing_tables.models import ModelsRoutingTable
 from llama_stack.core.routing_tables.scoring_functions import ScoringFunctionsRoutingTable
 from llama_stack.core.routing_tables.shields import ShieldsRoutingTable
 from llama_stack.core.routing_tables.toolgroups import ToolGroupsRoutingTable
+from llama_stack_api import (
+    URL,
+    Api,
+    Dataset,
+    DatasetPurpose,
+    ListToolDefsResponse,
+    Model,
+    ModelNotFoundError,
+    ModelType,
+    NumberType,
+    Shield,
+    ToolDef,
+    ToolGroup,
+    URIDataSource,
+)
 
 
 class Impl:

@@ -130,7 +137,7 @@ class ToolGroupsImpl(Impl):
     async def unregister_toolgroup(self, toolgroup_id: str):
         return toolgroup_id
 
-    async def list_runtime_tools(self, toolgroup_id, mcp_endpoint):
+    async def list_runtime_tools(self, toolgroup_id, mcp_endpoint, authorization=None):
         return ListToolDefsResponse(
             data=[
                 ToolDef(
@@ -11,8 +11,15 @@ from unittest.mock import patch
 import pytest
 from openai import AsyncOpenAI
 
+from llama_stack.testing.api_recorder import (
+    APIRecordingMode,
+    ResponseStorage,
+    api_recording,
+    normalize_inference_request,
+)
+
 # Import the real Pydantic response types instead of using Mocks
-from llama_stack.apis.inference import (
+from llama_stack_api import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChoice,

@@ -20,12 +27,6 @@ from llama_stack.apis.inference import (
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
 )
-from llama_stack.testing.api_recorder import (
-    APIRecordingMode,
-    ResponseStorage,
-    api_recording,
-    normalize_inference_request,
-)
 
 
 @pytest.fixture
@@ -22,7 +22,7 @@ from llama_stack.core.storage.datatypes import (
     SqlStoreReference,
     StorageConfig,
 )
-from llama_stack.providers.datatypes import ProviderSpec
+from llama_stack_api import ProviderSpec
 
 
 class SampleConfig(BaseModel):

@@ -312,7 +312,7 @@ pip_packages:
         """Test loading an external provider from a module (success path)."""
         from types import SimpleNamespace
 
-        from llama_stack.providers.datatypes import Api, ProviderSpec
+        from llama_stack_api import Api, ProviderSpec
 
         # Simulate a provider module with get_provider_spec
         fake_spec = ProviderSpec(

@@ -396,7 +396,7 @@ pip_packages:
     def test_external_provider_from_module_building(self, mock_providers):
         """Test loading an external provider from a module during build (building=True, partial spec)."""
         from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
-        from llama_stack.providers.datatypes import Api
+        from llama_stack_api import Api
 
         # No importlib patch needed, should not import module when type of `config` is BuildConfig or DistributionSpec
         build_config = BuildConfig(

@@ -457,7 +457,7 @@ class TestGetExternalProvidersFromModule:
         from types import SimpleNamespace
 
         from llama_stack.core.distribution import get_external_providers_from_module
-        from llama_stack.providers.datatypes import ProviderSpec
+        from llama_stack_api import ProviderSpec
 
         fake_spec = ProviderSpec(
             api=Api.inference,

@@ -594,7 +594,7 @@ class TestGetExternalProvidersFromModule:
         from types import SimpleNamespace
 
         from llama_stack.core.distribution import get_external_providers_from_module
-        from llama_stack.providers.datatypes import ProviderSpec
+        from llama_stack_api import ProviderSpec
 
         spec1 = ProviderSpec(
             api=Api.inference,

@@ -642,7 +642,7 @@ class TestGetExternalProvidersFromModule:
         from types import SimpleNamespace
 
         from llama_stack.core.distribution import get_external_providers_from_module
-        from llama_stack.providers.datatypes import ProviderSpec
+        from llama_stack_api import ProviderSpec
 
         spec1 = ProviderSpec(
             api=Api.inference,

@@ -690,7 +690,7 @@ class TestGetExternalProvidersFromModule:
         from types import SimpleNamespace
 
         from llama_stack.core.distribution import get_external_providers_from_module
-        from llama_stack.providers.datatypes import ProviderSpec
+        from llama_stack_api import ProviderSpec
 
         # Module returns both inline and remote variants
         spec1 = ProviderSpec(

@@ -829,7 +829,7 @@ class TestGetExternalProvidersFromModule:
         from types import SimpleNamespace
 
         from llama_stack.core.distribution import get_external_providers_from_module
-        from llama_stack.providers.datatypes import ProviderSpec
+        from llama_stack_api import ProviderSpec
 
         inference_spec = ProviderSpec(
             api=Api.inference,
@@ -7,9 +7,6 @@
 
 import pytest
 
-from llama_stack.apis.common.errors import ResourceNotFoundError
-from llama_stack.apis.common.responses import Order
-from llama_stack.apis.files import OpenAIFilePurpose
 from llama_stack.core.access_control.access_control import default_policy
 from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
 from llama_stack.providers.inline.files.localfs import (

@@ -17,6 +14,7 @@ from llama_stack.providers.inline.files.localfs import (
     LocalfsFilesImplConfig,
 )
 from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError
 
 
 class MockUploadFile:
@@ -59,8 +59,7 @@ from unittest.mock import AsyncMock, MagicMock
 
 import pytest
 
-from llama_stack.apis.batches import BatchObject
-from llama_stack.apis.common.errors import ConflictError, ResourceNotFoundError
+from llama_stack_api import BatchObject, ConflictError, ResourceNotFoundError
 
 
 class TestReferenceBatchesImpl:
@@ -44,7 +44,7 @@ import asyncio
 
 import pytest
 
-from llama_stack.apis.common.errors import ConflictError
+from llama_stack_api import ConflictError
 
 
 class TestReferenceBatchesIdempotency:
@@ -9,8 +9,7 @@ from unittest.mock import patch
 import pytest
 from botocore.exceptions import ClientError
 
-from llama_stack.apis.common.errors import ResourceNotFoundError
-from llama_stack.apis.files import OpenAIFilePurpose
+from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError
 
 
 class TestS3FilesImpl:

@@ -228,7 +227,7 @@ class TestS3FilesImpl:
 
         mock_now.return_value = 0
 
-        from llama_stack.apis.files import ExpiresAfter
+        from llama_stack_api import ExpiresAfter
 
         sample_text_file.filename = "test_expired_file"
         uploaded = await s3_provider.openai_upload_file(

@@ -260,7 +259,7 @@ class TestS3FilesImpl:
 
     async def test_unsupported_expires_after_anchor(self, s3_provider, sample_text_file):
         """Unsupported anchor value should raise ValueError."""
-        from llama_stack.apis.files import ExpiresAfter
+        from llama_stack_api import ExpiresAfter
 
         sample_text_file.filename = "test_unsupported_expires_after_anchor"
 

@@ -273,7 +272,7 @@ class TestS3FilesImpl:
 
     async def test_nonint_expires_after_seconds(self, s3_provider, sample_text_file):
         """Non-integer seconds in expires_after should raise ValueError."""
-        from llama_stack.apis.files import ExpiresAfter
+        from llama_stack_api import ExpiresAfter
 
         sample_text_file.filename = "test_nonint_expires_after_seconds"
 

@@ -286,7 +285,7 @@ class TestS3FilesImpl:
 
     async def test_expires_after_seconds_out_of_bounds(self, s3_provider, sample_text_file):
         """Seconds outside allowed range should raise ValueError."""
-        from llama_stack.apis.files import ExpiresAfter
+        from llama_stack_api import ExpiresAfter
 
         with pytest.raises(ValueError, match="greater than or equal to 3600"):
             await s3_provider.openai_upload_file(
@@ -8,10 +8,9 @@ from unittest.mock import patch
 
 import pytest
 
-from llama_stack.apis.common.errors import ResourceNotFoundError
-from llama_stack.apis.files import OpenAIFilePurpose
 from llama_stack.core.datatypes import User
 from llama_stack.providers.remote.files.s3.files import S3FilesImpl
+from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError
 
 
 async def test_listing_hides_other_users_file(s3_provider, sample_text_file):
@@ -10,9 +10,9 @@ from unittest.mock import AsyncMock, MagicMock
 import pytest
 from openai import AuthenticationError
 
-from llama_stack.apis.inference import OpenAIChatCompletionRequestWithExtraBody
 from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter
 from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
+from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody
 
 
 def test_adapter_initialization():
@@ -10,7 +10,13 @@ from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
 
 import pytest
 
-from llama_stack.apis.inference import (
+from llama_stack.core.routers.inference import InferenceRouter
+from llama_stack.core.routing_tables.models import ModelsRoutingTable
+from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig
+from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter
+from llama_stack_api import (
+    HealthStatus,
+    Model,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChatCompletionRequestWithExtraBody,

@@ -20,12 +26,6 @@ from llama_stack.apis.inference import (
     OpenAICompletionRequestWithExtraBody,
     ToolChoice,
 )
-from llama_stack.apis.models import Model
-from llama_stack.core.routers.inference import InferenceRouter
-from llama_stack.core.routing_tables.models import ModelsRoutingTable
-from llama_stack.providers.datatypes import HealthStatus
-from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig
-from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter
 
 # These are unit test for the remote vllm provider
 # implementation. This should only contain tests which are specific to
@@ -8,11 +8,11 @@ from unittest.mock import AsyncMock
 
 import pytest
 
-from llama_stack.apis.tools import ToolDef
 from llama_stack.providers.inline.agents.meta_reference.responses.streaming import (
     convert_tooldef_to_chat_tool,
 )
 from llama_stack.providers.inline.agents.meta_reference.responses.types import ChatCompletionContext
+from llama_stack_api import ToolDef
 
 
 @pytest.fixture
@@ -9,10 +9,9 @@ from unittest.mock import patch
 
 import pytest
 
-from llama_stack.apis.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack.apis.resource import ResourceType
 from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig
 from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter
+from llama_stack_api import Dataset, DatasetPurpose, ResourceType, URIDataSource
 
 
 @pytest.fixture
@@ -9,14 +9,20 @@ from unittest.mock import MagicMock, patch
 
 import pytest
 
-from llama_stack.apis.benchmarks import Benchmark
-from llama_stack.apis.common.job_types import Job, JobStatus
-from llama_stack.apis.eval.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
-from llama_stack.apis.inference.inference import TopPSamplingStrategy
-from llama_stack.apis.resource import ResourceType
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig
 from llama_stack.providers.remote.eval.nvidia.eval import NVIDIAEvalImpl
+from llama_stack_api import (
+    Benchmark,
+    BenchmarkConfig,
+    EvaluateResponse,
+    Job,
+    JobStatus,
+    ModelCandidate,
+    ResourceType,
+    SamplingParams,
+    TopPSamplingStrategy,
+)
 
 MOCK_DATASET_ID = "default/test-dataset"
 MOCK_BENCHMARK_ID = "test-benchmark"
@@ -10,7 +10,12 @@ from unittest.mock import patch
 
 import pytest
 
-from llama_stack.apis.post_training.post_training import (
+from llama_stack.core.library_client import convert_pydantic_to_json_value
+from llama_stack.providers.remote.post_training.nvidia.post_training import (
+    NvidiaPostTrainingAdapter,
+    NvidiaPostTrainingConfig,
+)
+from llama_stack_api import (
     DataConfig,
     DatasetFormat,
     EfficiencyConfig,

@@ -19,11 +24,6 @@ from llama_stack.apis.post_training.post_training import (
     OptimizerType,
     TrainingConfig,
 )
-from llama_stack.core.library_client import convert_pydantic_to_json_value
-from llama_stack.providers.remote.post_training.nvidia.post_training import (
-    NvidiaPostTrainingAdapter,
-    NvidiaPostTrainingConfig,
-)
 
 
 class TestNvidiaParameters:
@@ -9,10 +9,10 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import aiohttp
 import pytest
 
-from llama_stack.apis.models import ModelType
 from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import ModelType
 
 
 class MockResponse:
@@ -10,15 +10,16 @@ from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
 
-from llama_stack.apis.inference import (
-    OpenAIAssistantMessageParam,
-    OpenAIUserMessageParam,
-)
-from llama_stack.apis.resource import ResourceType
-from llama_stack.apis.safety import RunShieldResponse, ViolationLevel
-from llama_stack.apis.shields import Shield
 from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
 from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter
+from llama_stack_api import (
+    OpenAIAssistantMessageParam,
+    OpenAIUserMessageParam,
+    ResourceType,
+    RunShieldResponse,
+    Shield,
+    ViolationLevel,
+)
 
 
 class FakeNVIDIASafetyAdapter(NVIDIASafetyAdapter):
@@ -10,15 +10,6 @@ from unittest.mock import patch
 
 import pytest
 
-from llama_stack.apis.post_training.post_training import (
-    DataConfig,
-    DatasetFormat,
-    LoraFinetuningConfig,
-    OptimizerConfig,
-    OptimizerType,
-    QATFinetuningConfig,
-    TrainingConfig,
-)
 from llama_stack.core.library_client import convert_pydantic_to_json_value
 from llama_stack.providers.remote.post_training.nvidia.post_training import (
     ListNvidiaPostTrainingJobs,

@@ -27,6 +18,15 @@ from llama_stack.providers.remote.post_training.nvidia.post_training import (
     NvidiaPostTrainingJob,
     NvidiaPostTrainingJobStatusResponse,
 )
+from llama_stack_api import (
+    DataConfig,
+    DatasetFormat,
+    LoraFinetuningConfig,
+    OptimizerConfig,
+    OptimizerType,
+    QATFinetuningConfig,
+    TrainingConfig,
+)
 
 
 @pytest.fixture
@@ -7,9 +7,9 @@
 from types import SimpleNamespace
 from unittest.mock import AsyncMock, PropertyMock, patch
 
-from llama_stack.apis.inference import OpenAIChatCompletionRequestWithExtraBody
 from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter
 from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
+from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody
 
 
 def test_can_create_adapter():
@@ -12,11 +12,10 @@ from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
 import pytest
 from pydantic import BaseModel, Field
 
-from llama_stack.apis.inference import Model, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
-from llama_stack.apis.models import ModelType
 from llama_stack.core.request_headers import request_provider_data_context
 from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import Model, ModelType, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
 
 
 class OpenAIMixinImpl(OpenAIMixin):
@@ -4,14 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.inference import (
-    OpenAIAssistantMessageParam,
-    OpenAIUserMessageParam,
-)
 from llama_stack.models.llama.datatypes import RawTextItem
 from llama_stack.providers.utils.inference.prompt_adapter import (
     convert_openai_message_to_raw_message,
 )
+from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam
 
 
 class TestConvertOpenAIMessageToRawMessage:
@@ -8,9 +8,8 @@ from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
 
-from llama_stack.apis.common.content_types import URL, TextContentItem
-from llama_stack.apis.tools import RAGDocument
 from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, content_from_doc
+from llama_stack_api import URL, RAGDocument, TextContentItem
 
 
 async def test_content_from_doc_with_url():
@@ -35,8 +35,8 @@
 
 import pytest
 
-from llama_stack.apis.models import Model
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry
+from llama_stack_api import Model
 
 
 @pytest.fixture
@@ -10,8 +10,6 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import numpy as np
 import pytest
 
-from llama_stack.apis.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter

@@ -20,6 +18,7 @@ from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteV
 from llama_stack.providers.remote.vector_io.pgvector.config import PGVectorVectorIOConfig
 from llama_stack.providers.remote.vector_io.pgvector.pgvector import PGVectorIndex, PGVectorVectorIOAdapter
 from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore
 
 EMBEDDING_DIMENSION = 768
 COLLECTION_PREFIX = "test_collection"
@@ -10,15 +10,12 @@ from unittest.mock import MagicMock, patch
 import numpy as np
 import pytest
 
-from llama_stack.apis.files import Files
-from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
-from llama_stack.apis.vector_stores import VectorStore
-from llama_stack.providers.datatypes import HealthStatus
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.inline.vector_io.faiss.faiss import (
     FaissIndex,
     FaissVectorIOAdapter,
 )
+from llama_stack_api import Chunk, Files, HealthStatus, QueryChunksResponse, VectorStore
 
 # This test is a unit test for the FaissVectorIOAdapter class. This should only contain
 # tests which are specific to this class. More general (API-level) tests should be placed in
@@ -9,12 +9,12 @@ import asyncio
 import numpy as np
 import pytest
 
-from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
 from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import (
     SQLiteVecIndex,
     SQLiteVecVectorIOAdapter,
     _create_sqlite_connection,
 )
+from llama_stack_api import Chunk, QueryChunksResponse
 
 # This test is a unit test for the SQLiteVecVectorIOAdapter class. This should only contain
 # tests which are specific to this class. More general (API-level) tests should be placed in
@@ -11,17 +11,17 @@ from unittest.mock import AsyncMock, patch
 import numpy as np
 import pytest
 
-from llama_stack.apis.common.errors import VectorStoreNotFoundError
-from llama_stack.apis.vector_io import (
+from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
+from llama_stack_api import (
     Chunk,
+    OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
+    OpenAICreateVectorStoreRequestWithExtraBody,
     QueryChunksResponse,
+    VectorStore,
     VectorStoreChunkingStrategyAuto,
     VectorStoreFileObject,
+    VectorStoreNotFoundError,
 )
-from llama_stack.apis.vector_stores import VectorStore
-from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
 
 # This test is a unit test for the inline VectorIO providers. This should only contain
 # tests which are specific to this class. More general (API-level) tests should be placed in

@@ -222,7 +222,7 @@ async def test_insert_chunks_missing_db_raises(vector_io_adapter):
 
 async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
     """Ensure no KeyError when document_id is missing or in different places."""
-    from llama_stack.apis.vector_io import Chunk, ChunkMetadata
+    from llama_stack_api import Chunk, ChunkMetadata
 
     fake_index = AsyncMock()
     vector_io_adapter.cache["db1"] = fake_index

@@ -255,10 +255,9 @@ async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
 
 async def test_document_id_with_invalid_type_raises_error():
     """Ensure TypeError is raised when document_id is not a string."""
-    from llama_stack.apis.vector_io import Chunk
-
     # Integer document_id should raise TypeError
     from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+    from llama_stack_api import Chunk
 
     chunk = Chunk(content="test", chunk_id=generate_chunk_id("test", "test"), metadata={"document_id": 12345})
     with pytest.raises(TypeError) as exc_info:
@@ -4,8 +4,8 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack.apis.vector_io import Chunk, ChunkMetadata
 from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+from llama_stack_api import Chunk, ChunkMetadata
 
 # This test is a unit test for the chunk_utils.py helpers. This should only contain
 # tests which are specific to this file. More general (API-level) tests should be placed in
@@ -8,13 +8,8 @@ from unittest.mock import AsyncMock, MagicMock
 
 import pytest
 
-from llama_stack.apis.tools.rag_tool import RAGQueryConfig
-from llama_stack.apis.vector_io import (
-    Chunk,
-    ChunkMetadata,
-    QueryChunksResponse,
-)
 from llama_stack.providers.inline.tool_runtime.rag.memory import MemoryToolRuntimeImpl
+from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, RAGQueryConfig
 
 
 class TestRagQuery:
@@ -13,12 +13,6 @@ from unittest.mock import AsyncMock, MagicMock
 import numpy as np
 import pytest
 
-from llama_stack.apis.inference.inference import (
-    OpenAIEmbeddingData,
-    OpenAIEmbeddingsRequestWithExtraBody,
-)
-from llama_stack.apis.tools import RAGDocument
-from llama_stack.apis.vector_io import Chunk
 from llama_stack.providers.utils.memory.vector_store import (
     URL,
     VectorStoreWithIndex,

@@ -27,6 +21,7 @@ from llama_stack.providers.utils.memory.vector_store import (
     make_overlapped_chunks,
 )
 from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+from llama_stack_api import Chunk, OpenAIEmbeddingData, OpenAIEmbeddingsRequestWithExtraBody, RAGDocument
 
 DUMMY_PDF_PATH = Path(os.path.abspath(__file__)).parent / "fixtures" / "dummy.pdf"
 # Depending on the machine, this can get parsed a couple of ways
@@ -7,8 +7,6 @@
 
 import pytest
 
-from llama_stack.apis.inference import Model
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.core.datatypes import VectorStoreWithOwner
 from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
 from llama_stack.core.store.registry import (

@@ -17,6 +15,7 @@ from llama_stack.core.store.registry import (
     DiskDistributionRegistry,
 )
 from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
+from llama_stack_api import Model, VectorStore
 
 
 @pytest.fixture

@@ -304,8 +303,8 @@ async def test_double_registration_different_objects(disk_dist_registry):
 
 async def test_double_registration_with_cache(cached_disk_dist_registry):
     """Test double registration behavior with caching enabled."""
-    from llama_stack.apis.models import ModelType
     from llama_stack.core.datatypes import ModelWithOwner
+    from llama_stack_api import ModelType
 
     model1 = ModelWithOwner(
         identifier="test_model",
@@ -5,9 +5,9 @@
 # the root directory of this source tree.
 
 
-from llama_stack.apis.models import ModelType
 from llama_stack.core.datatypes import ModelWithOwner, User
 from llama_stack.core.store.registry import CachedDiskDistributionRegistry
+from llama_stack_api import ModelType
 
 
 async def test_registry_cache_with_acl(cached_disk_dist_registry):
@@ -10,11 +10,10 @@ import pytest
 import yaml
 from pydantic import TypeAdapter, ValidationError
 
-from llama_stack.apis.datatypes import Api
-from llama_stack.apis.models import ModelType
 from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
 from llama_stack.core.datatypes import AccessRule, ModelWithOwner, User
 from llama_stack.core.routing_tables.models import ModelsRoutingTable
+from llama_stack_api import Api, ModelType
 
 
 class AsyncMock(MagicMock):
@@ -144,7 +144,7 @@ def middleware_with_mocks(mock_auth_endpoint):
     middleware = AuthenticationMiddleware(mock_app, auth_config, {})
 
     # Mock the route_impls to simulate finding routes with required scopes
-    from llama_stack.schema_utils import WebMethod
+    from llama_stack_api import WebMethod
 
     routes = {
         ("POST", "/test/scoped"): WebMethod(route="/test/scoped", method="POST", required_scope="test.read"),
@@ -11,7 +11,6 @@ from unittest.mock import AsyncMock, MagicMock
 
 from pydantic import BaseModel, Field
 
-from llama_stack.apis.inference import Inference
 from llama_stack.core.datatypes import Api, Provider, StackRunConfig
 from llama_stack.core.resolver import resolve_impls
 from llama_stack.core.routers.inference import InferenceRouter

@@ -25,9 +24,9 @@ from llama_stack.core.storage.datatypes import (
     SqlStoreReference,
     StorageConfig,
 )
-from llama_stack.providers.datatypes import InlineProviderSpec, ProviderSpec
 from llama_stack.providers.utils.kvstore import register_kvstore_backends
 from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec
 
 
 def add_protocol_methods(cls: type, protocol: type[Protocol]) -> None:
@@ -12,7 +12,7 @@ from pydantic import ValidationError
 
 from llama_stack.core.access_control.access_control import AccessDeniedError
 from llama_stack.core.datatypes import AuthenticationRequiredError
-from llama_stack.core.server.server import translate_exception
+from llama_stack.core.server.server import remove_disabled_providers, translate_exception
 
 
 class TestTranslateException:

@@ -194,3 +194,70 @@ class TestTranslateException:
         assert isinstance(result3, HTTPException)
         assert result3.status_code == 403
         assert result3.detail == "Permission denied: Access denied"
+
+
+class TestRemoveDisabledProviders:
+    """Test cases for the remove_disabled_providers function."""
+
+    def test_remove_explicitly_disabled_provider(self):
+        """Test that providers with provider_id='__disabled__' are removed."""
+        config = {
+            "providers": {
+                "inference": [
+                    {"provider_id": "openai", "provider_type": "remote::openai", "config": {}},
+                    {"provider_id": "__disabled__", "provider_type": "remote::vllm", "config": {}},
+                ]
+            }
+        }
+        result = remove_disabled_providers(config)
+        assert len(result["providers"]["inference"]) == 1
+        assert result["providers"]["inference"][0]["provider_id"] == "openai"
+
+    def test_remove_empty_provider_id(self):
+        """Test that providers with empty provider_id are removed."""
+        config = {
+            "providers": {
+                "inference": [
+                    {"provider_id": "openai", "provider_type": "remote::openai", "config": {}},
+                    {"provider_id": "", "provider_type": "remote::vllm", "config": {}},
+                ]
+            }
+        }
+        result = remove_disabled_providers(config)
+        assert len(result["providers"]["inference"]) == 1
+        assert result["providers"]["inference"][0]["provider_id"] == "openai"
+
+    def test_keep_models_with_none_provider_model_id(self):
+        """Test that models with None provider_model_id are NOT removed."""
+        config = {
+            "registered_resources": {
+                "models": [
+                    {
+                        "model_id": "llama-3-2-3b",
+                        "provider_id": "vllm-inference",
+                        "model_type": "llm",
+                        "provider_model_id": None,
+                        "metadata": {},
+                    },
+                    {
+                        "model_id": "gpt-4o-mini",
+                        "provider_id": "openai",
+                        "model_type": "llm",
+                        "provider_model_id": None,
+                        "metadata": {},
+                    },
+                    {
+                        "model_id": "granite-embedding-125m",
+                        "provider_id": "sentence-transformers",
+                        "model_type": "embedding",
+                        "provider_model_id": "ibm-granite/granite-embedding-125m-english",
+                        "metadata": {"embedding_dimension": 768},
+                    },
+                ]
+            }
+        }
+        result = remove_disabled_providers(config)
+        assert len(result["registered_resources"]["models"]) == 3
+        assert result["registered_resources"]["models"][0]["model_id"] == "llama-3-2-3b"
+        assert result["registered_resources"]["models"][1]["model_id"] == "gpt-4o-mini"
+        assert result["registered_resources"]["models"][2]["model_id"] == "granite-embedding-125m"
@@ -10,8 +10,8 @@ from unittest.mock import AsyncMock, MagicMock
 
 import pytest
 
-from llama_stack.apis.common.responses import PaginatedResponse
 from llama_stack.core.server.server import create_dynamic_typed_route, create_sse_event, sse_generator
+from llama_stack_api import PaginatedResponse
 
 
 @pytest.fixture
@@ -11,8 +11,8 @@ Tests the new input_schema and output_schema fields.
 
 from pydantic import ValidationError
 
-from llama_stack.apis.tools import ToolDef
 from llama_stack.models.llama.datatypes import BuiltinTool, ToolDefinition
+from llama_stack_api import ToolDef
 
 
 class TestToolDefValidation:
@@ -8,16 +8,16 @@ import time
 
 import pytest
 
-from llama_stack.apis.inference import (
+from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
+from llama_stack.providers.utils.inference.inference_store import InferenceStore
+from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import (
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChoice,
     OpenAIUserMessageParam,
     Order,
 )
-from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
-from llama_stack.providers.utils.inference.inference_store import InferenceStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
 
 
 @pytest.fixture(autouse=True)
@@ -10,15 +10,10 @@ from uuid import uuid4
 
 import pytest
 
-from llama_stack.apis.agents import Order
-from llama_stack.apis.agents.openai_responses import (
-    OpenAIResponseInput,
-    OpenAIResponseObject,
-)
-from llama_stack.apis.inference import OpenAIMessageParam, OpenAIUserMessageParam
 from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
 from llama_stack.providers.utils.responses.responses_store import ResponsesStore
 from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order
 
 
 def build_store(db_path: str, policy: list | None = None) -> ResponsesStore:

@@ -46,7 +41,7 @@ def create_test_response_object(
 
 def create_test_response_input(content: str, input_id: str) -> OpenAIResponseInput:
     """Helper to create a test response input."""
-    from llama_stack.apis.agents.openai_responses import OpenAIResponseMessage
+    from llama_stack_api import OpenAIResponseMessage
 
     return OpenAIResponseMessage(
         id=input_id,