fix integration tests and add tests for multiple providers

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
Francisco Javier Arceo 2025-10-13 12:13:43 -04:00
parent 1bb3434562
commit 9166669b83
3 changed files with 80 additions and 4 deletions

@@ -353,14 +353,12 @@ class OpenAIVectorStoreMixin(ABC):
         provider_vector_db_id = extra.get("provider_vector_db_id")
         embedding_model = extra.get("embedding_model")
         embedding_dimension = extra.get("embedding_dimension", 384)
-        provider_id = extra.get("provider_id")
+        # use provider_id from router or default to this provider's own ID (need for --stack-config)
+        provider_id = extra.get("provider_id") or getattr(self, "__provider_id__", None)

         # Derive the canonical vector_db_id (allow override, else generate)
         vector_db_id = provider_vector_db_id or generate_object_id("vector_store", lambda: f"vs_{uuid.uuid4()}")

-        if provider_id is None:
-            raise ValueError("Provider ID is required")
         if embedding_model is None:
             raise ValueError("Embedding model is required")
@@ -369,6 +367,9 @@ class OpenAIVectorStoreMixin(ABC):
             raise ValueError("Embedding dimension is required")

         # Register the VectorDB backing this vector store
+        if provider_id is None:
+            raise ValueError("Provider ID is required but was not provided")
         vector_db = VectorDB(
             identifier=vector_db_id,
             embedding_dimension=embedding_dimension,
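For context, the resolution order this hunk implements can be sketched as a small standalone helper. This is an illustration only; the helper name and signature are hypothetical, but the precedence mirrors the diff: an explicit provider_id from the request's extra body wins, otherwise the provider's own ID (self.__provider_id__ in the mixin) is used, and the error is raised only after both sources come up empty.

def resolve_provider_id(extra: dict, default_provider_id: str | None) -> str:
    # Hypothetical helper, not part of the codebase: mirrors the mixin change above.
    provider_id = extra.get("provider_id") or default_provider_id
    if provider_id is None:
        # Deferred check, matching the second hunk: fail only when neither the
        # request nor the provider itself supplies an ID.
        raise ValueError("Provider ID is required but was not provided")
    return provider_id

# e.g. resolve_provider_id({}, "inline::faiss") == "inline::faiss"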

@@ -224,3 +224,21 @@ def test_query_returns_valid_object_when_identical_to_embedding_in_vdb(
     assert len(response.chunks) > 0
     assert response.chunks[0].metadata["document_id"] == "doc1"
     assert response.chunks[0].metadata["source"] == "precomputed"
+
+
+def test_auto_extract_embedding_dimension(client_with_empty_registry, embedding_model_id):
+    vs = client_with_empty_registry.vector_stores.create(
+        name="test_auto_extract", extra_body={"embedding_model": embedding_model_id}
+    )
+    assert vs.id is not None
+
+
+def test_provider_auto_selection_single_provider(client_with_empty_registry, embedding_model_id):
+    providers = [p for p in client_with_empty_registry.providers.list() if p.api == "vector_io"]
+    if len(providers) != 1:
+        pytest.skip(f"Test requires exactly one vector_io provider, found {len(providers)}")
+
+    vs = client_with_empty_registry.vector_stores.create(
+        name="test_auto_provider", extra_body={"embedding_model": embedding_model_id}
+    )
+    assert vs.id is not None
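The new integration tests cover embedding-dimension auto-extraction and the single-provider path. A companion test for the multi-provider path is sketched below; it is not part of this commit, and it assumes the client forwards provider_id through extra_body the same way it forwards embedding_model, and that entries returned by providers.list() expose a provider_id attribute.

def test_explicit_provider_selection_multiple_providers(client_with_empty_registry, embedding_model_id):
    providers = [p for p in client_with_empty_registry.providers.list() if p.api == "vector_io"]
    if len(providers) < 2:
        pytest.skip(f"Test requires at least two vector_io providers, found {len(providers)}")

    # Pin one provider explicitly so creation does not hit the multiple-providers error.
    vs = client_with_empty_registry.vector_stores.create(
        name="test_explicit_provider",
        extra_body={"embedding_model": embedding_model_id, "provider_id": providers[0].provider_id},
    )
    assert vs.id is not None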

@@ -0,0 +1,57 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from unittest.mock import AsyncMock, Mock
+
+import pytest
+
+from llama_stack.apis.vector_io import OpenAICreateVectorStoreRequestWithExtraBody
+from llama_stack.core.routers.vector_io import VectorIORouter
+
+
+async def test_single_provider_auto_selection():
+    # provider_id automatically selected during vector store create() when only one provider available
+    mock_routing_table = Mock()
+    mock_routing_table.impls_by_provider_id = {"inline::faiss": "mock_provider"}
+    mock_routing_table.get_all_with_type = AsyncMock(
+        return_value=[
+            Mock(identifier="all-MiniLM-L6-v2", model_type="embedding", metadata={"embedding_dimension": 384})
+        ]
+    )
+    mock_routing_table.register_vector_db = AsyncMock(
+        return_value=Mock(identifier="vs_123", provider_id="inline::faiss", provider_resource_id="vs_123")
+    )
+    mock_routing_table.get_provider_impl = AsyncMock(
+        return_value=Mock(openai_create_vector_store=AsyncMock(return_value=Mock(id="vs_123")))
+    )
+
+    router = VectorIORouter(mock_routing_table)
+    request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
+        {"name": "test_store", "embedding_model": "all-MiniLM-L6-v2"}
+    )
+    result = await router.openai_create_vector_store(request)
+    assert result.id == "vs_123"
+
+
+async def test_create_vector_stores_multiple_providers_missing_provider_id_error():
+    # if multiple providers are available, vector store create will error without provider_id
+    mock_routing_table = Mock()
+    mock_routing_table.impls_by_provider_id = {
+        "inline::faiss": "mock_provider_1",
+        "inline::sqlite-vec": "mock_provider_2",
+    }
+    mock_routing_table.get_all_with_type = AsyncMock(
+        return_value=[
+            Mock(identifier="all-MiniLM-L6-v2", model_type="embedding", metadata={"embedding_dimension": 384})
+        ]
+    )
+
+    router = VectorIORouter(mock_routing_table)
+    request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
+        {"name": "test_store", "embedding_model": "all-MiniLM-L6-v2"}
+    )
+    with pytest.raises(ValueError, match="Multiple vector_io providers available"):
+        await router.openai_create_vector_store(request)
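The router behavior these unit tests pin down is not shown in this diff. A minimal sketch of the selection rule they assert, written as a hypothetical standalone helper rather than the actual VectorIORouter code, would be:

def select_provider_id(requested: str | None, impls_by_provider_id: dict) -> str:
    # Hypothetical helper: an explicit provider_id always wins; with exactly one
    # registered vector_io provider it is chosen automatically; with several and
    # no explicit choice, creation fails.
    if requested is not None:
        return requested
    if len(impls_by_provider_id) == 1:
        return next(iter(impls_by_provider_id))
    raise ValueError(
        f"Multiple vector_io providers available: {list(impls_by_provider_id)}. "
        "Please pass provider_id explicitly."
    )

# select_provider_id(None, {"inline::faiss": object()}) -> "inline::faiss"
# select_provider_id(None, {"inline::faiss": object(), "inline::sqlite-vec": object()}) raises ValueError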