Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-13 16:52:36 +00:00)
4: finished rename I think
This commit is contained in:
parent 3d7b463a80
commit 44f104baae
15 changed files with 273 additions and 272 deletions
@@ -18,19 +18,19 @@ from llama_stack.providers.inline.tool_runtime.rag.memory import MemoryToolRuntimeImpl
 class TestRagQuery:
-    async def test_query_raises_on_empty_vector_db_ids(self):
+    async def test_query_raises_on_empty_vector_store_ids(self):
         rag_tool = MemoryToolRuntimeImpl(
             config=MagicMock(), vector_io_api=MagicMock(), inference_api=MagicMock(), files_api=MagicMock()
         )
         with pytest.raises(ValueError):
-            await rag_tool.query(content=MagicMock(), vector_db_ids=[])
+            await rag_tool.query(content=MagicMock(), vector_store_ids=[])
 
     async def test_query_chunk_metadata_handling(self):
         rag_tool = MemoryToolRuntimeImpl(
             config=MagicMock(), vector_io_api=MagicMock(), inference_api=MagicMock(), files_api=MagicMock()
         )
         content = "test query content"
-        vector_db_ids = ["db1"]
+        vector_store_ids = ["db1"]
 
         chunk_metadata = ChunkMetadata(
             document_id="doc1",
@@ -55,7 +55,7 @@ class TestRagQuery:
         query_response = QueryChunksResponse(chunks=[chunk], scores=[1.0])
 
         rag_tool.vector_io_api.query_chunks = AsyncMock(return_value=query_response)
-        result = await rag_tool.query(content=content, vector_db_ids=vector_db_ids)
+        result = await rag_tool.query(content=content, vector_store_ids=vector_store_ids)
 
         assert result is not None
         expected_metadata_string = (
@@ -82,7 +82,7 @@ class TestRagQuery:
         with pytest.raises(ValueError):
             RAGQueryConfig(mode="wrong_mode")
 
-    async def test_query_adds_vector_db_id_to_chunk_metadata(self):
+    async def test_query_adds_vector_store_id_to_chunk_metadata(self):
         rag_tool = MemoryToolRuntimeImpl(
             config=MagicMock(),
             vector_io_api=MagicMock(),
@@ -90,7 +90,7 @@ class TestRagQuery:
             files_api=MagicMock(),
         )
 
-        vector_db_ids = ["db1", "db2"]
+        vector_store_ids = ["db1", "db2"]
 
         # Fake chunks from each DB
         chunk_metadata1 = ChunkMetadata(
@@ -101,7 +101,7 @@ class TestRagQuery:
         )
         chunk1 = Chunk(
             content="chunk from db1",
-            metadata={"vector_db_id": "db1", "document_id": "doc1"},
+            metadata={"vector_store_id": "db1", "document_id": "doc1"},
             stored_chunk_id="c1",
             chunk_metadata=chunk_metadata1,
         )
@@ -114,7 +114,7 @@ class TestRagQuery:
         )
         chunk2 = Chunk(
             content="chunk from db2",
-            metadata={"vector_db_id": "db2", "document_id": "doc2"},
+            metadata={"vector_store_id": "db2", "document_id": "doc2"},
             stored_chunk_id="c2",
             chunk_metadata=chunk_metadata2,
         )
@@ -126,13 +126,13 @@ class TestRagQuery:
             ]
         )
 
-        result = await rag_tool.query(content="test", vector_db_ids=vector_db_ids)
+        result = await rag_tool.query(content="test", vector_store_ids=vector_store_ids)
         returned_chunks = result.metadata["chunks"]
         returned_scores = result.metadata["scores"]
         returned_doc_ids = result.metadata["document_ids"]
-        returned_vector_db_ids = result.metadata["vector_db_ids"]
+        returned_vector_store_ids = result.metadata["vector_store_ids"]
 
         assert returned_chunks == ["chunk from db1", "chunk from db2"]
         assert returned_scores == (0.9, 0.8)
         assert returned_doc_ids == ["doc1", "doc2"]
-        assert returned_vector_db_ids == ["db1", "db2"]
+        assert returned_vector_store_ids == ["db1", "db2"]
@@ -21,7 +21,7 @@ from llama_stack.apis.tools import RAGDocument
 from llama_stack.apis.vector_io import Chunk
 from llama_stack.providers.utils.memory.vector_store import (
     URL,
-    VectorDBWithIndex,
+    VectorStoreWithIndex,
     _validate_embedding,
     content_from_doc,
     make_overlapped_chunks,
@@ -206,15 +206,15 @@ class TestVectorStore:
         assert str(excinfo.value.__cause__) == "Cannot convert to string"
 
 
-class TestVectorDBWithIndex:
+class TestVectorStoreWithIndex:
     async def test_insert_chunks_without_embeddings(self):
-        mock_vector_db = MagicMock()
-        mock_vector_db.embedding_model = "test-model without embeddings"
+        mock_vector_store = MagicMock()
+        mock_vector_store.embedding_model = "test-model without embeddings"
         mock_index = AsyncMock()
         mock_inference_api = AsyncMock()
 
-        vector_db_with_index = VectorDBWithIndex(
-            vector_db=mock_vector_db, index=mock_index, inference_api=mock_inference_api
+        vector_store_with_index = VectorStoreWithIndex(
+            vector_store=mock_vector_store, index=mock_index, inference_api=mock_inference_api
         )
 
         chunks = [
@@ -227,7 +227,7 @@ class TestVectorDBWithIndex:
             OpenAIEmbeddingData(embedding=[0.4, 0.5, 0.6], index=1),
         ]
 
-        await vector_db_with_index.insert_chunks(chunks)
+        await vector_store_with_index.insert_chunks(chunks)
 
         # Verify openai_embeddings was called with correct params
         mock_inference_api.openai_embeddings.assert_called_once()
@@ -243,14 +243,14 @@ class TestVectorDBWithIndex:
         assert np.array_equal(args[1], np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
 
     async def test_insert_chunks_with_valid_embeddings(self):
-        mock_vector_db = MagicMock()
-        mock_vector_db.embedding_model = "test-model with embeddings"
-        mock_vector_db.embedding_dimension = 3
+        mock_vector_store = MagicMock()
+        mock_vector_store.embedding_model = "test-model with embeddings"
+        mock_vector_store.embedding_dimension = 3
         mock_index = AsyncMock()
        mock_inference_api = AsyncMock()
 
-        vector_db_with_index = VectorDBWithIndex(
-            vector_db=mock_vector_db, index=mock_index, inference_api=mock_inference_api
+        vector_store_with_index = VectorStoreWithIndex(
+            vector_store=mock_vector_store, index=mock_index, inference_api=mock_inference_api
         )
 
         chunks = [
@@ -258,7 +258,7 @@ class TestVectorDBWithIndex:
             Chunk(content="Test 2", embedding=[0.4, 0.5, 0.6], metadata={}),
         ]
 
-        await vector_db_with_index.insert_chunks(chunks)
+        await vector_store_with_index.insert_chunks(chunks)
 
         mock_inference_api.openai_embeddings.assert_not_called()
         mock_index.add_chunks.assert_called_once()
@@ -267,14 +267,14 @@ class TestVectorDBWithIndex:
         assert np.array_equal(args[1], np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
 
     async def test_insert_chunks_with_invalid_embeddings(self):
-        mock_vector_db = MagicMock()
-        mock_vector_db.embedding_dimension = 3
-        mock_vector_db.embedding_model = "test-model with invalid embeddings"
+        mock_vector_store = MagicMock()
+        mock_vector_store.embedding_dimension = 3
+        mock_vector_store.embedding_model = "test-model with invalid embeddings"
         mock_index = AsyncMock()
         mock_inference_api = AsyncMock()
 
-        vector_db_with_index = VectorDBWithIndex(
-            vector_db=mock_vector_db, index=mock_index, inference_api=mock_inference_api
+        vector_store_with_index = VectorStoreWithIndex(
+            vector_store=mock_vector_store, index=mock_index, inference_api=mock_inference_api
         )
 
         # Verify Chunk raises ValueError for invalid embedding type
@@ -283,7 +283,7 @@ class TestVectorDBWithIndex:
 
         # Verify Chunk raises ValueError for invalid embedding type in insert_chunks (i.e., Chunk errors before insert_chunks is called)
         with pytest.raises(ValueError, match="Input should be a valid list"):
-            await vector_db_with_index.insert_chunks(
+            await vector_store_with_index.insert_chunks(
                 [
                     Chunk(content="Test 1", embedding=None, metadata={}),
                     Chunk(content="Test 2", embedding="invalid_type", metadata={}),
@@ -292,7 +292,7 @@ class TestVectorDBWithIndex:
 
         # Verify Chunk raises ValueError for invalid embedding element type in insert_chunks (i.e., Chunk errors before insert_chunks is called)
         with pytest.raises(ValueError, match=" Input should be a valid number, unable to parse string as a number "):
-            await vector_db_with_index.insert_chunks(
+            await vector_store_with_index.insert_chunks(
                 Chunk(content="Test 1", embedding=[0.1, "string", 0.3], metadata={})
             )
 
@@ -300,20 +300,20 @@ class TestVectorDBWithIndex:
             Chunk(content="Test 1", embedding=[0.1, 0.2, 0.3, 0.4], metadata={}),
         ]
         with pytest.raises(ValueError, match="has dimension 4, expected 3"):
-            await vector_db_with_index.insert_chunks(chunks_wrong_dim)
+            await vector_store_with_index.insert_chunks(chunks_wrong_dim)
 
         mock_inference_api.openai_embeddings.assert_not_called()
         mock_index.add_chunks.assert_not_called()
 
     async def test_insert_chunks_with_partially_precomputed_embeddings(self):
-        mock_vector_db = MagicMock()
-        mock_vector_db.embedding_model = "test-model with partial embeddings"
-        mock_vector_db.embedding_dimension = 3
+        mock_vector_store = MagicMock()
+        mock_vector_store.embedding_model = "test-model with partial embeddings"
+        mock_vector_store.embedding_dimension = 3
         mock_index = AsyncMock()
         mock_inference_api = AsyncMock()
 
-        vector_db_with_index = VectorDBWithIndex(
-            vector_db=mock_vector_db, index=mock_index, inference_api=mock_inference_api
+        vector_store_with_index = VectorStoreWithIndex(
+            vector_store=mock_vector_store, index=mock_index, inference_api=mock_inference_api
        )
 
         chunks = [
@@ -327,7 +327,7 @@ class TestVectorDBWithIndex:
             OpenAIEmbeddingData(embedding=[0.3, 0.3, 0.3], index=1),
         ]
 
-        await vector_db_with_index.insert_chunks(chunks)
+        await vector_store_with_index.insert_chunks(chunks)
 
         # Verify openai_embeddings was called with correct params
         mock_inference_api.openai_embeddings.assert_called_once()
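
For quick reference, here is a minimal, self-contained sketch of the renamed surface exactly as the new side of this diff exercises it: rag_tool.query(..., vector_store_ids=...) and VectorStoreWithIndex(vector_store=...). It only reuses constructions visible in the updated tests above; any behavior beyond that (for example, what a real query returns against a live vector_io API) is not assumed here.

import asyncio
from unittest.mock import AsyncMock, MagicMock

import pytest

from llama_stack.providers.inline.tool_runtime.rag.memory import MemoryToolRuntimeImpl
from llama_stack.providers.utils.memory.vector_store import VectorStoreWithIndex


async def check_renamed_surface() -> None:
    # Sketch only: mirrors the new-side call sites from this commit.
    # query() now takes vector_store_ids (formerly vector_db_ids) and,
    # as the updated test asserts, still rejects an empty list.
    rag_tool = MemoryToolRuntimeImpl(
        config=MagicMock(), vector_io_api=MagicMock(), inference_api=MagicMock(), files_api=MagicMock()
    )
    with pytest.raises(ValueError):
        await rag_tool.query(content=MagicMock(), vector_store_ids=[])

    # The index wrapper is now VectorStoreWithIndex and takes vector_store=
    # (formerly VectorDBWithIndex / vector_db=); construction with mocks is
    # exactly what the renamed tests do.
    mock_vector_store = MagicMock()
    mock_vector_store.embedding_model = "test-model"
    mock_vector_store.embedding_dimension = 3
    VectorStoreWithIndex(vector_store=mock_vector_store, index=AsyncMock(), inference_api=AsyncMock())


if __name__ == "__main__":
    asyncio.run(check_renamed_surface())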