Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 09:53:45 +00:00)
chore!: BREAKING CHANGE: vector_db_id -> vector_store_id (#3923)
# What does this PR do?

Renames `vector_db_id` to `vector_store_id` across the vector_io API surface; this is a breaking change.

## Test Plan

CI vector_io tests will fail until the next client sync; they pass with https://github.com/llamastack/llama-stack-client-python/pull/286 checked out locally.
This commit is contained in:
parent b6954c9882
commit b7dd3f5c56

29 changed files with 175 additions and 175 deletions
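For downstream code, the migration is a keyword rename at every `vector_io` call site. A minimal before/after sketch; `client`, `store`, and `chunks` are placeholder names standing in for whatever objects your code already has, and the client construction itself is not part of this diff:

```python
# Before this change (no longer accepted by the renamed API):
# client.vector_io.insert(vector_db_id=store.id, chunks=chunks)

# After this change: the keyword argument is vector_store_id.
client.vector_io.insert(
    vector_store_id=store.id,  # renamed from vector_db_id
    chunks=chunks,
)
```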
@@ -367,7 +367,7 @@ def test_openai_vector_store_with_chunks(
 
     # Insert chunks using the native LlamaStack API (since OpenAI API doesn't have direct chunk insertion)
     llama_client.vector_io.insert(
-        vector_db_id=vector_store.id,
+        vector_store_id=vector_store.id,
         chunks=sample_chunks,
     )
 
@@ -434,7 +434,7 @@ def test_openai_vector_store_search_relevance(
 
     # Insert chunks using native API
     llama_client.vector_io.insert(
-        vector_db_id=vector_store.id,
+        vector_store_id=vector_store.id,
         chunks=sample_chunks,
     )
 
@@ -484,7 +484,7 @@ def test_openai_vector_store_search_with_ranking_options(
 
     # Insert chunks
     llama_client.vector_io.insert(
-        vector_db_id=vector_store.id,
+        vector_store_id=vector_store.id,
         chunks=sample_chunks,
     )
 
@@ -544,7 +544,7 @@ def test_openai_vector_store_search_with_high_score_filter(
 
     # Insert chunks
     llama_client.vector_io.insert(
-        vector_db_id=vector_store.id,
+        vector_store_id=vector_store.id,
         chunks=sample_chunks,
     )
 
@@ -610,7 +610,7 @@ def test_openai_vector_store_search_with_max_num_results(
 
     # Insert chunks
     llama_client.vector_io.insert(
-        vector_db_id=vector_store.id,
+        vector_store_id=vector_store.id,
         chunks=sample_chunks,
     )
 
@@ -1175,7 +1175,7 @@ def test_openai_vector_store_search_modes(
     )
 
     client_with_models.vector_io.insert(
-        vector_db_id=vector_store.id,
+        vector_store_id=vector_store.id,
         chunks=sample_chunks,
     )
     query = "Python programming language"
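Every hunk above follows the pattern the first comment spells out: the store is created through the OpenAI-compatible vector store API, while chunks go in through the native vector_io API under the renamed keyword. A hedged sketch of that hybrid flow; the `vector_stores.create` call shape and the contents of `sample_chunks` are assumptions, since neither appears in this diff:

```python
# Create the store via the OpenAI-compatible surface (assumed call shape).
vector_store = llama_client.vector_stores.create(name="my_test_store")

# Insert chunks via the native LlamaStack API, since the OpenAI-compatible
# API has no direct chunk-insertion endpoint. Note the renamed keyword.
llama_client.vector_io.insert(
    vector_store_id=vector_store.id,
    chunks=sample_chunks,
)
```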
@@ -123,12 +123,12 @@ def test_insert_chunks(
     actual_vector_store_id = create_response.id
 
     client_with_empty_registry.vector_io.insert(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         chunks=sample_chunks,
     )
 
     response = client_with_empty_registry.vector_io.query(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         query="What is the capital of France?",
     )
     assert response is not None
@@ -137,7 +137,7 @@ def test_insert_chunks(
 
     query, expected_doc_id = test_case
     response = client_with_empty_registry.vector_io.query(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         query=query,
     )
     assert response is not None
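The rename applies symmetrically to `query`. A sketch of consuming a query result after this change; the `chunks`/`scores` fields on the response are inferred from the `QueryChunksResponse` usage in the unit tests further down, so treat that field access as an assumption:

```python
response = client_with_empty_registry.vector_io.query(
    vector_store_id=actual_vector_store_id,  # was vector_db_id
    query="What is the capital of France?",
)
# The response pairs retrieved chunks with their similarity scores.
for chunk, score in zip(response.chunks, response.scores):
    print(f"{score:.3f}: {chunk.content}")
```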
@@ -174,13 +174,13 @@ def test_insert_chunks_with_precomputed_embeddings(
     ]
 
     client_with_empty_registry.vector_io.insert(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         chunks=chunks_with_embeddings,
     )
 
     provider = [p.provider_id for p in client_with_empty_registry.providers.list() if p.api == "vector_io"][0]
     response = client_with_empty_registry.vector_io.query(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         query="precomputed embedding test",
         params=vector_io_provider_params_dict.get(provider, None),
     )
@@ -224,13 +224,13 @@ def test_query_returns_valid_object_when_identical_to_embedding_in_vdb(
     ]
 
     client_with_empty_registry.vector_io.insert(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         chunks=chunks_with_embeddings,
     )
 
     provider = [p.provider_id for p in client_with_empty_registry.providers.list() if p.api == "vector_io"][0]
     response = client_with_empty_registry.vector_io.query(
-        vector_db_id=actual_vector_store_id,
+        vector_store_id=actual_vector_store_id,
         query="duplicate",
         params=vector_io_provider_params_dict.get(provider, None),
    )
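These two hunks cover the precomputed-embedding path, where chunks ship with their own vectors and the server skips embedding generation. A hedged sketch of constructing such a chunk: the `embedding` field name and the vector dimension are assumptions based on the `chunks_with_embeddings` naming, not shown in this diff:

```python
chunks_with_embeddings = [
    Chunk(
        content="duplicate",
        metadata={"document_id": "doc1"},
        embedding=[0.1] * 384,  # assumed field; dimension must match the store's embedding model
    ),
]
client_with_empty_registry.vector_io.insert(
    vector_store_id=actual_vector_store_id,
    chunks=chunks_with_embeddings,
)
```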
@@ -23,14 +23,14 @@ class TestRagQuery:
             config=MagicMock(), vector_io_api=MagicMock(), inference_api=MagicMock(), files_api=MagicMock()
         )
         with pytest.raises(ValueError):
-            await rag_tool.query(content=MagicMock(), vector_db_ids=[])
+            await rag_tool.query(content=MagicMock(), vector_store_ids=[])
 
     async def test_query_chunk_metadata_handling(self):
         rag_tool = MemoryToolRuntimeImpl(
             config=MagicMock(), vector_io_api=MagicMock(), inference_api=MagicMock(), files_api=MagicMock()
         )
         content = "test query content"
-        vector_db_ids = ["db1"]
+        vector_store_ids = ["db1"]
 
         chunk_metadata = ChunkMetadata(
             document_id="doc1",
@@ -55,7 +55,7 @@ class TestRagQuery:
         query_response = QueryChunksResponse(chunks=[chunk], scores=[1.0])
 
         rag_tool.vector_io_api.query_chunks = AsyncMock(return_value=query_response)
-        result = await rag_tool.query(content=content, vector_db_ids=vector_db_ids)
+        result = await rag_tool.query(content=content, vector_store_ids=vector_store_ids)
 
         assert result is not None
         expected_metadata_string = (
@@ -90,7 +90,7 @@ class TestRagQuery:
             files_api=MagicMock(),
         )
 
-        vector_db_ids = ["db1", "db2"]
+        vector_store_ids = ["db1", "db2"]
 
         # Fake chunks from each DB
         chunk_metadata1 = ChunkMetadata(
@@ -101,7 +101,7 @@ class TestRagQuery:
         )
         chunk1 = Chunk(
             content="chunk from db1",
-            metadata={"vector_db_id": "db1", "document_id": "doc1"},
+            metadata={"vector_store_id": "db1", "document_id": "doc1"},
             stored_chunk_id="c1",
             chunk_metadata=chunk_metadata1,
         )
@@ -114,7 +114,7 @@ class TestRagQuery:
         )
         chunk2 = Chunk(
             content="chunk from db2",
-            metadata={"vector_db_id": "db2", "document_id": "doc2"},
+            metadata={"vector_store_id": "db2", "document_id": "doc2"},
             stored_chunk_id="c2",
             chunk_metadata=chunk_metadata2,
         )
@@ -126,13 +126,13 @@ class TestRagQuery:
             ]
         )
 
-        result = await rag_tool.query(content="test", vector_db_ids=vector_db_ids)
+        result = await rag_tool.query(content="test", vector_store_ids=vector_store_ids)
         returned_chunks = result.metadata["chunks"]
         returned_scores = result.metadata["scores"]
         returned_doc_ids = result.metadata["document_ids"]
-        returned_vector_db_ids = result.metadata["vector_db_ids"]
+        returned_vector_store_ids = result.metadata["vector_store_ids"]
 
         assert returned_chunks == ["chunk from db1", "chunk from db2"]
         assert returned_scores == (0.9, 0.8)
         assert returned_doc_ids == ["doc1", "doc2"]
-        assert returned_vector_db_ids == ["db1", "db2"]
+        assert returned_vector_store_ids == ["db1", "db2"]
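At the RAG tool layer the rename surfaces twice: in the `query(..., vector_store_ids=[...])` signature and in the `vector_store_ids` key of the result metadata. A sketch of the new call shape, mirroring the mocked pattern these unit tests use; imports for `MemoryToolRuntimeImpl`, `Chunk`, and `QueryChunksResponse` are assumed to match the test module's:

```python
from unittest.mock import AsyncMock, MagicMock

async def rename_sketch():  # run inside an async test, as these tests do
    rag_tool = MemoryToolRuntimeImpl(
        config=MagicMock(), vector_io_api=MagicMock(), inference_api=MagicMock(), files_api=MagicMock()
    )
    # Stub the underlying vector_io call with a single scored chunk.
    chunk = Chunk(content="chunk from db1", metadata={"vector_store_id": "db1", "document_id": "doc1"})
    rag_tool.vector_io_api.query_chunks = AsyncMock(return_value=QueryChunksResponse(chunks=[chunk], scores=[0.9]))

    result = await rag_tool.query(content="test", vector_store_ids=["db1"])  # was vector_db_ids
    assert result.metadata["vector_store_ids"] == ["db1"]  # result metadata key renamed too
```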