Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-18 02:42:31 +00:00)
feat: Adding ChunkMetadata
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
This commit is contained in: parent 6fde601765, commit f90fce218e
13 changed files with 416 additions and 206 deletions
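For orientation, here is a minimal sketch of the ChunkMetadata shape this diff exercises. The field names (document_id, chunk_id, source) come from the fixture and tests below; the base class, types, and optionality are assumptions, not the actual llama_stack definition.

# Hypothetical sketch only; see llama_stack.apis.vector_io for the real model.
from pydantic import BaseModel

class ChunkMetadata(BaseModel):
    chunk_id: str | None = None      # stable identifier for the chunk (assumed optional)
    document_id: str | None = None   # identifier of the source document (assumed optional)
    source: str | None = None        # free-form provenance string (assumed optional)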
@@ -9,7 +9,7 @@ import random
 import numpy as np
 import pytest
 
-from llama_stack.apis.vector_io import Chunk
+from llama_stack.apis.vector_io import Chunk, ChunkMetadata
 
 EMBEDDING_DIMENSION = 384
 
@@ -33,6 +33,20 @@ def sample_chunks():
         for j in range(k)
         for i in range(n)
     ]
+    sample.extend(
+        [
+            Chunk(
+                content=f"Sentence {i} from document {j + k}",
+                chunk_metadata=ChunkMetadata(
+                    document_id=f"document-{j + k}",
+                    chunk_id=f"document-{j}-chunk-{i}",
+                    source=f"example source-{j + k}-{i}",
+                ),
+            )
+            for j in range(k)
+            for i in range(n)
+        ]
+    )
     return sample
 
 
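The fixture now extends the original list with a second batch of chunks whose provenance lives in the structured chunk_metadata field rather than the free-form metadata dict. A minimal standalone illustration of the two shapes, with the legacy dict style taken from the chunk_utils tests below and the structured style mirroring the fixture above:

from llama_stack.apis.vector_io import Chunk, ChunkMetadata

# Legacy shape: provenance carried in the free-form metadata dict.
legacy_chunk = Chunk(content="test", metadata={"document_id": "doc-1"})

# New shape: provenance carried in the structured ChunkMetadata object.
structured_chunk = Chunk(
    content="Sentence 0 from document 2",
    chunk_metadata=ChunkMetadata(
        document_id="document-2",
        chunk_id="document-0-chunk-0",
        source="example source-2-0",
    ),
)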
tests/unit/providers/vector_io/test_chunk_utils.py (new file, 53 lines)
@@ -0,0 +1,53 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.apis.vector_io import Chunk, ChunkMetadata
+from llama_stack.providers.utils.vector_io.chunk_utils import extract_or_generate_chunk_id, generate_chunk_id
+
+# This test is a unit test for the chunk_utils.py helpers. This should only contain
+# tests which are specific to this file. More general (API-level) tests should be placed in
+# tests/integration/vector_io/
+#
+# How to run this test:
+#
+# pytest tests/unit/providers/vector_io/test_chunk_utils.py \
+#   -v -s --tb=short --disable-warnings --asyncio-mode=auto
+
+
+def test_generate_chunk_id():
+    chunks = [
+        Chunk(content="test", metadata={"document_id": "doc-1"}),
+        Chunk(content="test ", metadata={"document_id": "doc-1"}),
+        Chunk(content="test 3", metadata={"document_id": "doc-1"}),
+    ]
+
+    chunk_ids = sorted([generate_chunk_id(chunk.metadata["document_id"], chunk.content) for chunk in chunks])
+    assert chunk_ids == [
+        "177a1368-f6a8-0c50-6e92-18677f2c3de3",
+        "bc744db3-1b25-0a9c-cdff-b6ba3df73c36",
+        "f68df25d-d9aa-ab4d-5684-64a233add20d",
+    ]
+
+
+def test_extract_or_generate_chunk_id():
+    # Test with existing chunk ID
+    chunk_with_id = Chunk(content="test", metadata={"document_id": "existing-id"})
+    assert extract_or_generate_chunk_id(chunk_with_id) == "84ededcc-b80b-a83e-1a20-ca6515a11350"
+
+    # Test with document ID in metadata
+    chunk_with_doc_id = Chunk(content="test", metadata={"document_id": "doc-1"})
+    assert extract_or_generate_chunk_id(chunk_with_doc_id) == generate_chunk_id("doc-1", "test")
+
+    # Test chunks with ChunkMetadata
+    chunk_with_metadata = Chunk(
+        content="test", metadata={"document_id": "existing-id"}, chunk_metadata=ChunkMetadata(chunk_id="chunk-id-1")
+    )
+    assert extract_or_generate_chunk_id(chunk_with_metadata) == "chunk-id-1"
+
+    # Test with no ID or document ID
+    chunk_without_id = Chunk(content="test")
+    generated_id = extract_or_generate_chunk_id(chunk_without_id)
+    assert isinstance(generated_id, str) and len(generated_id) == 36  # Should be a valid UUID
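The tests above pin generate_chunk_id to specific UUID-formatted strings and expect extract_or_generate_chunk_id to prefer an explicit ChunkMetadata.chunk_id, then a deterministic ID derived from document_id and content, then a freshly generated UUID. The following is only a plausible sketch consistent with that behavior; the hashing scheme, separator, and fallback order are assumptions, not the actual chunk_utils.py implementation.

# Hypothetical sketch of the helpers exercised above (assumptions marked inline).
import hashlib
import uuid

from llama_stack.apis.vector_io import Chunk


def generate_chunk_id(document_id: str, chunk_text: str) -> str:
    # Deterministic 36-character UUID string derived from document_id + content.
    # The exact hash function and separator are assumptions.
    digest = hashlib.md5(f"{document_id}:{chunk_text}".encode("utf-8")).hexdigest()
    return str(uuid.UUID(digest))


def extract_or_generate_chunk_id(chunk: Chunk) -> str:
    # Assumed fallback order: (1) explicit chunk_id on ChunkMetadata,
    # (2) deterministic ID from the metadata dict's document_id,
    # (3) a random UUID as a last resort.
    if chunk.chunk_metadata is not None and chunk.chunk_metadata.chunk_id:
        return chunk.chunk_metadata.chunk_id
    if "document_id" in chunk.metadata:
        return generate_chunk_id(chunk.metadata["document_id"], str(chunk.content))
    return str(uuid.uuid4())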
@@ -81,7 +81,7 @@ __QUERY = "Sample query"
 
 
 @pytest.mark.asyncio
-@pytest.mark.parametrize("max_query_chunks, expected_chunks", [(2, 2), (100, 30)])
+@pytest.mark.parametrize("max_query_chunks, expected_chunks", [(2, 2), (100, 60)])
 async def test_qdrant_adapter_returns_expected_chunks(
     qdrant_adapter: QdrantVectorIOAdapter,
     vector_db_id,
@@ -15,7 +15,6 @@ from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import (
     SQLiteVecIndex,
     SQLiteVecVectorIOAdapter,
     _create_sqlite_connection,
-    generate_chunk_id,
 )
 
 # This test is a unit test for the SQLiteVecVectorIOAdapter class. This should only contain
@@ -150,21 +149,6 @@ async def sqlite_vec_adapter(sqlite_connection):
     await adapter.shutdown()
 
 
-def test_generate_chunk_id():
-    chunks = [
-        Chunk(content="test", metadata={"document_id": "doc-1"}),
-        Chunk(content="test ", metadata={"document_id": "doc-1"}),
-        Chunk(content="test 3", metadata={"document_id": "doc-1"}),
-    ]
-
-    chunk_ids = sorted([generate_chunk_id(chunk.metadata["document_id"], chunk.content) for chunk in chunks])
-    assert chunk_ids == [
-        "177a1368-f6a8-0c50-6e92-18677f2c3de3",
-        "bc744db3-1b25-0a9c-cdff-b6ba3df73c36",
-        "f68df25d-d9aa-ab4d-5684-64a233add20d",
-    ]
-
-
 @pytest.mark.asyncio
 async def test_query_chunks_hybrid_no_keyword_matches(sqlite_vec_index, sample_chunks, sample_embeddings):
     """Test hybrid search when keyword search returns no matches - should still return vector results."""
@@ -339,7 +323,7 @@ async def test_query_chunks_hybrid_mixed_results(sqlite_vec_index, sample_chunks
     # Verify scores are in descending order
     assert all(response.scores[i] >= response.scores[i + 1] for i in range(len(response.scores) - 1))
     # Verify we get results from both the vector-similar document and keyword-matched document
-    doc_ids = {chunk.metadata["document_id"] for chunk in response.chunks}
+    doc_ids = {chunk.metadata.get("document_id") or chunk.chunk_metadata.document_id for chunk in response.chunks}
     assert "document-0" in doc_ids  # From vector search
     assert "document-2" in doc_ids  # From keyword search
 
@@ -364,7 +348,11 @@ async def test_query_chunks_hybrid_weighted_reranker_parametrization(
         reranker_params={"alpha": 1.0},
     )
     assert len(response.chunks) > 0  # Should get at least one result
-    assert any("document-0" in chunk.metadata["document_id"] for chunk in response.chunks)
+    assert any(
+        "document-0"
+        in (chunk.metadata.get("document_id") or (chunk.chunk_metadata.document_id if chunk.chunk_metadata else ""))
+        for chunk in response.chunks
+    )
 
     # alpha=0.0 (should behave like pure vector)
     response = await sqlite_vec_index.query_hybrid(
@@ -389,7 +377,11 @@ async def test_query_chunks_hybrid_weighted_reranker_parametrization(
         reranker_params={"alpha": 0.7},
     )
     assert len(response.chunks) > 0  # Should get at least one result
-    assert any("document-0" in chunk.metadata["document_id"] for chunk in response.chunks)
+    assert any(
+        "document-0"
+        in (chunk.metadata.get("document_id") or (chunk.chunk_metadata.document_id if chunk.chunk_metadata else ""))
+        for chunk in response.chunks
+    )
 
 
 @pytest.mark.asyncio
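The same fallback appears in several of the assertions above: read document_id from the legacy metadata dict first, then from the structured chunk_metadata. A hypothetical helper capturing that pattern (name and signature are illustrative, not part of the diff):

def doc_id_of(chunk) -> str:
    # Prefer the free-form metadata dict, fall back to structured ChunkMetadata.
    return chunk.metadata.get("document_id") or (
        chunk.chunk_metadata.document_id if chunk.chunk_metadata else ""
    )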