Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 04:04:14 +00:00
fix(vector-io): handle missing document_id in insert_chunks
Fixed a KeyError raised when chunks don't carry a document_id in either metadata or chunk_metadata. Updated logging to extract document_id safely via getattr, and updated RAG memory to handle document_id appearing in different locations. Added a test covering missing-document_id scenarios. Fixes issue #3494, where /v1/vector-io/insert would crash with a KeyError.
parent a50b63906c
commit a14f79a362
3 changed files with 29 additions and 3 deletions
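The fix described in the commit message amounts to looking up document_id defensively rather than indexing into a dict that may not contain it. Below is a minimal sketch of that lookup order (metadata first, then chunk_metadata via getattr). The helper name resolve_document_id and the stand-in dataclasses are illustrative only, not the code added in this commit; the real Chunk and ChunkMetadata live in llama_stack.apis.vector_io.

# Illustrative sketch only: stand-ins for llama_stack.apis.vector_io.Chunk /
# ChunkMetadata, plus a hypothetical helper showing the defensive lookup order.
from dataclasses import dataclass, field

@dataclass
class ChunkMetadata:  # stand-in for the real ChunkMetadata
    document_id: str | None = None

@dataclass
class Chunk:  # stand-in for the real Chunk
    content: str
    metadata: dict = field(default_factory=dict)
    chunk_metadata: ChunkMetadata | None = None

def resolve_document_id(chunk: Chunk) -> str | None:
    # dict.get and getattr return None instead of raising when the key/attribute is absent
    doc_id = (chunk.metadata or {}).get("document_id")
    if doc_id is None:
        doc_id = getattr(chunk.chunk_metadata, "document_id", None)
    return doc_id

chunks = [
    Chunk(content="has doc_id in metadata", metadata={"document_id": "doc-1"}),
    Chunk(content="no doc_id anywhere", metadata={"source": "test"}),
    Chunk(content="doc_id in chunk_metadata", chunk_metadata=ChunkMetadata(document_id="doc-3")),
]
print([resolve_document_id(c) for c in chunks])  # ['doc-1', None, 'doc-3']

With a lookup like this, the insert path can log a missing document_id as None for the middle chunk instead of raising, which is the behavior the new test in the diff below exercises.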
@@ -113,6 +113,25 @@ async def test_insert_chunks_missing_db_raises(vector_io_adapter):
     await vector_io_adapter.insert_chunks("db_not_exist", [])
 
 
+async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
+    """Ensure no KeyError when document_id is missing or in different places."""
+    from llama_stack.apis.vector_io import Chunk, ChunkMetadata
+
+    fake_index = AsyncMock()
+    vector_io_adapter.cache["db1"] = fake_index
+
+    # Various document_id scenarios that shouldn't crash
+    chunks = [
+        Chunk(content="has doc_id in metadata", metadata={"document_id": "doc-1"}),
+        Chunk(content="no doc_id anywhere", metadata={"source": "test"}),
+        Chunk(content="doc_id in chunk_metadata", chunk_metadata=ChunkMetadata(document_id="doc-3")),
+    ]
+
+    # Should work without KeyError
+    await vector_io_adapter.insert_chunks("db1", chunks)
+    fake_index.insert_chunks.assert_awaited_once()
+
+
 async def test_query_chunks_calls_underlying_index_and_returns(vector_io_adapter):
     expected = QueryChunksResponse(chunks=[Chunk(content="c1")], scores=[0.1])
     fake_index = AsyncMock(query_chunks=AsyncMock(return_value=expected))
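For local verification, the new test can be selected by name. This is a hedged example that assumes pytest is the test runner (the vector_io_adapter fixture style suggests it) and omits the file path, which isn't shown in this extract.

import pytest

# Hypothetical invocation: run only the new test by keyword match (no path,
# since the changed test file's location isn't shown here).
exit_code = pytest.main(["-k", "test_insert_chunks_with_missing_document_id", "-v"])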