Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 20:14:13 +00:00
fix(vector-io): handle missing document_id in insert_chunks
Fixed a KeyError raised when chunks have no document_id in either metadata or chunk_metadata. Logging now extracts document_id safely via getattr, and RAG memory handles the different places a document_id can live. Added a test covering the missing-document_id scenario. Fixes issue #3494, where /v1/vector-io/insert would crash with a KeyError.
parent a50b63906c
commit a14f79a362
3 changed files with 29 additions and 3 deletions
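As a rough illustration of the safe-extraction pattern described in the commit message (the helper name and Chunk/ChunkMetadata shapes below are assumptions for the sketch, not the exact code from the patch), the idea is to prefer metadata["document_id"], fall back to chunk_metadata.document_id, and return None instead of raising:

```python
from dataclasses import dataclass, field
from typing import Any, Optional


@dataclass
class ChunkMetadata:
    document_id: Optional[str] = None


@dataclass
class Chunk:
    content: str
    metadata: dict[str, Any] = field(default_factory=dict)
    chunk_metadata: Optional[ChunkMetadata] = None


def extract_document_id(chunk: Chunk) -> Optional[str]:
    # Hypothetical helper: look in metadata first, then chunk_metadata,
    # and never raise a KeyError when the id is missing.
    doc_id = chunk.metadata.get("document_id")
    if doc_id is None and chunk.chunk_metadata is not None:
        # getattr keeps this safe even if chunk_metadata lacks the attribute
        doc_id = getattr(chunk.chunk_metadata, "document_id", None)
    return doc_id


print(extract_document_id(Chunk(content="hello world")))  # -> None, no KeyError
print(extract_document_id(Chunk(content="x", metadata={"document_id": "doc-1"})))  # -> doc-1
```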
@@ -279,7 +279,10 @@ class MemoryToolRuntimeImpl(ToolGroupsProtocolPrivate, ToolRuntime, RAGToolRunti
         return RAGQueryResult(
             content=picked,
             metadata={
-                "document_ids": [c.metadata["document_id"] for c in chunks[: len(picked)]],
+                "document_ids": [
+                    c.metadata.get("document_id") or (c.chunk_metadata.document_id if c.chunk_metadata else None)
+                    for c in chunks[: len(picked)]
+                ],
                 "chunks": [c.content for c in chunks[: len(picked)]],
                 "scores": scores[: len(picked)],
                 "vector_db_ids": [c.metadata["vector_db_id"] for c in chunks[: len(picked)]],
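A minimal check of the new list-comprehension expression from the hunk above (using SimpleNamespace stand-ins for chunks, an assumption for this sketch rather than the project's real Chunk type) shows that chunks missing document_id now yield the chunk_metadata value or None instead of raising:

```python
from types import SimpleNamespace

# Stand-ins: document_id in metadata, only in chunk_metadata, and absent entirely.
chunks = [
    SimpleNamespace(metadata={"document_id": "doc-1"}, chunk_metadata=None),
    SimpleNamespace(metadata={}, chunk_metadata=SimpleNamespace(document_id="doc-2")),
    SimpleNamespace(metadata={}, chunk_metadata=None),
]

document_ids = [
    c.metadata.get("document_id") or (c.chunk_metadata.document_id if c.chunk_metadata else None)
    for c in chunks
]
print(document_ids)  # ['doc-1', 'doc-2', None]
```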