From 1871cb9a71bbe3219803a754f01baa9d1174bf33 Mon Sep 17 00:00:00 2001
From: ilya-kolchinsky
Date: Wed, 9 Apr 2025 19:53:59 +0200
Subject: [PATCH] Added coverage for the case where a vector DB was provided
 but no chunks were retrieved.

---
 llama_stack/providers/inline/tool_runtime/rag/memory.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/llama_stack/providers/inline/tool_runtime/rag/memory.py b/llama_stack/providers/inline/tool_runtime/rag/memory.py
index 1f4b6d321..b1204e651 100644
--- a/llama_stack/providers/inline/tool_runtime/rag/memory.py
+++ b/llama_stack/providers/inline/tool_runtime/rag/memory.py
@@ -104,7 +104,9 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
         query_config: Optional[RAGQueryConfig] = None,
     ) -> RAGQueryResult:
         if not vector_db_ids:
-            raise ValueError("No vector DBs were provided to the RAG tool. Please provide at least one vector DB ID.")
+            raise ValueError(
+                "No vector DBs were provided to the knowledge search tool. Please provide at least one vector DB ID."
+            )
 
         query_config = query_config or RAGQueryConfig()
         query = await generate_rag_query(
@@ -127,7 +129,7 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
         scores = [s for r in results for s in r.scores]
 
         if not chunks:
-            return RAGQueryResult(content=None)
+            raise ValueError("The knowledge search tool did not find any information relevant to the query.")
 
         # sort by score
         chunks, scores = zip(*sorted(zip(chunks, scores, strict=False), key=lambda x: x[1], reverse=True), strict=False)  # type: ignore
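
With this patch, an empty retrieval raises ValueError instead of returning RAGQueryResult(content=None), so callers that previously checked for empty content need to catch the exception. A minimal caller sketch follows; the rag_tool handle and the search_with_fallback helper are hypothetical and only illustrate the changed contract, not code from this repository.

async def search_with_fallback(rag_tool, query, vector_db_ids):
    # Hypothetical helper: rag_tool is assumed to expose the query() method
    # patched above (MemoryToolRuntimeImpl.query).
    try:
        result = await rag_tool.query(content=query, vector_db_ids=vector_db_ids)
    except ValueError as exc:
        # Raised when no vector DB IDs are supplied or when no relevant
        # chunks are found, per the two raise sites in this patch.
        return f"knowledge_search failed: {exc}"
    return result.content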