Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-02 08:44:44 +00:00
Added coverage for the case where a vector DB was provided but no chunks were retrieved.
This commit is contained in:
parent 127b62dee0
commit 1871cb9a71
1 changed file with 4 additions and 2 deletions
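Previously, a query that matched no chunks came back as a RAGQueryResult with content=None; after this change it surfaces as a ValueError, the same way a missing vector DB ID does. The sketch below shows how a caller might adapt. It is illustrative only: the runtime handle `rag_tool`, the `content=` keyword, and the import path are assumptions, not code from this commit.

# Illustrative sketch, not part of this commit: `rag_tool` and the exact
# query(...) signature are assumed from surrounding context.
from llama_stack.apis.tools import RAGQueryConfig  # assumed import path

async def search_knowledge(rag_tool, question: str, vector_db_ids: list[str]):
    try:
        result = await rag_tool.query(
            content=question,
            vector_db_ids=vector_db_ids,
            query_config=RAGQueryConfig(),
        )
    except ValueError as err:
        # After this commit, "no relevant chunks found" is reported here
        # instead of as a RAGQueryResult with content=None.
        return f"knowledge search failed: {err}"
    return result.content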
@@ -104,7 +104,9 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
         query_config: Optional[RAGQueryConfig] = None,
     ) -> RAGQueryResult:
         if not vector_db_ids:
-            raise ValueError("No vector DBs were provided to the RAG tool. Please provide at least one vector DB ID.")
+            raise ValueError(
+                "No vector DBs were provided to the knowledge search tool. Please provide at least one vector DB ID."
+            )
 
         query_config = query_config or RAGQueryConfig()
         query = await generate_rag_query(
@@ -127,7 +129,7 @@ class MemoryToolRuntimeImpl(ToolsProtocolPrivate, ToolRuntime, RAGToolRuntime):
         scores = [s for r in results for s in r.scores]
 
         if not chunks:
-            return RAGQueryResult(content=None)
+            raise ValueError("The knowledge search tool did not find any information relevant to the query.")
 
         # sort by score
         chunks, scores = zip(*sorted(zip(chunks, scores, strict=False), key=lambda x: x[1], reverse=True), strict=False)  # type: ignore
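A minimal regression test for the two error paths might look like the following. This is a sketch under stated assumptions: `make_memory_tool_with_empty_db` is a hypothetical stand-in for however the real test suite constructs a MemoryToolRuntimeImpl whose vector DB returns no chunks, and pytest-asyncio is assumed to be available.

import pytest

# Hypothetical helper, not part of this commit: builds a MemoryToolRuntimeImpl
# backed by a vector DB that returns no chunks for any query.
def make_memory_tool_with_empty_db(): ...

@pytest.mark.asyncio
async def test_query_with_no_matching_chunks_raises():
    tool = make_memory_tool_with_empty_db()
    with pytest.raises(ValueError, match="did not find any information"):
        await tool.query(content="question with no matches", vector_db_ids=["empty-db"])

@pytest.mark.asyncio
async def test_query_without_vector_dbs_raises():
    tool = make_memory_tool_with_empty_db()
    with pytest.raises(ValueError, match="at least one vector DB ID"):
        await tool.query(content="any question", vector_db_ids=[])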