diff --git a/docs/source/providers/agents/index.md b/docs/source/providers/agents/index.md
index a2c48d4b9..046db6bff 100644
--- a/docs/source/providers/agents/index.md
+++ b/docs/source/providers/agents/index.md
@@ -4,12 +4,12 @@
 
 Agents API for creating and interacting with agentic systems.
 
-    Main functionalities provided by this API:
-    - Create agents with specific instructions and ability to use tools.
-    - Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn".
-    - Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).
-    - Agents can be provided with various shields (see the Safety API for more details).
-    - Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details.
+Main functionalities provided by this API:
+- Create agents with specific instructions and ability to use tools.
+- Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn".
+- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).
+- Agents can be provided with various shields (see the Safety API for more details).
+- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details.
 
 This section contains documentation for all available providers for the **agents** API.
 
diff --git a/docs/source/providers/batches/index.md b/docs/source/providers/batches/index.md
index 2a39a626c..f427a599b 100644
--- a/docs/source/providers/batches/index.md
+++ b/docs/source/providers/batches/index.md
@@ -4,11 +4,11 @@
 
 Protocol for batch processing API operations.
 
-    The Batches API enables efficient processing of multiple requests in a single operation,
-    particularly useful for processing large datasets, batch evaluation workflows, and
-    cost-effective inference at scale.
+The Batches API enables efficient processing of multiple requests in a single operation,
+particularly useful for processing large datasets, batch evaluation workflows, and
+cost-effective inference at scale.
 
-    Note: This API is currently under active development and may undergo changes.
+Note: This API is currently under active development and may undergo changes.
 
 This section contains documentation for all available providers for the **batches** API.
 
diff --git a/docs/source/providers/inference/index.md b/docs/source/providers/inference/index.md
index b6d215474..291e8e525 100644
--- a/docs/source/providers/inference/index.md
+++ b/docs/source/providers/inference/index.md
@@ -4,9 +4,9 @@
 
 Llama Stack Inference API for generating completions, chat completions, and embeddings.
 
-    This API provides the raw interface to the underlying models. Two kinds of models are supported:
-    - LLM models: these models generate "raw" and "chat" (conversational) completions.
-    - Embedding models: these models generate embeddings to be used for semantic search.
+This API provides the raw interface to the underlying models. Two kinds of models are supported:
+- LLM models: these models generate "raw" and "chat" (conversational) completions.
+- Embedding models: these models generate embeddings to be used for semantic search.
 
 This section contains documentation for all available providers for the **inference** API.
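The inference index above distinguishes two kinds of models: LLMs for completions and embedding models for semantic search. Below is a minimal client-side sketch of that split, assuming the `llama_stack_client` Python package with its `inference.chat_completion` and `inference.embeddings` methods as in earlier releases; the method names, model IDs, and port are illustrative assumptions, not something this diff defines.

```python
# Minimal sketch of the two model kinds described in the inference index.
# Assumes llama_stack_client exposes inference.chat_completion and inference.embeddings
# as in earlier releases; model IDs and the server port are placeholders.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# LLM model: "chat" (conversational) completion
chat = client.inference.chat_completion(
    model_id="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "What is the Batches API for?"}],
)
print(chat.completion_message.content)

# Embedding model: vectors to be used for semantic search
emb = client.inference.embeddings(
    model_id="all-MiniLM-L6-v2",
    contents=["cost-effective inference at scale"],
)
print(len(emb.embeddings[0]))
```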
diff --git a/llama_stack/providers/remote/vector_io/chroma/chroma.py b/llama_stack/providers/remote/vector_io/chroma/chroma.py
index 9c7a7732a..98332b37e 100644
--- a/llama_stack/providers/remote/vector_io/chroma/chroma.py
+++ b/llama_stack/providers/remote/vector_io/chroma/chroma.py
@@ -4,6 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import asyncio
+import heapq
 import json
 import logging
 from typing import Any
@@ -30,6 +31,7 @@ from llama_stack.providers.utils.memory.vector_store import (
     EmbeddingIndex,
     VectorDBWithIndex,
 )
+from llama_stack.providers.utils.vector_io.vector_utils import Reranker
 
 from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig
 
@@ -161,7 +163,55 @@ class ChromaIndex(EmbeddingIndex):
         reranker_type: str,
         reranker_params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
-        raise NotImplementedError("Hybrid search is not supported in Chroma")
+        """
+        Hybrid search combining vector similarity and keyword search using configurable reranking.
+        Args:
+            embedding: The query embedding vector
+            query_string: The text query for keyword search
+            k: Number of results to return
+            score_threshold: Minimum similarity score threshold
+            reranker_type: Type of reranker to use ("rrf" or "weighted")
+            reranker_params: Parameters for the reranker
+        Returns:
+            QueryChunksResponse with combined results
+        """
+        if reranker_params is None:
+            reranker_params = {}
+
+        # Get results from both search methods
+        vector_response = await self.query_vector(embedding, k, score_threshold)
+        keyword_response = await self.query_keyword(query_string, k, score_threshold)
+
+        # Convert responses to score dictionaries using chunk_id
+        vector_scores = {
+            chunk.chunk_id: score for chunk, score in zip(vector_response.chunks, vector_response.scores, strict=False)
+        }
+        keyword_scores = {
+            chunk.chunk_id: score
+            for chunk, score in zip(keyword_response.chunks, keyword_response.scores, strict=False)
+        }
+
+        # Combine scores using the reranking utility
+        combined_scores = Reranker.combine_search_results(vector_scores, keyword_scores, reranker_type, reranker_params)
+
+        # Efficient top-k selection: nlargest only tracks the k best candidates seen so far
+        top_k_items = heapq.nlargest(k, combined_scores.items(), key=lambda x: x[1])
+
+        # Filter by score threshold
+        filtered_items = [(doc_id, score) for doc_id, score in top_k_items if score >= score_threshold]
+
+        # Create a map of chunk_id to chunk for both responses
+        chunk_map = {c.chunk_id: c for c in vector_response.chunks + keyword_response.chunks}
+
+        # Use the map to look up chunks by their IDs
+        chunks = []
+        scores = []
+        for doc_id, score in filtered_items:
+            if doc_id in chunk_map:
+                chunks.append(chunk_map[doc_id])
+                scores.append(score)
+
+        return QueryChunksResponse(chunks=chunks, scores=scores)
 
 
 class ChromaVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
diff --git a/llama_stack/providers/utils/vector_io/vector_utils.py b/llama_stack/providers/utils/vector_io/vector_utils.py
index f2888043e..e6dbcb2b5 100644
--- a/llama_stack/providers/utils/vector_io/vector_utils.py
+++ b/llama_stack/providers/utils/vector_io/vector_utils.py
@@ -37,3 +37,114 @@ def sanitize_collection_name(name: str, weaviate_format=False) -> str:
     else:
         s = proper_case(re.sub(r"[^a-zA-Z0-9]", "", name))
     return s
+
+
+class Reranker:
+    @staticmethod
+    def _normalize_scores(scores: dict[str, float]) -> dict[str, float]:
+        """
+        Normalize scores to 0-1 range using min-max normalization.
+        Args:
+            scores: dictionary of scores with document IDs as keys and scores as values
+        Returns:
+            Normalized scores with document IDs as keys and normalized scores as values
+        """
+        if not scores:
+            return {}
+        min_score, max_score = min(scores.values()), max(scores.values())
+        score_range = max_score - min_score
+        if score_range > 0:
+            return {doc_id: (score - min_score) / score_range for doc_id, score in scores.items()}
+        return dict.fromkeys(scores, 1.0)
+
+    @staticmethod
+    def weighted_rerank(
+        vector_scores: dict[str, float],
+        keyword_scores: dict[str, float],
+        alpha: float = 0.5,
+    ) -> dict[str, float]:
+        """
+        Rerank via weighted average of scores.
+        Args:
+            vector_scores: scores from vector search
+            keyword_scores: scores from keyword search
+            alpha: weight factor between 0 and 1 (default: 0.5)
+                   0 = keyword only, 1 = vector only, 0.5 = equal weight
+        Returns:
+            All unique document IDs with weighted combined scores
+        """
+        all_ids = set(vector_scores.keys()) | set(keyword_scores.keys())
+        normalized_vector_scores = Reranker._normalize_scores(vector_scores)
+        normalized_keyword_scores = Reranker._normalize_scores(keyword_scores)
+
+        # Weighted formula: score = (1-alpha) * keyword_score + alpha * vector_score
+        # alpha=0 means keyword only, alpha=1 means vector only
+        return {
+            doc_id: ((1 - alpha) * normalized_keyword_scores.get(doc_id, 0.0))
+            + (alpha * normalized_vector_scores.get(doc_id, 0.0))
+            for doc_id in all_ids
+        }
+
+    @staticmethod
+    def rrf_rerank(
+        vector_scores: dict[str, float],
+        keyword_scores: dict[str, float],
+        impact_factor: float = 60.0,
+    ) -> dict[str, float]:
+        """
+        Rerank via Reciprocal Rank Fusion.
+        Args:
+            vector_scores: scores from vector search
+            keyword_scores: scores from keyword search
+            impact_factor: impact factor for RRF (default: 60.0)
+        Returns:
+            All unique document IDs with RRF combined scores
+        """
+
+        # Convert scores to ranks
+        vector_ranks = {
+            doc_id: i + 1
+            for i, (doc_id, _) in enumerate(sorted(vector_scores.items(), key=lambda x: x[1], reverse=True))
+        }
+        keyword_ranks = {
+            doc_id: i + 1
+            for i, (doc_id, _) in enumerate(sorted(keyword_scores.items(), key=lambda x: x[1], reverse=True))
+        }
+
+        all_ids = set(vector_scores.keys()) | set(keyword_scores.keys())
+        rrf_scores = {}
+        for doc_id in all_ids:
+            vector_rank = vector_ranks.get(doc_id, float("inf"))
+            keyword_rank = keyword_ranks.get(doc_id, float("inf"))
+
+            # RRF formula: score = 1/(k + r) where k is impact_factor (default: 60.0) and r is the rank
+            rrf_scores[doc_id] = (1.0 / (impact_factor + vector_rank)) + (1.0 / (impact_factor + keyword_rank))
+        return rrf_scores
+
+    @staticmethod
+    def combine_search_results(
+        vector_scores: dict[str, float],
+        keyword_scores: dict[str, float],
+        reranker_type: str = "rrf",
+        reranker_params: dict[str, float] | None = None,
+    ) -> dict[str, float]:
+        """
+        Combine vector and keyword search results using specified reranking strategy.
+        Args:
+            vector_scores: scores from vector search
+            keyword_scores: scores from keyword search
+            reranker_type: type of reranker to use (default: "rrf")
+            reranker_params: parameters for the reranker
+        Returns:
+            All unique document IDs with combined scores
+        """
+        if reranker_params is None:
+            reranker_params = {}
+
+        if reranker_type == "weighted":
+            alpha = reranker_params.get("alpha", 0.5)
+            return Reranker.weighted_rerank(vector_scores, keyword_scores, alpha)
+        else:
+            # Default to RRF for None, RRF, or any unknown types
+            impact_factor = reranker_params.get("impact_factor", 60.0)
+            return Reranker.rrf_rerank(vector_scores, keyword_scores, impact_factor)
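To make the two reranking strategies concrete, here is a small self-contained sketch that exercises the new `Reranker` utility on toy score dictionaries; the document IDs and score values are invented for illustration.

```python
# Toy comparison of the two reranking strategies added in vector_utils.py.
# Document IDs and scores below are invented for illustration.
from llama_stack.providers.utils.vector_io.vector_utils import Reranker

vector_scores = {"doc-a": 0.92, "doc-b": 0.55, "doc-c": 0.20}  # cosine-style similarities
keyword_scores = {"doc-b": 12.0, "doc-c": 9.0, "doc-d": 3.5}   # BM25-style scores

# RRF ignores score magnitudes and uses only ranks:
#   score(doc) = 1 / (impact_factor + rank_vector) + 1 / (impact_factor + rank_keyword)
rrf = Reranker.combine_search_results(vector_scores, keyword_scores, "rrf", {"impact_factor": 60.0})

# Weighted fusion min-max normalizes each score set, then blends them:
#   score(doc) = (1 - alpha) * keyword_norm + alpha * vector_norm
weighted = Reranker.combine_search_results(vector_scores, keyword_scores, "weighted", {"alpha": 0.7})

for name, scores in (("rrf", rrf), ("weighted", weighted)):
    ranking = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    print(name, ranking)
```

Because RRF depends only on ranks, it is insensitive to the very different scales of vector-similarity and keyword scores, while the weighted strategy exposes that trade-off directly through `alpha`.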
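The tail of `ChromaIndex.query_hybrid` then reduces the fused scores to a final result set: a top-k pass with `heapq.nlargest` followed by a score-threshold filter. The fragment below replays just that step on standalone data (values invented for illustration).

```python
import heapq

# Fused scores as combine_search_results would return them (values invented).
combined_scores = {"doc-a": 0.031, "doc-b": 0.029, "doc-c": 0.016, "doc-d": 0.008}
k, score_threshold = 2, 0.01

# nlargest keeps only the k best candidates it has seen instead of sorting everything.
top_k_items = heapq.nlargest(k, combined_scores.items(), key=lambda x: x[1])

# Drop anything below the caller-supplied threshold, preserving the top-k order.
filtered_items = [(doc_id, score) for doc_id, score in top_k_items if score >= score_threshold]
print(filtered_items)  # [('doc-a', 0.031), ('doc-b', 0.029)]
```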