Mirror of https://github.com/meta-llama/llama-stack.git
feat (RAG): Implement configurable search mode in RAGQueryConfig
Signed-off-by: Varsha Prasad Narsing <varshaprasad96@gmail.com>
parent 85b5f3172b
commit e2a7022d3c
14 changed files with 210 additions and 43 deletions
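The commit title indicates that RAGQueryConfig gains a user-configurable search mode. Below is a minimal usage sketch of how a caller might select that mode when issuing a RAG query; the field name `mode`, the `"keyword"` value, and the exact client call shape are assumptions inferred from the title and the `mode: str` parameter visible in the diff, not verified against this change.

# Hypothetical usage sketch: choosing the search mode for a RAG query.
# The `mode` field and the "keyword"/"vector" values are assumptions
# inferred from this commit's title, not a verified API.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

response = client.tool_runtime.rag_tool.query(
    content="How do I configure the Weaviate provider?",
    vector_db_ids=["my-docs"],
    query_config={
        "mode": "keyword",  # assumed field name; omitting it would presumably default to vector search
    },
)
print(response.content)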
@@ -55,7 +55,9 @@ class WeaviateIndex(EmbeddingIndex):
         # TODO: make this async friendly
         collection.data.insert_many(data_objects)
 
-    async def query(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
+    async def query(
+        self, embedding: NDArray, query_string: Optional[str], k: int, score_threshold: float, mode: str
+    ) -> QueryChunksResponse:
         collection = self.client.collections.get(self.collection_name)
 
         results = collection.query.near_vector(
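The new query() signature threads a plain-text query_string and a mode string through to the Weaviate index. Below is a minimal sketch of how such a mode could dispatch between vector and keyword (BM25) search using the weaviate-client v4 API; the helper name, the "keyword"/"vector" semantics, and the branching are assumptions about the implementation, not code taken from this commit.

# Hypothetical helper mirroring the new query() signature: dispatch on `mode`.
# The "keyword" branch and its fallback behavior are assumptions, not this commit's code.
from typing import Optional

from numpy.typing import NDArray
from weaviate.classes.query import MetadataQuery


def weaviate_search(collection, embedding: NDArray, query_string: Optional[str], k: int, mode: str):
    if mode == "keyword" and query_string is not None:
        # Keyword (BM25) search over the stored chunk text.
        return collection.query.bm25(query=query_string, limit=k)
    # Default: nearest-neighbor search on the query embedding.
    return collection.query.near_vector(
        near_vector=embedding.tolist(),
        limit=k,
        return_metadata=MetadataQuery(distance=True),
    )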