Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-12 04:50:39 +00:00
feat: Add openAI compatible APIs to Qdrant (#2465)
Some checks failed
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 5s
Test Llama Stack Build / build-ubi9-container-distribution (push) Failing after 7s
Vector IO Integration Tests / test-matrix (3.12, inline::faiss) (push) Failing after 15s
Test Llama Stack Build / generate-matrix (push) Successful in 9s
Vector IO Integration Tests / test-matrix (3.12, remote::chromadb) (push) Failing after 15s
Vector IO Integration Tests / test-matrix (3.12, inline::milvus) (push) Failing after 19s
Test Llama Stack Build / build-custom-container-distribution (push) Failing after 13s
Test Llama Stack Build / build-single-provider (push) Failing after 13s
Vector IO Integration Tests / test-matrix (3.13, remote::pgvector) (push) Failing after 15s
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 22s
Vector IO Integration Tests / test-matrix (3.13, remote::chromadb) (push) Failing after 14s
Integration Tests (Replay) / discover-tests (push) Successful in 24s
Vector IO Integration Tests / test-matrix (3.13, remote::qdrant) (push) Failing after 16s
Vector IO Integration Tests / test-matrix (3.12, remote::weaviate) (push) Failing after 17s
Vector IO Integration Tests / test-matrix (3.13, remote::weaviate) (push) Failing after 15s
Vector IO Integration Tests / test-matrix (3.13, inline::milvus) (push) Failing after 17s
Vector IO Integration Tests / test-matrix (3.13, inline::faiss) (push) Failing after 18s
Update ReadTheDocs / update-readthedocs (push) Failing after 12s
Unit Tests / unit-tests (3.12) (push) Failing after 11s
Vector IO Integration Tests / test-matrix (3.12, remote::qdrant) (push) Failing after 16s
Python Package Build Test / build (3.12) (push) Failing after 20s
Python Package Build Test / build (3.13) (push) Failing after 18s
Vector IO Integration Tests / test-matrix (3.12, inline::sqlite-vec) (push) Failing after 18s
Test External API and Providers / test-external (venv) (push) Failing after 18s
Unit Tests / unit-tests (3.13) (push) Failing after 19s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 42s
Integration Tests (Replay) / run-replay-mode-tests (push) Failing after 22s
Vector IO Integration Tests / test-matrix (3.13, inline::sqlite-vec) (push) Failing after 1m12s
Vector IO Integration Tests / test-matrix (3.12, remote::pgvector) (push) Failing after 1m15s
Test Llama Stack Build / build (push) Failing after 32s
Pre-commit / pre-commit (push) Successful in 2m39s
# What does this PR do?

Adds support for the OpenAI-compatible Vector Store APIs to the Qdrant vector_io provider.

Closes #2463

## Test Plan

Signed-off-by: Varsha Prasad Narsing <varshaprasad96@gmail.com>
Co-authored-by: ehhuang <ehhuang@users.noreply.github.com>
Co-authored-by: Francisco Arceo <arceofrancisco@gmail.com>
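For context, here is a minimal usage sketch (not part of this PR) of what the new OpenAI-compatible vector store surface looks like against a Qdrant-backed stack. It assumes a Llama Stack server running locally on port 8321 that exposes OpenAI-compatible routes under `/v1/openai/v1`, and a recent `openai` Python client where vector stores live at `client.vector_stores` (older releases expose them under `client.beta.vector_stores`); the base URL, file name, and store name are illustrative assumptions, not taken from this change.

```python
# Hedged sketch: exercise the OpenAI-compatible Vector Store API against a
# Qdrant-backed Llama Stack server. The base URL and port are assumptions;
# adjust to your deployment.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

# Create a vector store (backed here by the Qdrant vector_io provider).
store = client.vector_stores.create(name="docs")

# Upload a file and attach it to the store; its chunks get embedded and indexed.
uploaded = client.files.create(file=open("notes.txt", "rb"), purpose="assistants")
client.vector_stores.files.create(vector_store_id=store.id, file_id=uploaded.id)

# Search the store and print scores (result shape depends on the client version).
results = client.vector_stores.search(vector_store_id=store.id, query="what do the notes say?")
for item in results.data:
    print(getattr(item, "score", None))
```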
This commit is contained in:
parent 194abe7734
commit 1f0766308d

13 changed files with 205 additions and 120 deletions
@@ -24,7 +24,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        vector-io-provider: ["inline::faiss", "inline::sqlite-vec", "inline::milvus", "remote::chromadb", "remote::pgvector", "remote::weaviate"]
+        vector-io-provider: ["inline::faiss", "inline::sqlite-vec", "inline::milvus", "remote::chromadb", "remote::pgvector", "remote::weaviate", "remote::qdrant"]
         python-version: ["3.12", "3.13"]
       fail-fast: false # we want to run all tests regardless of failure

@@ -86,6 +86,29 @@ jobs:
           PGPASSWORD=llamastack psql -h localhost -U llamastack -d llamastack \
             -c "CREATE EXTENSION IF NOT EXISTS vector;"

+      - name: Setup Qdrant
+        if: matrix.vector-io-provider == 'remote::qdrant'
+        run: |
+          docker run --rm -d --pull always \
+            --name qdrant \
+            -p 6333:6333 \
+            qdrant/qdrant
+
+      - name: Wait for Qdrant to be ready
+        if: matrix.vector-io-provider == 'remote::qdrant'
+        run: |
+          echo "Waiting for Qdrant to be ready..."
+          for i in {1..30}; do
+            if curl -s http://localhost:6333/collections | grep -q '"status":"ok"'; then
+              echo "Qdrant is ready!"
+              exit 0
+            fi
+            sleep 2
+          done
+          echo "Qdrant failed to start"
+          docker logs qdrant
+          exit 1
+
       - name: Wait for ChromaDB to be ready
         if: matrix.vector-io-provider == 'remote::chromadb'
         run: |

@@ -136,9 +159,10 @@ jobs:
           PGVECTOR_DB: ${{ matrix.vector-io-provider == 'remote::pgvector' && 'llamastack' || '' }}
           PGVECTOR_USER: ${{ matrix.vector-io-provider == 'remote::pgvector' && 'llamastack' || '' }}
           PGVECTOR_PASSWORD: ${{ matrix.vector-io-provider == 'remote::pgvector' && 'llamastack' || '' }}
+          ENABLE_QDRANT: ${{ matrix.vector-io-provider == 'remote::qdrant' && 'true' || '' }}
+          QDRANT_URL: ${{ matrix.vector-io-provider == 'remote::qdrant' && 'http://localhost:6333' || '' }}
           ENABLE_WEAVIATE: ${{ matrix.vector-io-provider == 'remote::weaviate' && 'true' || '' }}
           WEAVIATE_CLUSTER_URL: ${{ matrix.vector-io-provider == 'remote::weaviate' && 'localhost:8080' || '' }}
         run: |
           uv run pytest -sv --stack-config="inference=inline::sentence-transformers,vector_io=${{ matrix.vector-io-provider }}" \
             tests/integration/vector_io \

@@ -160,6 +184,11 @@ jobs:
         run: |
           docker logs chromadb > chromadb.log

+      - name: Write Qdrant logs to file
+        if: ${{ always() && matrix.vector-io-provider == 'remote::qdrant' }}
+        run: |
+          docker logs qdrant > qdrant.log
+
       - name: Upload all logs to artifacts
         if: ${{ always() }}
         uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
@@ -51,11 +51,15 @@ See the [Qdrant documentation](https://qdrant.tech/documentation/) for more deta

 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `path` | `<class 'str'>` | No | PydanticUndefined | |
+| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |

 ## Sample Configuration

 ```yaml
 path: ${env.QDRANT_PATH:=~/.llama/~/.llama/dummy}/qdrant.db
+kvstore:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/qdrant_registry.db

 ```
@@ -20,11 +20,15 @@ Please refer to the inline provider documentation.

 | `prefix` | `str \| None` | No | | |
 | `timeout` | `int \| None` | No | | |
 | `host` | `str \| None` | No | | |
+| `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig` | No | sqlite | |

 ## Sample Configuration

 ```yaml
-api_key: ${env.QDRANT_API_KEY}
+api_key: ${env.QDRANT_API_KEY:=}
+kvstore:
+  type: sqlite
+  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/qdrant_registry.db

 ```
@@ -4,14 +4,18 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_stack.providers.datatypes import Api, ProviderSpec
+from typing import Any
+
+from llama_stack.providers.datatypes import Api

 from .config import QdrantVectorIOConfig


-async def get_adapter_impl(config: QdrantVectorIOConfig, deps: dict[Api, ProviderSpec]):
+async def get_provider_impl(config: QdrantVectorIOConfig, deps: dict[Api, Any]):
     from llama_stack.providers.remote.vector_io.qdrant.qdrant import QdrantVectorIOAdapter

-    impl = QdrantVectorIOAdapter(config, deps[Api.inference])
+    assert isinstance(config, QdrantVectorIOConfig), f"Unexpected config type: {type(config)}"
+    files_api = deps.get(Api.files)
+    impl = QdrantVectorIOAdapter(config, deps[Api.inference], files_api)
     await impl.initialize()
     return impl
@@ -9,15 +9,23 @@ from typing import Any

 from pydantic import BaseModel

+from llama_stack.providers.utils.kvstore.config import (
+    KVStoreConfig,
+    SqliteKVStoreConfig,
+)
 from llama_stack.schema_utils import json_schema_type


 @json_schema_type
 class QdrantVectorIOConfig(BaseModel):
     path: str
+    kvstore: KVStoreConfig

     @classmethod
     def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
         return {
             "path": "${env.QDRANT_PATH:=~/.llama/" + __distro_dir__ + "}/" + "qdrant.db",
+            "kvstore": SqliteKVStoreConfig.sample_run_config(
+                __distro_dir__=__distro_dir__, db_name="qdrant_registry.db"
+            ),
         }
@@ -460,6 +460,7 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more
         module="llama_stack.providers.inline.vector_io.qdrant",
         config_class="llama_stack.providers.inline.vector_io.qdrant.QdrantVectorIOConfig",
         api_dependencies=[Api.inference],
+        optional_api_dependencies=[Api.files],
         description=r"""
 [Qdrant](https://qdrant.tech/documentation/) is an inline and remote vector database provider for Llama Stack. It
 allows you to store and query vectors directly in memory.

@@ -516,6 +517,7 @@ Please refer to the inline provider documentation.
 """,
         ),
         api_dependencies=[Api.inference],
+        optional_api_dependencies=[Api.files],
     ),
     remote_provider_spec(
         Api.vector_io,
@@ -12,6 +12,7 @@ from .config import QdrantVectorIOConfig
 async def get_adapter_impl(config: QdrantVectorIOConfig, deps: dict[Api, ProviderSpec]):
     from .qdrant import QdrantVectorIOAdapter

-    impl = QdrantVectorIOAdapter(config, deps[Api.inference])
+    files_api = deps.get(Api.files)
+    impl = QdrantVectorIOAdapter(config, deps[Api.inference], files_api)
     await impl.initialize()
     return impl
@@ -8,6 +8,10 @@ from typing import Any

 from pydantic import BaseModel

+from llama_stack.providers.utils.kvstore.config import (
+    KVStoreConfig,
+    SqliteKVStoreConfig,
+)
 from llama_stack.schema_utils import json_schema_type


@@ -23,9 +27,14 @@ class QdrantVectorIOConfig(BaseModel):
     prefix: str | None = None
     timeout: int | None = None
     host: str | None = None
+    kvstore: KVStoreConfig

     @classmethod
-    def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
+    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
-            "api_key": "${env.QDRANT_API_KEY}",
+            "api_key": "${env.QDRANT_API_KEY:=}",
+            "kvstore": SqliteKVStoreConfig.sample_run_config(
+                __distro_dir__=__distro_dir__,
+                db_name="qdrant_registry.db",
+            ),
         }
@@ -4,6 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

+import asyncio
 import logging
 import uuid
 from typing import Any

@@ -13,25 +14,20 @@ from qdrant_client import AsyncQdrantClient, models
 from qdrant_client.models import PointStruct

 from llama_stack.apis.common.errors import VectorStoreNotFoundError
+from llama_stack.apis.files import Files
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import (
     Chunk,
     QueryChunksResponse,
-    SearchRankingOptions,
     VectorIO,
     VectorStoreChunkingStrategy,
-    VectorStoreDeleteResponse,
-    VectorStoreFileContentsResponse,
     VectorStoreFileObject,
-    VectorStoreFileStatus,
-    VectorStoreListFilesResponse,
-    VectorStoreListResponse,
-    VectorStoreObject,
-    VectorStoreSearchResponsePage,
 )
 from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate
 from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
+from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
+from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
 from llama_stack.providers.utils.memory.vector_store import (
     EmbeddingIndex,
     VectorDBWithIndex,

@@ -42,6 +38,10 @@ from .config import QdrantVectorIOConfig as RemoteQdrantVectorIOConfig
 log = logging.getLogger(__name__)
 CHUNK_ID_KEY = "_chunk_id"

+# KV store prefixes for vector databases
+VERSION = "v3"
+VECTOR_DBS_PREFIX = f"vector_dbs:qdrant:{VERSION}::"
+

 def convert_id(_id: str) -> str:
     """

@@ -59,6 +59,11 @@ class QdrantIndex(EmbeddingIndex):
         self.client = client
         self.collection_name = collection_name

+    async def initialize(self) -> None:
+        # Qdrant collections are created on-demand in add_chunks
+        # If the collection does not exist, it will be created in add_chunks.
+        pass
+
     async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"

@@ -84,7 +89,15 @@ class QdrantIndex(EmbeddingIndex):
         await self.client.upsert(collection_name=self.collection_name, points=points)

     async def delete_chunk(self, chunk_id: str) -> None:
-        raise NotImplementedError("delete_chunk is not supported in qdrant")
+        """Remove a chunk from the Qdrant collection."""
+        try:
+            await self.client.delete(
+                collection_name=self.collection_name,
+                points_selector=models.PointIdsList(points=[convert_id(chunk_id)]),
+            )
+        except Exception as e:
+            log.error(f"Error deleting chunk {chunk_id} from Qdrant collection {self.collection_name}: {e}")
+            raise

     async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
         results = (

@@ -136,17 +149,41 @@ class QdrantIndex(EmbeddingIndex):
         await self.client.delete_collection(collection_name=self.collection_name)


-class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
+class QdrantVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     def __init__(
-        self, config: RemoteQdrantVectorIOConfig | InlineQdrantVectorIOConfig, inference_api: Api.inference
+        self,
+        config: RemoteQdrantVectorIOConfig | InlineQdrantVectorIOConfig,
+        inference_api: Api.inference,
+        files_api: Files | None = None,
     ) -> None:
         self.config = config
         self.client: AsyncQdrantClient = None
         self.cache = {}
         self.inference_api = inference_api
+        self.files_api = files_api
+        self.vector_db_store = None
+        self.kvstore: KVStore | None = None
+        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
+        self._qdrant_lock = asyncio.Lock()

     async def initialize(self) -> None:
-        self.client = AsyncQdrantClient(**self.config.model_dump(exclude_none=True))
+        client_config = self.config.model_dump(exclude_none=True, exclude={"kvstore"})
+        self.client = AsyncQdrantClient(**client_config)
+        self.kvstore = await kvstore_impl(self.config.kvstore)
+
+        start_key = VECTOR_DBS_PREFIX
+        end_key = f"{VECTOR_DBS_PREFIX}\xff"
+        stored_vector_dbs = await self.kvstore.values_in_range(start_key, end_key)
+
+        for vector_db_data in stored_vector_dbs:
+            vector_db = VectorDB.model_validate_json(vector_db_data)
+            index = VectorDBWithIndex(
+                vector_db,
+                QdrantIndex(self.client, vector_db.identifier),
+                self.inference_api,
+            )
+            self.cache[vector_db.identifier] = index
+        self.openai_vector_stores = await self._load_openai_vector_stores()

     async def shutdown(self) -> None:
         await self.client.close()

@@ -155,6 +192,10 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self,
         vector_db: VectorDB,
     ) -> None:
+        assert self.kvstore is not None
+        key = f"{VECTOR_DBS_PREFIX}{vector_db.identifier}"
+        await self.kvstore.set(key=key, value=vector_db.model_dump_json())
+
         index = VectorDBWithIndex(
             vector_db=vector_db,
             index=QdrantIndex(self.client, vector_db.identifier),

@@ -168,10 +209,16 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         await self.cache[vector_db_id].index.delete()
         del self.cache[vector_db_id]

+        assert self.kvstore is not None
+        await self.kvstore.delete(f"{VECTOR_DBS_PREFIX}{vector_db_id}")
+
     async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> VectorDBWithIndex | None:
         if vector_db_id in self.cache:
             return self.cache[vector_db_id]

+        if self.vector_db_store is None:
+            raise ValueError(f"Vector DB not found {vector_db_id}")
+
         vector_db = await self.vector_db_store.get_vector_db(vector_db_id)
         if not vector_db:
             raise VectorStoreNotFoundError(vector_db_id)

@@ -208,61 +255,6 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):

         return await index.query_chunks(query, params)

-    async def openai_create_vector_store(
-        self,
-        name: str,
-        file_ids: list[str] | None = None,
-        expires_after: dict[str, Any] | None = None,
-        chunking_strategy: dict[str, Any] | None = None,
-        metadata: dict[str, Any] | None = None,
-        embedding_model: str | None = None,
-        embedding_dimension: int | None = 384,
-        provider_id: str | None = None,
-    ) -> VectorStoreObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_list_vector_stores(
-        self,
-        limit: int | None = 20,
-        order: str | None = "desc",
-        after: str | None = None,
-        before: str | None = None,
-    ) -> VectorStoreListResponse:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_retrieve_vector_store(
-        self,
-        vector_store_id: str,
-    ) -> VectorStoreObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_update_vector_store(
-        self,
-        vector_store_id: str,
-        name: str | None = None,
-        expires_after: dict[str, Any] | None = None,
-        metadata: dict[str, Any] | None = None,
-    ) -> VectorStoreObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_delete_vector_store(
-        self,
-        vector_store_id: str,
-    ) -> VectorStoreDeleteResponse:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_search_vector_store(
-        self,
-        vector_store_id: str,
-        query: str | list[str],
-        filters: dict[str, Any] | None = None,
-        max_num_results: int | None = 10,
-        ranking_options: SearchRankingOptions | None = None,
-        rewrite_query: bool | None = False,
-        search_mode: str | None = "vector",
-    ) -> VectorStoreSearchResponsePage:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
     async def openai_attach_file_to_vector_store(
         self,
         vector_store_id: str,

@@ -270,47 +262,14 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         attributes: dict[str, Any] | None = None,
         chunking_strategy: VectorStoreChunkingStrategy | None = None,
     ) -> VectorStoreFileObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_list_files_in_vector_store(
-        self,
-        vector_store_id: str,
-        limit: int | None = 20,
-        order: str | None = "desc",
-        after: str | None = None,
-        before: str | None = None,
-        filter: VectorStoreFileStatus | None = None,
-    ) -> VectorStoreListFilesResponse:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_retrieve_vector_store_file(
-        self,
-        vector_store_id: str,
-        file_id: str,
-    ) -> VectorStoreFileObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_retrieve_vector_store_file_contents(
-        self,
-        vector_store_id: str,
-        file_id: str,
-    ) -> VectorStoreFileContentsResponse:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_update_vector_store_file(
-        self,
-        vector_store_id: str,
-        file_id: str,
-        attributes: dict[str, Any] | None = None,
-    ) -> VectorStoreFileObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
-    async def openai_delete_vector_store_file(
-        self,
-        vector_store_id: str,
-        file_id: str,
-    ) -> VectorStoreFileObject:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
-
+        # Qdrant doesn't allow multiple clients to access the same storage path simultaneously.
+        async with self._qdrant_lock:
+            await super().openai_attach_file_to_vector_store(vector_store_id, file_id, attributes, chunking_strategy)
+
     async def delete_chunks(self, store_id: str, chunk_ids: list[str]) -> None:
-        raise NotImplementedError("OpenAI Vector Stores API is not supported in Qdrant")
+        """Delete chunks from a Qdrant vector store."""
+        index = await self._get_and_cache_vector_db_index(store_id)
+        if not index:
+            raise ValueError(f"Vector DB {store_id} not found")
+        for chunk_id in chunk_ids:
+            await index.index.delete_chunk(chunk_id)
@@ -29,6 +29,8 @@ def skip_if_provider_doesnt_support_openai_vector_stores(client_with_models):
         "inline::chromadb",
         "remote::pgvector",
         "remote::chromadb",
+        "remote::qdrant",
+        "inline::qdrant",
         "remote::weaviate",
     ]:
         return
@@ -125,6 +125,8 @@ def test_insert_chunks(client_with_empty_registry, embedding_model_id, embedding
 def test_insert_chunks_with_precomputed_embeddings(client_with_empty_registry, embedding_model_id, embedding_dimension):
     vector_io_provider_params_dict = {
         "inline::milvus": {"score_threshold": -1.0},
+        "remote::qdrant": {"score_threshold": -1.0},
+        "inline::qdrant": {"score_threshold": -1.0},
     }
     vector_db_id = "test_precomputed_embeddings_db"
     client_with_empty_registry.vector_dbs.register(

@@ -168,6 +170,8 @@ def test_query_returns_valid_object_when_identical_to_embedding_in_vdb(
 ):
     vector_io_provider_params_dict = {
         "inline::milvus": {"score_threshold": 0.0},
+        "remote::qdrant": {"score_threshold": 0.0},
+        "inline::qdrant": {"score_threshold": 0.0},
     }
     vector_db_id = "test_precomputed_embeddings_db"
     client_with_empty_registry.vector_dbs.register(
@@ -17,10 +17,12 @@ from llama_stack.providers.inline.vector_io.chroma.config import ChromaVectorIOC
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter
 from llama_stack.providers.inline.vector_io.milvus.config import MilvusVectorIOConfig, SqliteKVStoreConfig
+from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig
 from llama_stack.providers.inline.vector_io.sqlite_vec import SQLiteVectorIOConfig
 from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteVecIndex, SQLiteVecVectorIOAdapter
 from llama_stack.providers.remote.vector_io.chroma.chroma import ChromaIndex, ChromaVectorIOAdapter, maybe_await
 from llama_stack.providers.remote.vector_io.milvus.milvus import MilvusIndex, MilvusVectorIOAdapter
+from llama_stack.providers.remote.vector_io.qdrant.qdrant import QdrantVectorIOAdapter

 EMBEDDING_DIMENSION = 384
 COLLECTION_PREFIX = "test_collection"

@@ -280,6 +282,57 @@ async def chroma_vec_adapter(chroma_vec_db_path, mock_inference_api, embedding_d
     await adapter.shutdown()


+@pytest.fixture
+def qdrant_vec_db_path(tmp_path_factory):
+    import uuid
+
+    db_path = str(tmp_path_factory.getbasetemp() / f"test_qdrant_{uuid.uuid4()}.db")
+    return db_path
+
+
+@pytest.fixture
+async def qdrant_vec_adapter(qdrant_vec_db_path, mock_inference_api, embedding_dimension):
+    import uuid
+
+    config = QdrantVectorIOConfig(
+        db_path=qdrant_vec_db_path,
+        kvstore=SqliteKVStoreConfig(),
+    )
+    adapter = QdrantVectorIOAdapter(
+        config=config,
+        inference_api=mock_inference_api,
+        files_api=None,
+    )
+    collection_id = f"qdrant_test_collection_{uuid.uuid4()}"
+    await adapter.initialize()
+    await adapter.register_vector_db(
+        VectorDB(
+            identifier=collection_id,
+            provider_id="test_provider",
+            embedding_model="test_model",
+            embedding_dimension=embedding_dimension,
+        )
+    )
+    adapter.test_collection_id = collection_id
+    yield adapter
+    await adapter.shutdown()
+
+
+@pytest.fixture
+async def qdrant_vec_index(qdrant_vec_db_path, embedding_dimension):
+    import uuid
+
+    from qdrant_client import AsyncQdrantClient
+
+    from llama_stack.providers.remote.vector_io.qdrant.qdrant import QdrantIndex
+
+    client = AsyncQdrantClient(path=qdrant_vec_db_path)
+    collection_name = f"qdrant_test_collection_{uuid.uuid4()}"
+    index = QdrantIndex(client, collection_name)
+    yield index
+    await index.delete()
+
+
 @pytest.fixture
 def vector_io_adapter(vector_provider, request):
     """Returns the appropriate vector IO adapter based on the provider parameter."""

@@ -288,6 +341,7 @@ def vector_io_adapter(vector_provider, request):
         "faiss": "faiss_vec_adapter",
         "sqlite_vec": "sqlite_vec_adapter",
         "chroma": "chroma_vec_adapter",
+        "qdrant": "qdrant_vec_adapter",
     }
     return request.getfixturevalue(vector_provider_dict[vector_provider])
@@ -23,6 +23,7 @@ from llama_stack.providers.inline.vector_io.qdrant.config import (
 from llama_stack.providers.remote.vector_io.qdrant.qdrant import (
     QdrantVectorIOAdapter,
 )
+from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig

 # This test is a unit test for the QdrantVectorIOAdapter class. This should only contain
 # tests which are specific to this class. More general (API-level) tests should be placed in

@@ -36,7 +37,8 @@ from llama_stack.providers.remote.vector_io.qdrant.qdrant import (

 @pytest.fixture
 def qdrant_config(tmp_path) -> InlineQdrantVectorIOConfig:
-    return InlineQdrantVectorIOConfig(path=os.path.join(tmp_path, "qdrant.db"))
+    kvstore_config = SqliteKVStoreConfig(db_name=os.path.join(tmp_path, "test_kvstore.db"))
+    return InlineQdrantVectorIOConfig(path=os.path.join(tmp_path, "qdrant.db"), kvstore=kvstore_config)


 @pytest.fixture(scope="session")

@@ -50,6 +52,9 @@ def mock_vector_db(vector_db_id) -> MagicMock:
     mock_vector_db.embedding_model = "embedding_model"
     mock_vector_db.identifier = vector_db_id
     mock_vector_db.embedding_dimension = 384
+    mock_vector_db.model_dump_json.return_value = (
+        '{"identifier": "' + vector_db_id + '", "embedding_model": "embedding_model", "embedding_dimension": 384}'
+    )
     return mock_vector_db


@@ -69,7 +74,7 @@ def mock_api_service(sample_embeddings):

 @pytest.fixture
 async def qdrant_adapter(qdrant_config, mock_vector_db_store, mock_api_service, loop) -> QdrantVectorIOAdapter:
-    adapter = QdrantVectorIOAdapter(config=qdrant_config, inference_api=mock_api_service)
+    adapter = QdrantVectorIOAdapter(config=qdrant_config, inference_api=mock_api_service, files_api=None)
     adapter.vector_db_store = mock_vector_db_store
     await adapter.initialize()
     yield adapter