Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-03 19:57:35 +00:00
add __init__ to the mixin
commit c9be8c15c2 (parent e58bf82581)
8 changed files with 17 additions and 54 deletions
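Background for the diffs below: the mixin previously declared its shared attributes only as class-level annotations, and a bare annotation creates no runtime state, so every adapter had to assign each attribute by hand. A minimal runnable sketch of the difference (illustrative class names only, not the real llama-stack types):

from typing import Any


class AnnotatedOnly:
    # A bare class-level annotation documents the attribute but never assigns it.
    openai_vector_stores: dict[str, dict[str, Any]]


class InitBacked:
    def __init__(self) -> None:
        # An __init__ assignment guarantees the attribute exists on every instance.
        self.openai_vector_stores: dict[str, dict[str, Any]] = {}


print(hasattr(AnnotatedOnly(), "openai_vector_stores"))  # False
print(hasattr(InitBacked(), "openai_vector_stores"))     # True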
@@ -200,15 +200,10 @@ class FaissIndex(EmbeddingIndex):
 class FaissVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     def __init__(self, config: FaissVectorIOConfig, inference_api: Inference, files_api: Files | None) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         self.config = config
         self.inference_api = inference_api
-        self.files_api = files_api
         self.cache: dict[str, VectorDBWithIndex] = {}
-        self.kvstore: KVStore | None = None
-        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
 
     async def initialize(self) -> None:
         self.kvstore = await kvstore_impl(self.config.kvstore)
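The same one-line change repeats across all eight adapter files: the shared OpenAI vector-store state moves into a single super().__init__() call, and each adapter keeps only its own attributes. A self-contained sketch of the resulting pattern (stand-in types, not the real classes; the real kvstore is still created later, in initialize()):

import asyncio
from typing import Any


class OpenAIVectorStoreMixin:
    # Stand-in for the real mixin: it now owns the shared state.
    def __init__(self, files_api: Any | None = None, kvstore: Any | None = None):
        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
        self.openai_file_batches: dict[str, dict[str, Any]] = {}
        self.files_api = files_api
        self.kvstore = kvstore
        self._last_file_batch_cleanup_time = 0
        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}


class FaissLikeAdapter(OpenAIVectorStoreMixin):
    # Stand-in adapter: only adapter-specific attributes remain here.
    def __init__(self, config: Any, inference_api: Any, files_api: Any | None) -> None:
        super().__init__(files_api=files_api, kvstore=None)  # kvstore is set in initialize()
        self.config = config
        self.inference_api = inference_api
        self.cache: dict[str, Any] = {}


adapter = FaissLikeAdapter(config=object(), inference_api=object(), files_api=None)
print(adapter.openai_vector_stores)  # {} (set by the mixin, not the adapter)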
@@ -410,15 +410,10 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     """
 
     def __init__(self, config, inference_api: Inference, files_api: Files | None) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         self.config = config
         self.inference_api = inference_api
-        self.files_api = files_api
         self.cache: dict[str, VectorDBWithIndex] = {}
-        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
-        self.kvstore: KVStore | None = None
 
     async def initialize(self) -> None:
         self.kvstore = await kvstore_impl(self.config.kvstore)
@@ -140,14 +140,13 @@ class ChromaVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
         inference_api: Api.inference,
         files_api: Files | None,
     ) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         log.info(f"Initializing ChromaVectorIOAdapter with url: {config}")
         self.config = config
         self.inference_api = inference_api
         self.client = None
         self.cache = {}
-        self.kvstore: KVStore | None = None
         self.vector_db_store = None
-        self.files_api = files_api
 
     async def initialize(self) -> None:
         self.kvstore = await kvstore_impl(self.config.kvstore)
@@ -166,9 +165,6 @@ class ChromaVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
             log.info(f"Connecting to Chroma local db at: {self.config.db_path}")
             self.client = chromadb.PersistentClient(path=self.config.db_path)
         self.openai_vector_stores = await self._load_openai_vector_stores()
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
 
     async def shutdown(self) -> None:
         pass
@@ -309,17 +309,12 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
         inference_api: Inference,
         files_api: Files | None,
     ) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         self.config = config
         self.cache = {}
         self.client = None
         self.inference_api = inference_api
-        self.files_api = files_api
-        self.kvstore: KVStore | None = None
         self.vector_db_store = None
-        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
         self.metadata_collection_name = "openai_vector_stores_metadata"
 
     async def initialize(self) -> None:
@@ -4,7 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-import asyncio
 import heapq
 from typing import Any
 
@@ -346,17 +345,12 @@ class PGVectorVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
         inference_api: Api.inference,
         files_api: Files | None = None,
     ) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         self.config = config
         self.inference_api = inference_api
         self.conn = None
         self.cache = {}
-        self.files_api = files_api
-        self.kvstore: KVStore | None = None
         self.vector_db_store = None
-        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
         self.metadata_collection_name = "openai_vector_stores_metadata"
 
     async def initialize(self) -> None:
@@ -27,7 +27,7 @@ from llama_stack.apis.vector_io import (
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate
 from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
-from llama_stack.providers.utils.kvstore import KVStore, kvstore_impl
+from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
 from llama_stack.providers.utils.memory.vector_store import (
     ChunkForDeletion,
@@ -162,17 +162,12 @@ class QdrantVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
         inference_api: Api.inference,
         files_api: Files | None = None,
     ) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         self.config = config
         self.client: AsyncQdrantClient = None
         self.cache = {}
         self.inference_api = inference_api
-        self.files_api = files_api
         self.vector_db_store = None
-        self.kvstore: KVStore | None = None
-        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
         self._qdrant_lock = asyncio.Lock()
 
     async def initialize(self) -> None:
@@ -3,7 +3,6 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-import asyncio
 import json
 from typing import Any
 
@@ -285,17 +284,12 @@ class WeaviateVectorIOAdapter(
         inference_api: Api.inference,
         files_api: Files | None,
     ) -> None:
+        super().__init__(files_api=files_api, kvstore=None)
         self.config = config
         self.inference_api = inference_api
         self.client_cache = {}
         self.cache = {}
-        self.files_api = files_api
-        self.kvstore: KVStore | None = None
         self.vector_db_store = None
-        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
-        self.openai_file_batches: dict[str, dict[str, Any]] = {}
-        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
-        self._last_file_batch_cleanup_time = 0
         self.metadata_collection_name = "openai_vector_stores_metadata"
 
     def _get_client(self) -> weaviate.WeaviateClient:
@@ -71,16 +71,15 @@ class OpenAIVectorStoreMixin(ABC):
     an openai_vector_stores in-memory cache.
     """
 
-    # These should be provided by the implementing class
-    openai_vector_stores: dict[str, dict[str, Any]]
-    openai_file_batches: dict[str, dict[str, Any]]
-    files_api: Files | None
-    # KV store for persisting OpenAI vector store metadata
-    kvstore: KVStore | None
-    # Track last cleanup time to throttle cleanup operations
-    _last_file_batch_cleanup_time: int
-    # Track running file batch processing tasks
-    _file_batch_tasks: dict[str, asyncio.Task[None]]
+    # Implementing classes should call super().__init__() in their __init__ method
+    # to properly initialize the mixin attributes.
+    def __init__(self, files_api: Files | None = None, kvstore: KVStore | None = None):
+        self.openai_vector_stores: dict[str, dict[str, Any]] = {}
+        self.openai_file_batches: dict[str, dict[str, Any]] = {}
+        self.files_api = files_api
+        self.kvstore = kvstore
+        self._last_file_batch_cleanup_time = 0
+        self._file_batch_tasks: dict[str, asyncio.Task[None]] = {}
 
     async def _save_openai_vector_store(self, store_id: str, store_info: dict[str, Any]) -> None:
         """Save vector store metadata to persistent storage."""
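One subtlety behind the adapter diffs above: each adapter inherits from several bases (OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate), and super().__init__() still reaches the mixin because the mixin comes first in the method resolution order. A minimal sketch of that resolution (simplified stand-in bases, not the real ones):

class OpenAIVectorStoreMixin:
    def __init__(self, files_api=None, kvstore=None):
        self.files_api = files_api
        self.kvstore = kvstore


class VectorIO:
    # Protocol-like base with no __init__ of its own.
    pass


class Adapter(OpenAIVectorStoreMixin, VectorIO):
    def __init__(self) -> None:
        # MRO: Adapter, OpenAIVectorStoreMixin, VectorIO, object,
        # so this call runs the mixin's __init__.
        super().__init__(files_api=None, kvstore=None)


print([c.__name__ for c in Adapter.__mro__])
# ['Adapter', 'OpenAIVectorStoreMixin', 'VectorIO', 'object']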