diff --git a/llama_stack/providers/registry/vector_io.py b/llama_stack/providers/registry/vector_io.py
index 4422baba5..88a65397a 100644
--- a/llama_stack/providers/registry/vector_io.py
+++ b/llama_stack/providers/registry/vector_io.py
@@ -42,7 +42,7 @@ def available_providers() -> List[ProviderSpec]:
             provider_type="inline::meta-reference",
             pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
             module="llama_stack.providers.inline.vector_io.faiss",
-            config_class="llama_stack.providers.inline.vector_io.faiss.FaissImplConfig",
+            config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
             deprecation_warning="Please use the `inline::faiss` provider instead.",
             api_dependencies=[Api.inference],
         ),
@@ -51,7 +51,7 @@ def available_providers() -> List[ProviderSpec]:
             provider_type="inline::faiss",
             pip_packages=EMBEDDING_DEPS + ["faiss-cpu"],
             module="llama_stack.providers.inline.vector_io.faiss",
-            config_class="llama_stack.providers.inline.vector_io.faiss.FaissImplConfig",
+            config_class="llama_stack.providers.inline.vector_io.faiss.FaissVectorIOConfig",
             api_dependencies=[Api.inference],
         ),
         InlineProviderSpec(
@@ -68,7 +68,7 @@ def available_providers() -> List[ProviderSpec]:
                 adapter_type="chromadb",
                 pip_packages=EMBEDDING_DEPS + ["chromadb-client"],
                 module="llama_stack.providers.remote.vector_io.chroma",
-                config_class="llama_stack.providers.remote.vector_io.chroma.ChromaRemoteImplConfig",
+                config_class="llama_stack.providers.remote.vector_io.chroma.ChromaVectorIOConfig",
             ),
             api_dependencies=[Api.inference],
         ),
@@ -77,7 +77,7 @@ def available_providers() -> List[ProviderSpec]:
             provider_type="inline::chromadb",
             pip_packages=EMBEDDING_DEPS + ["chromadb"],
             module="llama_stack.providers.inline.vector_io.chroma",
-            config_class="llama_stack.providers.inline.vector_io.chroma.ChromaInlineImplConfig",
+            config_class="llama_stack.providers.inline.vector_io.chroma.ChromaVectorIOConfig",
             api_dependencies=[Api.inference],
         ),
         remote_provider_spec(
@@ -86,7 +86,7 @@ def available_providers() -> List[ProviderSpec]:
                 adapter_type="pgvector",
                 pip_packages=EMBEDDING_DEPS + ["psycopg2-binary"],
                 module="llama_stack.providers.remote.vector_io.pgvector",
-                config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorConfig",
+                config_class="llama_stack.providers.remote.vector_io.pgvector.PGVectorVectorIOConfig",
             ),
             api_dependencies=[Api.inference],
         ),
@@ -96,7 +96,7 @@ def available_providers() -> List[ProviderSpec]:
                 adapter_type="weaviate",
                 pip_packages=EMBEDDING_DEPS + ["weaviate-client"],
                 module="llama_stack.providers.remote.vector_io.weaviate",
-                config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateConfig",
+                config_class="llama_stack.providers.remote.vector_io.weaviate.WeaviateVectorIOConfig",
                 provider_data_validator="llama_stack.providers.remote.vector_io.weaviate.WeaviateRequestProviderData",
             ),
             api_dependencies=[Api.inference],
@@ -107,7 +107,7 @@ def available_providers() -> List[ProviderSpec]:
                 adapter_type="sample",
                 pip_packages=[],
                 module="llama_stack.providers.remote.vector_io.sample",
-                config_class="llama_stack.providers.remote.vector_io.sample.SampleConfig",
+                config_class="llama_stack.providers.remote.vector_io.sample.SampleVectorIOConfig",
             ),
             api_dependencies=[],
         ),
@@ -117,7 +117,7 @@ def available_providers() -> List[ProviderSpec]:
                 adapter_type="qdrant",
                 pip_packages=EMBEDDING_DEPS + ["qdrant-client"],
                 module="llama_stack.providers.remote.vector_io.qdrant",
-                config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantConfig",
+                config_class="llama_stack.providers.remote.vector_io.qdrant.QdrantVectorIOConfig",
             ),
             api_dependencies=[Api.inference],
         ),
diff --git a/llama_stack/providers/remote/vector_io/chroma/chroma.py b/llama_stack/providers/remote/vector_io/chroma/chroma.py
index f894a8e65..bd684160a 100644
--- a/llama_stack/providers/remote/vector_io/chroma/chroma.py
+++ b/llama_stack/providers/remote/vector_io/chroma/chroma.py
@@ -16,12 +16,13 @@ from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
 from llama_stack.providers.datatypes import Api, VectorDBsProtocolPrivate
+from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
 from llama_stack.providers.utils.memory.vector_store import (
     EmbeddingIndex,
     VectorDBWithIndex,
 )
 
-from .config import ChromaVectorIOConfig
+from .config import ChromaVectorIOConfig as RemoteChromaVectorIOConfig
 
 log = logging.getLogger(__name__)
 
@@ -88,7 +89,7 @@ class ChromaIndex(EmbeddingIndex):
 class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     def __init__(
         self,
-        config: Union[ChromaVectorIOConfig, ChromaVectorIOConfig],
+        config: Union[RemoteChromaVectorIOConfig, InlineChromaVectorIOConfig],
         inference_api: Api.inference,
     ) -> None:
         log.info(f"Initializing ChromaVectorIOAdapter with url: {config}")
@@ -99,7 +100,7 @@ class ChromaVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         self.cache = {}
 
     async def initialize(self) -> None:
-        if isinstance(self.config, ChromaVectorIOConfig):
+        if isinstance(self.config, RemoteChromaVectorIOConfig):
             log.info(f"Connecting to Chroma server at: {self.config.url}")
             url = self.config.url.rstrip("/")
             parsed = urlparse(url)