feat: configure vector-io provider with an embedding model

Signed-off-by: Mustafa Elbehery <melbeher@redhat.com>
Mustafa Elbehery 2025-07-31 13:07:03 +02:00
parent 1f0766308d
commit d8f013b35a
29 changed files with 228 additions and 24 deletions
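
Each provider config below imports a new shared EmbeddingConfig model from llama_stack.providers.utils.vector_io.embedding_config. That module is not part of the hunks shown in this commit; judging from the commented sample entries (a model name plus a dimensions value), it presumably looks roughly like the following sketch. The field names, types, and defaults here are inferred, not confirmed by the diff.

# Hypothetical sketch of the EmbeddingConfig model imported throughout this commit.
# The real definition is not shown in these hunks; field names are inferred from the
# "model"/"dimensions" keys in the commented sample configs below.
from pydantic import BaseModel, Field


class EmbeddingConfig(BaseModel):
    model: str = Field(description="Default embedding model identifier, e.g. all-MiniLM-L6-v2")
    dimensions: int | None = Field(default=None, description="Embedding vector dimension, e.g. 384")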


@@ -9,6 +9,7 @@ from typing import Any
from pydantic import BaseModel, Field
from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig
from llama_stack.schema_utils import json_schema_type
@@ -16,6 +17,10 @@ from llama_stack.schema_utils import json_schema_type
class ChromaVectorIOConfig(BaseModel):
    url: str | None
    kvstore: KVStoreConfig = Field(description="Config for KV store backend")
    embedding: EmbeddingConfig | None = Field(
        default=None,
        description="Default embedding configuration for this provider. When specified, vector databases created with this provider will use these embedding settings as defaults.",
    )

    @classmethod
    def sample_run_config(cls, __distro_dir__: str, url: str = "${env.CHROMADB_URL}", **kwargs: Any) -> dict[str, Any]:
@@ -25,4 +30,9 @@ class ChromaVectorIOConfig(BaseModel):
                __distro_dir__=__distro_dir__,
                db_name="chroma_remote_registry.db",
            ),
            # Optional: Configure default embedding model for this provider
            # "embedding": {
            #     "model": "${env.CHROMA_EMBEDDING_MODEL:=all-MiniLM-L6-v2}",
            #     "dimensions": 384
            # },
        }
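
For illustration, the same provider-level embedding default could also be set when constructing the config object directly in Python rather than through a rendered run config. The snippet below is a sketch, not code from this commit: the ChromaVectorIOConfig import path, the SqliteKVStoreConfig db_path argument, and the EmbeddingConfig field names are assumptions; only the url, kvstore, and embedding fields appear in the diff.

# Illustrative only, not code from this commit. The kvstore/embedding import paths
# appear in the hunks above; the ChromaVectorIOConfig import path is assumed.
from llama_stack.providers.remote.vector_io.chroma.config import ChromaVectorIOConfig
from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig

config = ChromaVectorIOConfig(
    url="http://localhost:8000",
    kvstore=SqliteKVStoreConfig(db_path="/tmp/chroma_remote_registry.db"),  # db_path field assumed
    embedding=EmbeddingConfig(model="all-MiniLM-L6-v2", dimensions=384),  # EmbeddingConfig fields assumed
)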


@@ -9,6 +9,7 @@ from typing import Any
from pydantic import BaseModel, ConfigDict, Field
from llama_stack.providers.utils.kvstore.config import KVStoreConfig, SqliteKVStoreConfig
from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig
from llama_stack.schema_utils import json_schema_type
@@ -18,6 +19,10 @@ class MilvusVectorIOConfig(BaseModel):
    token: str | None = Field(description="The token of the Milvus server")
    consistency_level: str = Field(description="The consistency level of the Milvus server", default="Strong")
    kvstore: KVStoreConfig = Field(description="Config for KV store backend")
    embedding: EmbeddingConfig | None = Field(
        default=None,
        description="Default embedding configuration for this provider. When specified, vector databases created with this provider will use these embedding settings as defaults.",
    )

    # This configuration allows additional fields to be passed through to the underlying Milvus client.
    # See the [Milvus](https://milvus.io/docs/install-overview.md) documentation for more details about Milvus in general.
@@ -32,4 +37,9 @@ class MilvusVectorIOConfig(BaseModel):
                __distro_dir__=__distro_dir__,
                db_name="milvus_remote_registry.db",
            ),
            # Optional: Configure default embedding model for this provider
            # "embedding": {
            #     "model": "${env.MILVUS_EMBEDDING_MODEL:=all-MiniLM-L6-v2}",
            #     "dimensions": 384
            # },
        }


@@ -29,6 +29,7 @@ from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
    EmbeddingIndex,
    VectorDBWithIndex,
    apply_provider_embedding_defaults,
)
from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name
@@ -305,6 +306,9 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
        self,
        vector_db: VectorDB,
    ) -> None:
        # Apply provider-level embedding defaults if configured
        vector_db = apply_provider_embedding_defaults(vector_db, self.config.embedding)

        if isinstance(self.config, RemoteMilvusVectorIOConfig):
            consistency_level = self.config.consistency_level
        else:
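
The adapter hunks call apply_provider_embedding_defaults before registering a vector database, but the helper itself is not shown in this commit. Based on the field description (provider settings act as defaults), its behavior is presumably along these lines; the VectorDB attribute names and the precedence rule below are assumptions, not taken from the diff.

# Hypothetical sketch of apply_provider_embedding_defaults; the real helper lives in
# llama_stack.providers.utils.memory.vector_store and is not shown in these hunks.
# Assumes VectorDB is a pydantic model carrying embedding_model / embedding_dimension
# and that explicit per-database values take precedence over provider-level defaults.
def apply_provider_embedding_defaults(vector_db, embedding):
    if embedding is None:
        return vector_db  # provider has no embedding defaults configured
    updates = {}
    if not getattr(vector_db, "embedding_model", None):
        updates["embedding_model"] = embedding.model
    if not getattr(vector_db, "embedding_dimension", None) and embedding.dimensions is not None:
        updates["embedding_dimension"] = embedding.dimensions
    return vector_db.model_copy(update=updates) if updates else vector_db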


@@ -12,6 +12,7 @@ from llama_stack.providers.utils.kvstore.config import (
    KVStoreConfig,
    SqliteKVStoreConfig,
)
from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig
from llama_stack.schema_utils import json_schema_type
@@ -23,6 +24,10 @@ class PGVectorVectorIOConfig(BaseModel):
    user: str | None = Field(default="postgres")
    password: str | None = Field(default="mysecretpassword")
    kvstore: KVStoreConfig | None = Field(description="Config for KV store backend (SQLite only for now)", default=None)
    embedding: EmbeddingConfig | None = Field(
        default=None,
        description="Default embedding configuration for this provider. When specified, vector databases created with this provider will use these embedding settings as defaults.",
    )

    @classmethod
    def sample_run_config(
@@ -45,4 +50,9 @@ class PGVectorVectorIOConfig(BaseModel):
                __distro_dir__=__distro_dir__,
                db_name="pgvector_registry.db",
            ),
            # Optional: Configure default embedding model for this provider
            # "embedding": {
            #     "model": "${env.PGVECTOR_EMBEDDING_MODEL:=all-MiniLM-L6-v2}",
            #     "dimensions": 384
            # },
        }


@@ -29,6 +29,7 @@ from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
    EmbeddingIndex,
    VectorDBWithIndex,
    apply_provider_embedding_defaults,
)

from .config import PGVectorVectorIOConfig
@@ -222,6 +223,9 @@ class PGVectorVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
        log.info("Connection to PGVector database server closed")

    async def register_vector_db(self, vector_db: VectorDB) -> None:
        # Apply provider-level embedding defaults if configured
        vector_db = apply_provider_embedding_defaults(vector_db, self.config.embedding)

        # Persist vector DB metadata in the KV store
        assert self.kvstore is not None
        # Upsert model metadata in Postgres


@@ -6,12 +6,13 @@
from typing import Any
from pydantic import BaseModel
from pydantic import BaseModel, Field
from llama_stack.providers.utils.kvstore.config import (
    KVStoreConfig,
    SqliteKVStoreConfig,
)
from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig
from llama_stack.schema_utils import json_schema_type
@@ -28,6 +29,10 @@ class QdrantVectorIOConfig(BaseModel):
    timeout: int | None = None
    host: str | None = None
    kvstore: KVStoreConfig
    embedding: EmbeddingConfig | None = Field(
        default=None,
        description="Default embedding configuration for this provider. When specified, vector databases created with this provider will use these embedding settings as defaults.",
    )

    @classmethod
    def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
@@ -37,4 +42,10 @@ class QdrantVectorIOConfig(BaseModel):
                __distro_dir__=__distro_dir__,
                db_name="qdrant_registry.db",
            ),
            "api_key": "${env.QDRANT_API_KEY}",
            # Optional: Configure default embedding model for this provider
            # "embedding": {
            #     "model": "${env.QDRANT_EMBEDDING_MODEL:=all-MiniLM-L6-v2}",
            #     "dimensions": 384
            # },
        }
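
The commented sample entries rely on llama-stack's environment-variable placeholders. Assuming the ${env.NAME:=default} form resolves to the environment variable when it is set and to the literal default otherwise, a standalone resolver mirroring that behavior would look like the sketch below; llama-stack performs this substitution itself when rendering a distribution, so this helper is illustrative only.

# Illustrative resolver for the ${env.NAME:=default} placeholders used in the sample
# configs above. This mirrors the assumed substitution semantics; it is not the
# resolver llama-stack actually uses.
import os
import re

_ENV_DEFAULT = re.compile(r"\$\{env\.([A-Za-z_][A-Za-z0-9_]*):=([^}]*)\}")


def resolve_env_placeholders(value: str) -> str:
    return _ENV_DEFAULT.sub(lambda m: os.environ.get(m.group(1), m.group(2)), value)


print(resolve_env_placeholders("${env.QDRANT_EMBEDDING_MODEL:=all-MiniLM-L6-v2}"))
# -> "all-MiniLM-L6-v2" unless QDRANT_EMBEDDING_MODEL is set in the environment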


@@ -12,6 +12,7 @@ from llama_stack.providers.utils.kvstore.config import (
    KVStoreConfig,
    SqliteKVStoreConfig,
)
from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig
from llama_stack.schema_utils import json_schema_type
@@ -21,6 +22,11 @@ class WeaviateVectorIOConfig(BaseModel):
    weaviate_cluster_url: str | None = Field(description="The URL of the Weaviate cluster", default="localhost:8080")
    kvstore: KVStoreConfig | None = Field(description="Config for KV store backend (SQLite only for now)", default=None)
    embedding: EmbeddingConfig | None = Field(
        default=None,
        description="Default embedding configuration for this provider. When specified, vector databases created with this provider will use these embedding settings as defaults.",
    )

    @classmethod
    def sample_run_config(
        cls,
@@ -34,4 +42,9 @@ class WeaviateVectorIOConfig(BaseModel):
                __distro_dir__=__distro_dir__,
                db_name="weaviate_registry.db",
            ),
            # Optional: Configure default embedding model for this provider
            # "embedding": {
            #     "model": "${env.WEAVIATE_EMBEDDING_MODEL:=all-MiniLM-L6-v2}",
            #     "dimensions": 384
            # },
        }