Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-23 10:19:47 +00:00)

feat(vector-io): configurable embedding models for all providers (v2)

Adds embedding_model and embedding_dimension fields to all VectorIOConfig classes.
Router respects provider defaults with fallback.
Introduces embedding_utils helper.
Comprehensive docs & samples.

Resolves #2729

parent c8f274347d
commit d55dd3e9a0
24 changed files with 482 additions and 14 deletions
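With these fields in place, a vector-io provider's run config can pin its own embedding defaults instead of relying on whichever embedding model happens to be registered first. A rough sketch of what that looks like for the inline Chroma provider, using the commented sample values from the diff below (the model name and dimension are illustrative, not required values):

    # Hypothetical run-config fragment for the inline Chroma vector-io provider
    {
        "db_path": "${env.CHROMADB_PATH}",
        "embedding_model": "all-MiniLM-L6-v2",  # provider-level default for new vector stores
        "embedding_dimension": 384,  # only needed for models with variable dimensions
    }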
@@ -129,13 +129,32 @@ class VectorIORouter(VectorIO):
     ) -> VectorStoreObject:
         logger.debug(f"VectorIORouter.openai_create_vector_store: name={name}, provider_id={provider_id}")
 
-        # If no embedding model is provided, use the first available one
+        # If no embedding model is provided, try provider defaults then fallback
         if embedding_model is None:
-            embedding_model_info = await self._get_first_embedding_model()
-            if embedding_model_info is None:
-                raise ValueError("No embedding model provided and no embedding models available in the system")
-            embedding_model, embedding_dimension = embedding_model_info
-            logger.info(f"No embedding model specified, using first available: {embedding_model}")
+            # Try to get provider-specific embedding model configuration
+            if provider_id:
+                try:
+                    provider_impl = self.routing_table.get_provider_impl(provider_id)
+                    provider_config = getattr(provider_impl, "config", None)
+
+                    if provider_config:
+                        if hasattr(provider_config, "embedding_model") and provider_config.embedding_model:
+                            embedding_model = provider_config.embedding_model
+                            logger.info(f"Using provider config default embedding model: {embedding_model}")
+
+                        if hasattr(provider_config, "embedding_dimension") and provider_config.embedding_dimension:
+                            embedding_dimension = provider_config.embedding_dimension
+                            logger.info(f"Using provider config embedding dimension: {embedding_dimension}")
+                except Exception as e:
+                    logger.debug(f"Could not get provider config for {provider_id}: {e}")
+
+            # If still no embedding model, use system fallback
+            if embedding_model is None:
+                embedding_model_info = await self._get_first_embedding_model()
+                if embedding_model_info is None:
+                    raise ValueError("No embedding model provided and no embedding models available in the system")
+                embedding_model, embedding_dimension = embedding_model_info
+                logger.info(f"No embedding model specified, using first available: {embedding_model}")
 
         vector_db_id = f"vs_{uuid.uuid4()}"
         registered_vector_db = await self.routing_table.register_vector_db(
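The net effect is a three-step priority: an explicitly requested model always wins, then the provider config default, then the existing first-registered-model fallback. A simplified restatement of that order (not the actual router code; the model names are illustrative):

    def resolve(requested: str | None, provider_default: str | None, system_default: str) -> str:
        """Request > provider config default > first registered embedding model."""
        return requested or provider_default or system_default

    assert resolve("nomic-embed-text", "all-MiniLM-L6-v2", "first-registered") == "nomic-embed-text"
    assert resolve(None, "all-MiniLM-L6-v2", "first-registered") == "all-MiniLM-L6-v2"
    assert resolve(None, None, "first-registered") == "first-registered"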
@@ -6,12 +6,25 @@
 
 from typing import Any
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 
 class ChromaVectorIOConfig(BaseModel):
     db_path: str
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(cls, db_path: str = "${env.CHROMADB_PATH}", **kwargs: Any) -> dict[str, Any]:
-        return {"db_path": db_path}
+        return {
+            "db_path": db_path,
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
+        }
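Both new fields default to None, so existing Chroma configs keep validating unchanged. A quick sketch against the class above, assuming ChromaVectorIOConfig has been imported from this provider's config module:

    legacy = ChromaVectorIOConfig(db_path="/tmp/chroma")  # pre-existing configs still validate
    pinned = ChromaVectorIOConfig(
        db_path="/tmp/chroma",
        embedding_model="all-MiniLM-L6-v2",  # provider-level default for new vector stores
    )
    assert legacy.embedding_model is None and legacy.embedding_dimension is None
    assert pinned.embedding_dimension is None  # dimension is still looked up from the model registry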
@@ -6,7 +6,7 @@
 
 from typing import Any
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 from llama_stack.providers.utils.kvstore.config import (
     KVStoreConfig,
@@ -18,6 +18,14 @@ from llama_stack.schema_utils import json_schema_type
 @json_schema_type
 class FaissVectorIOConfig(BaseModel):
     kvstore: KVStoreConfig
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
@@ -25,5 +33,8 @@ class FaissVectorIOConfig(BaseModel):
             "kvstore": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
                 db_name="faiss_store.db",
-            )
+            ),
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
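The embedding_dimension override is only meant for models that can emit several output sizes (the Matryoshka case named in the field description); fixed-size models can rely on the registry lookup. A hedged sketch of the Faiss sample config with the optional keys filled in (the model name, dimension, and exact kvstore dict shape are illustrative):

    faiss_run_config = {
        "kvstore": {
            "type": "sqlite",
            "db_path": "/tmp/faiss_store.db",
        },
        "embedding_model": "nomic-embed-text-v1.5",  # illustrative model with selectable output sizes
        "embedding_dimension": 256,  # override only because the model's dimension is variable
    }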
@@ -20,6 +20,14 @@ class MilvusVectorIOConfig(BaseModel):
     db_path: str
     kvstore: KVStoreConfig = Field(description="Config for KV store backend (SQLite only for now)")
     consistency_level: str = Field(description="The consistency level of the Milvus server", default="Strong")
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
@@ -29,4 +37,7 @@ class MilvusVectorIOConfig(BaseModel):
                 __distro_dir__=__distro_dir__,
                 db_name="milvus_registry.db",
             ),
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
@@ -17,6 +17,14 @@ from llama_stack.providers.utils.kvstore.config import (
 class SQLiteVectorIOConfig(BaseModel):
     db_path: str = Field(description="Path to the SQLite database file")
     kvstore: KVStoreConfig = Field(description="Config for KV store backend (SQLite only for now)")
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
@@ -26,4 +34,7 @@ class SQLiteVectorIOConfig(BaseModel):
                 __distro_dir__=__distro_dir__,
                 db_name="sqlite_vec_registry.db",
             ),
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
@@ -6,12 +6,25 @@
 
 from typing import Any
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 
 class ChromaVectorIOConfig(BaseModel):
     url: str | None
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(cls, url: str = "${env.CHROMADB_URL}", **kwargs: Any) -> dict[str, Any]:
-        return {"url": url}
+        return {
+            "url": url,
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
+        }
@@ -18,6 +18,14 @@ class MilvusVectorIOConfig(BaseModel):
     token: str | None = Field(description="The token of the Milvus server")
     consistency_level: str = Field(description="The consistency level of the Milvus server", default="Strong")
     kvstore: KVStoreConfig = Field(description="Config for KV store backend")
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     # This configuration allows additional fields to be passed through to the underlying Milvus client.
     # See the [Milvus](https://milvus.io/docs/install-overview.md) documentation for more details about Milvus in general.
@@ -32,4 +40,7 @@ class MilvusVectorIOConfig(BaseModel):
                 __distro_dir__=__distro_dir__,
                 db_name="milvus_remote_registry.db",
             ),
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
@@ -23,13 +23,21 @@ class PGVectorVectorIOConfig(BaseModel):
     user: str | None = Field(default="postgres")
     password: str | None = Field(default="mysecretpassword")
     kvstore: KVStoreConfig | None = Field(description="Config for KV store backend (SQLite only for now)", default=None)
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(
         cls,
         __distro_dir__: str,
         host: str = "${env.PGVECTOR_HOST:=localhost}",
-        port: int = "${env.PGVECTOR_PORT:=5432}",
+        port: int | str = "${env.PGVECTOR_PORT:=5432}",
         db: str = "${env.PGVECTOR_DB}",
         user: str = "${env.PGVECTOR_USER}",
         password: str = "${env.PGVECTOR_PASSWORD}",
@@ -45,4 +53,7 @@ class PGVectorVectorIOConfig(BaseModel):
                 __distro_dir__=__distro_dir__,
                 db_name="pgvector_registry.db",
             ),
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
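Besides the two new fields, this hunk widens the sample's port annotation from int to int | str: the default is an environment placeholder that stays a string until the stack substitutes it, so the old int annotation was never accurate for the sample value.

    port: int | str = "${env.PGVECTOR_PORT:=5432}"  # as emitted by sample_run_config, before substitution
    port = 5432  # after env substitution it becomes an int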
@@ -6,7 +6,7 @@
 
 from typing import Any
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 from llama_stack.schema_utils import json_schema_type
 
@@ -23,9 +23,20 @@ class QdrantVectorIOConfig(BaseModel):
     prefix: str | None = None
     timeout: int | None = None
     host: str | None = None
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
 
     @classmethod
     def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
         return {
             "api_key": "${env.QDRANT_API_KEY}",
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
@@ -21,6 +21,15 @@ class WeaviateRequestProviderData(BaseModel):
 
 
 class WeaviateVectorIOConfig(BaseModel):
+    embedding_model: str | None = Field(
+        default=None,
+        description="Optional default embedding model for this provider. If not specified, will use system default.",
+    )
+    embedding_dimension: int | None = Field(
+        default=None,
+        description="Optional embedding dimension override. Only needed for models with variable dimensions (e.g., Matryoshka embeddings). If not specified, will auto-lookup from model registry.",
+    )
+
     @classmethod
     def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
@@ -28,4 +37,7 @@ class WeaviateVectorIOConfig(BaseModel):
                 __distro_dir__=__distro_dir__,
                 db_name="weaviate_registry.db",
             ),
+            # Optional: Configure default embedding model for this provider
+            # "embedding_model": "all-MiniLM-L6-v2",
+            # "embedding_dimension": 384, # Only needed for variable-dimension models
         }
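Every provider config now exposes the same two optional attributes, which is what lets the router probe them uniformly with hasattr/getattr instead of importing any particular config class. A minimal, self-contained sketch of that pattern (the config classes below are placeholders, not the real providers):

    from pydantic import BaseModel

    class _ConfigWithDefault(BaseModel):  # stands in for a VectorIOConfig that sets a default
        embedding_model: str | None = "all-MiniLM-L6-v2"
        embedding_dimension: int | None = 384

    class _ConfigWithoutDefault(BaseModel):  # stands in for a config that leaves the defaults unset
        embedding_model: str | None = None
        embedding_dimension: int | None = None

    def config_default(config) -> str | None:
        """Mirror of the router's probe: a None result means 'fall back to the system default'."""
        if hasattr(config, "embedding_model") and config.embedding_model:
            return config.embedding_model
        return None

    assert config_default(_ConfigWithDefault()) == "all-MiniLM-L6-v2"
    assert config_default(_ConfigWithoutDefault()) is None
    assert config_default(object()) is None  # configs without the fields are simply skipped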
llama_stack/providers/utils/vector_io/embedding_utils.py (new file, 5 lines)
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
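Only the five license-header lines of the new embedding_utils.py are visible in this diff, so the helper's actual contents are not shown here. Purely as a hedged guess at the kind of utility the commit message refers to, a resolver with the same priority as the router logic above could look like this (every name below is hypothetical):

    async def resolve_embedding_model(
        explicit_model: str | None,
        provider_config: object | None,
        get_first_embedding_model,  # async callable returning (model, dimension) or None
    ) -> tuple[str, int | None]:
        """Hypothetical helper: explicit choice > provider config default > first registered model."""
        if explicit_model is not None:
            return explicit_model, None
        if provider_config is not None and getattr(provider_config, "embedding_model", None):
            return provider_config.embedding_model, getattr(provider_config, "embedding_dimension", None)
        fallback = await get_first_embedding_model()
        if fallback is None:
            raise ValueError("No embedding model provided and no embedding models available in the system")
        return fallback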