Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-05 12:21:52 +00:00
docs: update configuration documentation for global default embedding model
- Clarified that `default_embedding_dimension` is optional in the YAML configuration, and that it defaults to 384 when omitted.
- Added a note to the `VectorStoreConfig` class indicating that the router falls back to 384 as the default dimension when none is set.
This commit is contained in:
parent
600c3d5188
commit
f9afad99f8
2 changed files with 4 additions and 3 deletions
|
@@ -41,5 +41,6 @@ class VectorStoreConfig(BaseModel):
|
|||
# Embedding dimension for the global default embedding model.
# Seeded from LLAMA_STACK_DEFAULT_EMBEDDING_DIMENSION at construction time;
# an unset or "0" value resolves to None.
default_embedding_dimension: int | None = Field(
    ge=1,
    default_factory=lambda: (
        int(os.getenv("LLAMA_STACK_DEFAULT_EMBEDDING_DIMENSION", 0)) or None
    ),
)
# Note: If not set, the router will fall back to 384 as the default dimension

# Instances of this config are immutable after construction.
model_config = ConfigDict(frozen=True)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue