Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-25 05:11:59 +00:00
feat: configure vector-io provider with an embedding model
Signed-off-by: Mustafa Elbehery <melbeher@redhat.com>
parent 1f0766308d
commit d8f013b35a
29 changed files with 228 additions and 24 deletions
llama_stack/providers/utils/vector_io/embedding_config.py (new file, 36 lines added)
@@ -0,0 +1,36 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pydantic import BaseModel, Field

from llama_stack.schema_utils import json_schema_type


@json_schema_type
class EmbeddingConfig(BaseModel):
    """Configuration for the embedding model used by vector-io providers.

    This allows providers to specify default embedding models for use-case-specific
    vector stores, reducing the need for app developers to know embedding details.

    Example usage in provider config:
    ```yaml
    vector_io:
    - provider_id: question-answer
      provider_type: remote::pgvector
      config:
        embedding:
          model: prod/question-answer-embedder
          dimensions: 384
    ```
    """

    model: str = Field(description="The embedding model identifier to use")
    dimensions: int | None = Field(default=None, description="The embedding dimensions (optional, can be inferred)")

    def get_dimensions_or_default(self, default: int = 384) -> int:
        """Get dimensions with fallback to the default if not specified."""
        return self.dimensions if self.dimensions is not None else default
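Not part of the diff: a minimal sketch of how a vector-io provider might consume this config once it is wired into a provider config class. `PGVectorVectorIOConfig` and the `raw` dict below are hypothetical stand-ins for illustration; only `EmbeddingConfig`, its fields, and `get_dimensions_or_default` come from this commit.

```python
# Illustrative sketch only (not from this commit): a hypothetical provider
# config that embeds EmbeddingConfig and resolves dimensions via the fallback.
from pydantic import BaseModel

from llama_stack.providers.utils.vector_io.embedding_config import EmbeddingConfig


class PGVectorVectorIOConfig(BaseModel):
    """Hypothetical provider config with an optional embedding section."""

    embedding: EmbeddingConfig | None = None


raw = {
    "embedding": {
        "model": "prod/question-answer-embedder",
        # "dimensions" intentionally omitted; the provider falls back to a default.
    }
}

cfg = PGVectorVectorIOConfig.model_validate(raw)
assert cfg.embedding is not None
print(cfg.embedding.model)                           # prod/question-answer-embedder
print(cfg.embedding.get_dimensions_or_default(384))  # 384, since dimensions is unset
```

With this shape, an app developer only names the embedding model in the run config; the provider resolves dimensions itself when they are omitted.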