feat(api): Add options for supporting various embedding models (#1192)
We need to support:

- asymmetric embedding models (#934)
- truncation policies (#933)
- varying dimensional output (#932)

## Test Plan

```bash
$ cd llama_stack/providers/tests/inference
$ pytest -s -v -k fireworks test_embeddings.py \
    --inference-model nomic-ai/nomic-embed-text-v1.5 --env EMBEDDING_DIMENSION=784
$ pytest -s -v -k together test_embeddings.py \
    --inference-model togethercomputer/m2-bert-80M-8k-retrieval --env EMBEDDING_DIMENSION=784
$ pytest -s -v -k ollama test_embeddings.py \
    --inference-model all-minilm:latest --env EMBEDDING_DIMENSION=784
```
parent 6f9d622340
commit 81ce39a607
19 changed files with 202 additions and 11 deletions
```diff
@@ -402,6 +402,30 @@ class ModelStore(Protocol):
     def get_model(self, identifier: str) -> Model: ...
 
 
+class TextTruncation(Enum):
+    """Config for how to truncate text for embedding when text is longer than the model's max sequence length. Start and End semantics depend on whether the language is left-to-right or right-to-left.
+
+    :cvar none: No truncation (default). If the text is longer than the model's max sequence length, you will get an error.
+    :cvar start: Truncate from the start
+    :cvar end: Truncate from the end
+    """
+
+    none = "none"
+    start = "start"
+    end = "end"
+
+
+class EmbeddingTaskType(Enum):
+    """How is the embedding being used? This is only supported by asymmetric embedding models.
+
+    :cvar query: Used for a query for semantic search.
+    :cvar document: Used at indexing time when ingesting documents.
+    """
+
+    query = "query"
+    document = "document"
 
 
 @runtime_checkable
 @trace_protocol
 class Inference(Protocol):
```
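These enums are plain value objects; enforcing them is left to each provider. Below is a minimal, hypothetical sketch (not part of this diff) of how a provider might honor both options, assuming a token-level `max_seq_len` budget and nomic-style task prefixes, neither of which this commit specifies:

```python
# Hypothetical provider-side helpers; everything here is illustrative.
from enum import Enum


class TextTruncation(Enum):
    none = "none"
    start = "start"
    end = "end"


class EmbeddingTaskType(Enum):
    query = "query"
    document = "document"


def apply_truncation(tokens: list[int], max_seq_len: int, policy: TextTruncation) -> list[int]:
    """Enforce the model's max sequence length according to the policy."""
    if len(tokens) <= max_seq_len:
        return tokens
    if policy is TextTruncation.none:
        # Default: surface an error rather than silently dropping input.
        raise ValueError(f"input is {len(tokens)} tokens; model max is {max_seq_len}")
    if policy is TextTruncation.start:
        # One plausible reading of "truncate from the start": drop the head.
        return tokens[-max_seq_len:]
    # TextTruncation.end: drop the tail, keep the head.
    return tokens[:max_seq_len]


def apply_task_type(text: str, task_type: EmbeddingTaskType | None) -> str:
    """Asymmetric models typically expect a task-specific prefix (assumed here)."""
    if task_type is None:
        return text
    prefix = {
        EmbeddingTaskType.query: "search_query: ",
        EmbeddingTaskType.document: "search_document: ",
    }[task_type]
    return prefix + text
```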
```diff
@@ -482,11 +506,17 @@ class Inference(Protocol):
         self,
         model_id: str,
         contents: List[str] | List[InterleavedContentItem],
+        text_truncation: Optional[TextTruncation] = TextTruncation.none,
+        output_dimension: Optional[int] = None,
+        task_type: Optional[EmbeddingTaskType] = None,
     ) -> EmbeddingsResponse:
         """Generate embeddings for content pieces using the specified model.
 
         :param model_id: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.
         :param contents: List of contents to generate embeddings for. Each content can be a string or an InterleavedContentItem (and hence can be multimodal). The behavior depends on the model and provider. Some models may only support text.
+        :param output_dimension: (Optional) Output dimensionality for the embeddings. Only supported by Matryoshka models.
+        :param text_truncation: (Optional) Config for how to truncate text for embedding when text is longer than the model's max sequence length.
+        :param task_type: (Optional) How is the embedding being used? This is only supported by asymmetric embedding models.
         :returns: An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}
         """
         ...
```
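From the client side, the new parameters compose roughly as follows. This is a hedged sketch assuming the llama-stack-client Python package, a locally running stack, and the nomic model from the test plan above; the base URL and the string spellings of the enum values are assumptions:

```python
# Illustrative usage; assumes a running Llama Stack server with an
# embedding model registered as in the test plan above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

response = client.inference.embeddings(
    model_id="nomic-ai/nomic-embed-text-v1.5",  # asymmetric embedding model
    contents=["What is the capital of France?"],
    text_truncation="end",   # drop the tail if over the max sequence length
    output_dimension=784,    # Matryoshka models can emit reduced dimensions
    task_type="query",       # query-side embedding for semantic search
)

for embedding in response.embeddings:
    print(len(embedding))  # dimensionality is model-specific (784 here)
```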