forked from phoenix-oss/llama-stack-mirror
Some checks failed
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 4s
Integration Tests / test-matrix (http, inspect) (push) Failing after 9s
Integration Tests / test-matrix (http, inference) (push) Failing after 9s
Integration Tests / test-matrix (http, datasets) (push) Failing after 10s
Integration Tests / test-matrix (http, post_training) (push) Failing after 9s
Integration Tests / test-matrix (library, agents) (push) Failing after 7s
Integration Tests / test-matrix (http, agents) (push) Failing after 10s
Integration Tests / test-matrix (http, tool_runtime) (push) Failing after 8s
Integration Tests / test-matrix (http, providers) (push) Failing after 9s
Integration Tests / test-matrix (library, datasets) (push) Failing after 8s
Integration Tests / test-matrix (library, inference) (push) Failing after 9s
Integration Tests / test-matrix (http, scoring) (push) Failing after 10s
Test Llama Stack Build / generate-matrix (push) Successful in 6s
Integration Tests / test-matrix (library, providers) (push) Failing after 7s
Test Llama Stack Build / build-custom-container-distribution (push) Failing after 6s
Integration Tests / test-matrix (library, inspect) (push) Failing after 9s
Test Llama Stack Build / build-single-provider (push) Failing after 7s
Integration Tests / test-matrix (library, scoring) (push) Failing after 9s
Integration Tests / test-matrix (library, post_training) (push) Failing after 9s
Test Llama Stack Build / build-ubi9-container-distribution (push) Failing after 7s
Integration Tests / test-matrix (library, tool_runtime) (push) Failing after 10s
Unit Tests / unit-tests (3.11) (push) Failing after 7s
Test Llama Stack Build / build (push) Failing after 5s
Unit Tests / unit-tests (3.10) (push) Failing after 7s
Update ReadTheDocs / update-readthedocs (push) Failing after 6s
Unit Tests / unit-tests (3.12) (push) Failing after 8s
Unit Tests / unit-tests (3.13) (push) Failing after 7s
Test External Providers / test-external-providers (venv) (push) Failing after 26s
Pre-commit / pre-commit (push) Successful in 1m11s
# What does this PR do? Adds a new endpoint that is compatible with OpenAI for embeddings api. `/openai/v1/embeddings` Added providers for OpenAI, LiteLLM and SentenceTransformer. ## Test Plan ``` LLAMA_STACK_CONFIG=http://localhost:8321 pytest -sv tests/integration/inference/test_openai_embeddings.py --embedding-model all-MiniLM-L6-v2,text-embedding-3-small,gemini/text-embedding-004 ```
107 lines
3.6 KiB
Python
107 lines
3.6 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
import base64
|
|
import logging
|
|
import struct
|
|
from typing import TYPE_CHECKING
|
|
|
|
if TYPE_CHECKING:
|
|
from sentence_transformers import SentenceTransformer
|
|
|
|
from llama_stack.apis.inference import (
|
|
EmbeddingsResponse,
|
|
EmbeddingTaskType,
|
|
InterleavedContentItem,
|
|
ModelStore,
|
|
OpenAIEmbeddingData,
|
|
OpenAIEmbeddingsResponse,
|
|
OpenAIEmbeddingUsage,
|
|
TextTruncation,
|
|
)
|
|
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
|
|
|
|
# Process-wide cache of loaded SentenceTransformer models, keyed by the
# provider resource ID (the sentence-transformers model name).
# NOTE(review): unbounded — entries are never evicted; assumes a small,
# fixed set of embedding models per process.
EMBEDDING_MODELS = {}


log = logging.getLogger(__name__)
|
|
|
|
|
|
class SentenceTransformerEmbeddingMixin:
    """Mixin providing embedding generation backed by sentence-transformers.

    Expects the host class to supply ``model_store`` so public model IDs can
    be resolved to provider resource IDs (the actual sentence-transformers
    model names). Loaded models are cached process-wide in the module-level
    ``EMBEDDING_MODELS`` dict.
    """

    model_store: ModelStore

    async def embeddings(
        self,
        model_id: str,
        contents: list[str] | list[InterleavedContentItem],
        text_truncation: TextTruncation | None = TextTruncation.none,
        output_dimension: int | None = None,
        task_type: EmbeddingTaskType | None = None,
    ) -> EmbeddingsResponse:
        """Generate embeddings for ``contents`` via the stack-native API.

        NOTE(review): ``text_truncation``, ``output_dimension`` and
        ``task_type`` are accepted for interface compatibility but are
        currently ignored by this provider.
        """
        model = await self.model_store.get_model(model_id)
        embeddings = self._encode_for_model(
            model.provider_resource_id,
            [interleaved_content_as_str(content) for content in contents],
        )
        return EmbeddingsResponse(embeddings=embeddings)

    async def openai_embeddings(
        self,
        model: str,
        input: str | list[str],
        encoding_format: str | None = "float",
        dimensions: int | None = None,
        user: str | None = None,
    ) -> OpenAIEmbeddingsResponse:
        """Generate embeddings via the OpenAI-compatible ``/v1/embeddings`` contract.

        :param model: public model identifier, resolved through the model store
        :param input: a single string or a list of strings to embed
        :param encoding_format: ``"float"`` (default) or ``"base64"``
        :param dimensions: accepted for API compatibility; currently ignored —
            the sentence-transformers model emits its native dimension
        :param user: accepted for API compatibility; currently ignored
        :raises ValueError: on an empty input list or an unsupported
            ``encoding_format``
        """
        # Normalize a single string to the list form used below.
        input_list = [input] if isinstance(input, str) else input
        if not input_list:
            raise ValueError("Empty list not supported")
        # Fail fast on formats we cannot produce, instead of silently
        # returning floats for e.g. a typo'd format name.
        if encoding_format not in (None, "float", "base64"):
            raise ValueError(f"Unsupported encoding format: {encoding_format}")

        model_obj = await self.model_store.get_model(model)
        embeddings = self._encode_for_model(model_obj.provider_resource_id, input_list)

        data = []
        for i, embedding in enumerate(embeddings):
            if encoding_format == "base64":
                # Pack as float32 in native byte order, then base64-encode.
                # NOTE(review): OpenAI clients expect little-endian; this is
                # identical on x86/ARM but would differ on big-endian hosts.
                float_bytes = struct.pack(f"{len(embedding)}f", *embedding)
                embedding_value = base64.b64encode(float_bytes).decode("ascii")
            else:
                # Default ("float" / None): plain list of Python floats.
                embedding_value = embedding.tolist()
            data.append(
                OpenAIEmbeddingData(
                    embedding=embedding_value,
                    index=i,
                )
            )

        # Token accounting is not implemented for local models; -1 signals "unknown".
        usage = OpenAIEmbeddingUsage(prompt_tokens=-1, total_tokens=-1)
        return OpenAIEmbeddingsResponse(
            data=data,
            model=model_obj.provider_resource_id,
            usage=usage,
        )

    def _encode_for_model(self, provider_model_id: str, texts: list[str]):
        """Load (or fetch cached) model and encode ``texts``; shared by both APIs."""
        embedding_model = self._load_sentence_transformer_model(provider_model_id)
        return embedding_model.encode(texts, show_progress_bar=False)

    def _load_sentence_transformer_model(self, model: str) -> "SentenceTransformer":
        """Return a cached SentenceTransformer for ``model``, loading on first use.

        NOTE(review): the cache is not lock-guarded — concurrent first calls
        may load the same model twice; the last load wins in the cache.
        """
        global EMBEDDING_MODELS

        loaded_model = EMBEDDING_MODELS.get(model)
        if loaded_model is not None:
            return loaded_model

        log.info(f"Loading sentence transformer for {model}...")
        # Imported lazily: sentence-transformers is a heavy dependency only
        # needed by providers that actually serve embeddings.
        from sentence_transformers import SentenceTransformer

        loaded_model = SentenceTransformer(model)
        EMBEDDING_MODELS[model] = loaded_model
        return loaded_model