Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

chore: Enable keyword search for Milvus inline (#3073)

With https://github.com/milvus-io/milvus-lite/pull/294, Milvus Lite supports keyword search using BM25. When keyword search was first introduced, it was explicitly disabled for inline Milvus. This PR removes the need for that check and enables `inline::milvus` for tests.

Run llama stack with `inline::milvus` enabled:

```
pytest tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes --stack-config=http://localhost:8321 --embedding-model=all-MiniLM-L6-v2 -v
```

```
collected 3 items

tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-vector] PASSED   [ 33%]
tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-keyword] PASSED  [ 66%]
tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-hybrid] PASSED   [100%]

3 passed in 4.75s
```

Signed-off-by: Varsha Prasad Narsing <varshaprasad96@gmail.com>
Co-authored-by: Francisco Arceo <arceofrancisco@gmail.com>

chore: Fixup main pre commit (#3204)

build: Bump version to 0.2.18

chore: Faster npm pre-commit (#3206)

Adds npm installation and ui caching to pre-commit.yml; removes node installation during pre-commit.

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

checking in for tonight, WIP moving to agents api
remove log
updated

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix: disable ui-prettier & ui-eslint (#3207)

chore(pre-commit): add pre-commit hook to enforce llama_stack logger usage (#3061)

This PR adds a pre-commit step that enforces use of the `llama_stack` logger. Various parts of the codebase currently use different loggers; since a custom `llama_stack` logger exists and is already used across the codebase, it is better to standardize on it (see the sketch below).

Signed-off-by: Mustafa Elbehery <melbeher@redhat.com>
Co-authored-by: Matthew Farrellee <matt@cs.wisc.edu>
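For reference, this is the pattern the hook standardizes on — a minimal sketch mirroring the usage in the source file below (the category value is illustrative):

```python
from llama_stack.log import get_logger

# One module-level logger, tagged with a category for filtering.
log = get_logger(name=__name__, category="inference")

log.info("Loading sentence transformer...")
```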
fix: fix `openai_embeddings` for asymmetric embedding NIMs (#3205)

NVIDIA asymmetric embedding models (e.g., `nvidia/llama-3.2-nv-embedqa-1b-v2`) require an `input_type` parameter that is not present in the standard OpenAI embeddings API. This PR adds `input_type="query"` as the default and updates the documentation to suggest using the `embedding` API for passage embeddings; see the sketch below.

Resolves #2892

```
pytest -s -v tests/integration/inference/test_openai_embeddings.py --stack-config="inference=nvidia" --embedding-model="nvidia/llama-3.2-nv-embedqa-1b-v2" --env NVIDIA_API_KEY={nvidia_api_key} --env NVIDIA_BASE_URL="https://integrate.api.nvidia.com"
```

cleaning up
updating session manager to cache messages locally
fix linter
more cleanup

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
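For passage (document) embeddings, the suggestion above points at the non-OpenAI `embeddings` API, which carries a `task_type` parameter (visible in the method signature in the file below). A minimal sketch; the `inference` handle and the enum member are assumptions, not confirmed API:

```python
from llama_stack.apis.inference import EmbeddingTaskType

# Hypothetical inference implementation exposing the embeddings API shown below.
response = await inference.embeddings(
    model_id="nvidia/llama-3.2-nv-embedqa-1b-v2",
    contents=["a passage to embed"],
    task_type=EmbeddingTaskType.document,  # assumed member; "query" is the default input_type
)
```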
108 lines
3.6 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import base64
import struct
from typing import TYPE_CHECKING

from llama_stack.log import get_logger

if TYPE_CHECKING:
    from sentence_transformers import SentenceTransformer

from llama_stack.apis.inference import (
    EmbeddingsResponse,
    EmbeddingTaskType,
    InterleavedContentItem,
    ModelStore,
    OpenAIEmbeddingData,
    OpenAIEmbeddingsResponse,
    OpenAIEmbeddingUsage,
    TextTruncation,
)
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str

# Process-wide cache of loaded SentenceTransformer instances, keyed by model name.
EMBEDDING_MODELS = {}

log = get_logger(name=__name__, category="inference")


class SentenceTransformerEmbeddingMixin:
    model_store: ModelStore

    async def embeddings(
        self,
        model_id: str,
        contents: list[str] | list[InterleavedContentItem],
        text_truncation: TextTruncation | None = TextTruncation.none,
        output_dimension: int | None = None,
        task_type: EmbeddingTaskType | None = None,
    ) -> EmbeddingsResponse:
        # Resolve the registered model to its provider resource id, then embed.
        # text_truncation, output_dimension, and task_type are accepted for API
        # compatibility but not applied by this local provider.
        model = await self.model_store.get_model(model_id)
        embedding_model = self._load_sentence_transformer_model(model.provider_resource_id)
        embeddings = embedding_model.encode(
            [interleaved_content_as_str(content) for content in contents], show_progress_bar=False
        )
        return EmbeddingsResponse(embeddings=embeddings)
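
    # NOTE: embeddings() above serves the llama-stack embeddings API, while
    # openai_embeddings() below implements the OpenAI-compatible endpoint.
    # Both resolve models through the same process-wide cache.
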
    async def openai_embeddings(
        self,
        model: str,
        input: str | list[str],
        encoding_format: str | None = "float",
        dimensions: int | None = None,
        user: str | None = None,
    ) -> OpenAIEmbeddingsResponse:
        # Convert input to list format if it's a single string
        input_list = [input] if isinstance(input, str) else input
        if not input_list:
            raise ValueError("Empty list not supported")

        # Get the model and generate embeddings
        model_obj = await self.model_store.get_model(model)
        embedding_model = self._load_sentence_transformer_model(model_obj.provider_resource_id)
        embeddings = embedding_model.encode(input_list, show_progress_bar=False)

        # Convert embeddings to the requested format
        data = []
        for i, embedding in enumerate(embeddings):
            if encoding_format == "base64":
                # Pack the float32 values into bytes, then base64-encode them
                float_bytes = struct.pack(f"{len(embedding)}f", *embedding)
                embedding_value = base64.b64encode(float_bytes).decode("ascii")
            else:
                # Default to float format
                embedding_value = embedding.tolist()

            data.append(
                OpenAIEmbeddingData(
                    embedding=embedding_value,
                    index=i,
                )
            )

        # Token usage is not tracked locally; -1 signals "unknown"
        usage = OpenAIEmbeddingUsage(prompt_tokens=-1, total_tokens=-1)
        return OpenAIEmbeddingsResponse(
            data=data,
            model=model,
            usage=usage,
        )

    def _load_sentence_transformer_model(self, model: str) -> "SentenceTransformer":
        global EMBEDDING_MODELS

        # Return the cached instance if this model was already loaded.
        loaded_model = EMBEDDING_MODELS.get(model)
        if loaded_model is not None:
            return loaded_model

        log.info(f"Loading sentence transformer for {model}...")
        # Imported lazily so the heavy sentence-transformers dependency is only
        # paid for when an embedding model is actually used.
        from sentence_transformers import SentenceTransformer

        loaded_model = SentenceTransformer(model)
        EMBEDDING_MODELS[model] = loaded_model
        return loaded_model
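

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): decoding a base64-encoded
# embedding returned by openai_embeddings(encoding_format="base64") back into
# floats. This mirrors the struct.pack call above and uses only the stdlib.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Pretend this came back from openai_embeddings(..., encoding_format="base64").
    vector = [0.1, -0.25, 0.5]
    encoded = base64.b64encode(struct.pack(f"{len(vector)}f", *vector)).decode("ascii")

    # Invert the encoding: base64 -> raw bytes -> 4-byte little-endian-native floats.
    raw = base64.b64decode(encoded)
    decoded = list(struct.unpack(f"{len(raw) // 4}f", raw))
    print(decoded)  # approximately [0.1, -0.25, 0.5], up to float32 precision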