# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import contextvars
import json
from contextlib import AbstractContextManager
from typing import Any

from llama_stack.core.datatypes import User
from llama_stack.log import get_logger

from .utils.dynamic import instantiate_class_type

log = get_logger(name=__name__, category="core")

# Context variable for request provider data and auth attributes
PROVIDER_DATA_VAR = contextvars.ContextVar("provider_data", default=None)
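# NOTE: a ContextVar is scoped to the current thread / asyncio task, so each
# in-flight request sees only the value set for it; concurrent requests cannot
# observe each other's provider data.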


class RequestProviderDataContext(AbstractContextManager):
    """Context manager for request provider data"""

    def __init__(self, provider_data: dict[str, Any] | None = None, user: User | None = None):
        self.provider_data = provider_data or {}
        if user:
            self.provider_data["__authenticated_user"] = user

        self.token = None

    def __enter__(self):
        # Save the current value and set the new one
        self.token = PROVIDER_DATA_VAR.set(self.provider_data)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the previous value
        if self.token is not None:
            PROVIDER_DATA_VAR.reset(self.token)
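
# Illustrative usage sketch (not part of the module; the key and value below
# are made up):
#
#     with RequestProviderDataContext({"example_api_key": "sk-..."}):
#         ...  # code in this block sees the data via PROVIDER_DATA_VAR.get()
#     # on exit the previous value is restored, even if the block raised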


class NeedsRequestProviderData:
    def get_request_provider_data(self) -> Any:
        # Use getattr with a default so a missing attribute surfaces as the
        # ValueError below rather than an AttributeError.
        spec = getattr(self, "__provider_spec__", None)
        if not spec:
            raise ValueError(f"Provider spec not set on {self.__class__}")

        provider_type = spec.provider_type
        validator_class = spec.provider_data_validator
        if not validator_class:
            raise ValueError(f"Provider {provider_type} does not have a validator")

        val = PROVIDER_DATA_VAR.get()
        if not val:
            return None

        validator = instantiate_class_type(validator_class)
        try:
            provider_data = validator(**val)
            return provider_data
        except Exception as e:
            log.error(f"Error parsing provider data: {e}")
            return None
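
# Hypothetical sketch of how a provider consumes this mixin (the validator and
# field names are illustrative, not taken from this module):
#
#     class ExampleProviderDataValidator(BaseModel):
#         example_api_key: str | None = None
#
#     class ExampleAdapter(NeedsRequestProviderData):
#         def _get_api_key(self) -> str:
#             data = self.get_request_provider_data()
#             if data and data.example_api_key:
#                 return data.example_api_key
#             raise ValueError("Missing example_api_key in provider data")
#
# The validator class named by the provider spec's `provider_data_validator`
# field is resolved and instantiated via instantiate_class_type above.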


def parse_request_provider_data(headers: dict[str, str]) -> dict[str, Any] | None:
    """Parse provider data from request headers"""
    keys = [
        "X-LlamaStack-Provider-Data",
        "x-llamastack-provider-data",
    ]
    val = None
    for key in keys:
        val = headers.get(key, None)
        if val:
            break

    if not val:
        return None

    try:
        return json.loads(val)
    except json.JSONDecodeError:
        log.error("Provider data not encoded as a JSON object!")
        return None
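
# Example of the header a client would send (illustrative values):
#
#     X-LlamaStack-Provider-Data: {"example_api_key": "sk-..."}
#
# which this function parses into the dict {"example_api_key": "sk-..."}.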


def request_provider_data_context(
    headers: dict[str, str], user: User | None = None
) -> AbstractContextManager:
    """Context manager that sets request provider data from headers, and the authenticated user if provided, for the duration of the context"""
    provider_data = parse_request_provider_data(headers)
    return RequestProviderDataContext(provider_data, user)
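
# Sketch of typical server-side wiring (hypothetical handler; the real server
# code lives elsewhere in the codebase):
#
#     user = user_from_scope(request.scope)
#     with request_provider_data_context(dict(request.headers), user):
#         result = await provider_impl.some_method(...)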


def get_authenticated_user() -> User | None:
    """Helper to retrieve the authenticated user from the provider data context"""
    provider_data = PROVIDER_DATA_VAR.get()
    if not provider_data:
        return None
    return provider_data.get("__authenticated_user")


def user_from_scope(scope: dict) -> User | None:
    """Create a User object from ASGI scope data (set by authentication middleware)"""
    user_attributes = scope.get("user_attributes", {})
    principal = scope.get("principal", "")

    # auth not enabled
    if not principal and not user_attributes:
        return None

    return User(principal=principal, attributes=user_attributes)
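
# Illustrative sketch: an authentication middleware is expected to populate the
# ASGI scope before this helper runs, e.g.
#
#     scope["principal"] = "alice"
#     scope["user_attributes"] = {"roles": ["admin"]}
#
# after which user_from_scope(scope) returns
# User(principal="alice", attributes={"roles": ["admin"]}).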