Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-17 15:09:49 +00:00)
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

chore: Enable keyword search for Milvus inline (#3073)

With https://github.com/milvus-io/milvus-lite/pull/294, Milvus Lite supports keyword search using BM25. When keyword search was first introduced, it was explicitly disabled for inline Milvus. This PR removes the need for that check and enables `inline::milvus` for tests.

Run llama stack with `inline::milvus` enabled:

```
pytest tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes --stack-config=http://localhost:8321 --embedding-model=all-MiniLM-L6-v2 -v
```

```
INFO 2025-08-07 17:06:20,932 tests.integration.conftest:64 tests: Setting DISABLE_CODE_SANDBOX=1 for macOS
=============================== test session starts ===============================
platform darwin -- Python 3.12.11, pytest-7.4.4, pluggy-1.5.0 -- /Users/vnarsing/miniconda3/envs/stack-client/bin/python
cachedir: .pytest_cache
metadata: {'Python': '3.12.11', 'Platform': 'macOS-14.7.6-arm64-arm-64bit', 'Packages': {'pytest': '7.4.4', 'pluggy': '1.5.0'}, 'Plugins': {'asyncio': '0.23.8', 'cov': '6.0.0', 'timeout': '2.2.0', 'socket': '0.7.0', 'html': '3.1.1', 'langsmith': '0.3.39', 'anyio': '4.8.0', 'metadata': '3.0.0'}}
rootdir: /Users/vnarsing/go/src/github/meta-llama/llama-stack
configfile: pyproject.toml
plugins: asyncio-0.23.8, cov-6.0.0, timeout-2.2.0, socket-0.7.0, html-3.1.1, langsmith-0.3.39, anyio-4.8.0, metadata-3.0.0
asyncio: mode=Mode.AUTO
collected 3 items

tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-vector] PASSED  [ 33%]
tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-keyword] PASSED [ 66%]
tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-hybrid] PASSED  [100%]
================================ 3 passed in 4.75s ================================
```

Signed-off-by: Varsha Prasad Narsing <varshaprasad96@gmail.com>
Co-authored-by: Francisco Arceo <arceofrancisco@gmail.com>

chore: Fixup main pre commit (#3204)

build: Bump version to 0.2.18

chore: Faster npm pre-commit (#3206)

Adds npm installation to pre-commit.yml and caches the ui directory. Removes Node installation during pre-commit.

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

checking in for tonight, WIP moving to agents api

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

remove log

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

updated

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix: disable ui-prettier & ui-eslint (#3207)

chore(pre-commit): add pre-commit hook to enforce llama_stack logger usage (#3061)

This PR adds a pre-commit step that enforces use of the `llama_stack` logger. Various parts of the codebase currently use different loggers; since a custom `llama_stack` logger already exists and is used in the codebase, it is better to standardize on it.

Signed-off-by: Mustafa Elbehery <melbeher@redhat.com>
Co-authored-by: Matthew Farrellee <matt@cs.wisc.edu>
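For reference, the standardized pattern is the one the module below already uses. A minimal sketch (the `category` value here simply mirrors this file; other category names are an assumption about the caller's subsystem):

```python
# Minimal sketch of the logger usage the pre-commit hook enforces.
from llama_stack.log import get_logger

# name/category follow the pattern used in the module below.
logger = get_logger(name=__name__, category="core")
logger.info("prefer the shared llama_stack logger over logging.getLogger")
```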
fix: fix `openai_embeddings` for asymmetric embedding NIMs (#3205)

NVIDIA asymmetric embedding models (e.g., `nvidia/llama-3.2-nv-embedqa-1b-v2`) require an `input_type` parameter that is not present in the standard OpenAI embeddings API. This PR adds `input_type="query"` as the default and updates the documentation to suggest using the `embedding` API for passage embeddings.

Resolves #2892

```
pytest -s -v tests/integration/inference/test_openai_embeddings.py --stack-config="inference=nvidia" --embedding-model="nvidia/llama-3.2-nv-embedqa-1b-v2" --env NVIDIA_API_KEY={nvidia_api_key} --env NVIDIA_BASE_URL="https://integrate.api.nvidia.com"
```

cleaning up

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

updating session manager to cache messages locally

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix linter

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

more cleanup

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
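To illustrate the asymmetric-model requirement, here is a minimal sketch that passes NVIDIA's non-standard `input_type` field through the OpenAI SDK's `extra_body` escape hatch. The base URL and model name come from the test command above; that the endpoint accepts `input_type` this way is an assumption for illustration:

```python
# Minimal sketch: query-side embeddings against an NVIDIA NIM via the OpenAI SDK.
import os

from openai import OpenAI

client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",  # from the test command above
    api_key=os.environ["NVIDIA_API_KEY"],
)

resp = client.embeddings.create(
    model="nvidia/llama-3.2-nv-embedqa-1b-v2",
    input=["What is Llama Stack?"],
    extra_body={"input_type": "query"},  # the default this PR applies; assumed pass-through
)
print(len(resp.data[0].embedding))
```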
96 lines
2.5 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import importlib
import os
import signal
import subprocess
import sys

from termcolor import cprint

from llama_stack.log import get_logger

log = get_logger(name=__name__, category="core")

def formulate_run_args(image_type: str, image_name: str) -> list:
    # Only venv is supported now
    current_venv = os.environ.get("VIRTUAL_ENV")
    env_name = image_name or current_venv
    if not env_name:
        cprint(
            "No current virtual environment detected, please specify a virtual environment name with --image-name",
            color="red",
            file=sys.stderr,
        )
        return []

    cprint(f"Using virtual environment: {env_name}", file=sys.stderr)

    script = importlib.resources.files("llama_stack") / "core/start_stack.sh"
    run_args = [
        script,
        image_type,
        env_name,
    ]

    return run_args

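# Usage sketch (hypothetical caller, for illustration only): the extra
# arguments appended after the env name (a run config path and a port)
# are assumptions about what core/start_stack.sh accepts.
#
#   run_args = formulate_run_args("venv", "my-stack-env")
#   if run_args:
#       run_args.extend(["run.yaml", "8321"])
#       sys.exit(run_command([str(arg) for arg in run_args]))
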
def in_notebook():
    """Best-effort check for whether we are running inside a Jupyter/IPython kernel."""
    try:
        from IPython import get_ipython

        ipython = get_ipython()
        if ipython is None or "IPKernelApp" not in ipython.config:  # pragma: no cover
            return False
    except ImportError:
        return False
    except AttributeError:
        return False
    return True

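# Usage sketch (hypothetical caller): branch on the execution environment,
# for example to decide how to drive an asyncio event loop.
#
#   if in_notebook():
#       ...  # a Jupyter kernel already runs an event loop
#   else:
#       ...  # safe to call asyncio.run() directly
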
def run_command(command: list[str]) -> int:
    """
    Run a command with interrupt handling.

    Uses subprocess.run with stdout/stderr inherited from the parent process,
    so output streams directly to the system streams.

    Args:
        command (list): The command to run.

    Returns:
        int: The return code of the command.
    """
    original_sigint = signal.getsignal(signal.SIGINT)
    ctrl_c_pressed = False

    def sigint_handler(signum, frame):
        # Record the interrupt and log it; the subprocess also receives SIGINT
        # from the terminal and is expected to shut itself down.
        nonlocal ctrl_c_pressed
        ctrl_c_pressed = True
        log.info("\nCtrl-C detected. Aborting...")

    try:
        # Install the signal handler for the duration of the subprocess call
        signal.signal(signal.SIGINT, sigint_handler)

        # Run the command with stdout/stderr inherited from the parent process
        result = subprocess.run(
            command,
            text=True,
            check=False,
        )
        return result.returncode
    except subprocess.SubprocessError as e:
        log.error(f"Subprocess error: {e}")
        return 1
    except Exception as e:
        log.exception(f"Unexpected error: {e}")
        return 1
    finally:
        # Restore the original signal handler
        signal.signal(signal.SIGINT, original_sigint)
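
# Usage sketch (demo only; the entry-point guard and echoed command are
# assumptions for illustration, not part of the original module):
if __name__ == "__main__":
    rc = run_command(["echo", "hello from llama-stack"])
    log.info(f"echo exited with code {rc}")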