# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import textwrap
from typing import Any

from llama_stack.core.datatypes import (
    LLAMA_STACK_RUN_CONFIG_VERSION,
    DistributionSpec,
    Provider,
    StackRunConfig,
)
from llama_stack.core.distribution import (
    builtin_automatically_routed_apis,
    get_provider_registry,
)
from llama_stack.core.stack import cast_image_name_to_string, replace_env_vars
from llama_stack.core.utils.config_dirs import EXTERNAL_PROVIDERS_DIR
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.prompt_for_config import prompt_for_config
from llama_stack.log import get_logger
from llama_stack.providers.datatypes import Api, ProviderSpec

logger = get_logger(name=__name__, category="core")

def configure_single_provider(registry: dict[str, ProviderSpec], provider: Provider) -> Provider:
    """Interactively prompt for one provider's config, seeding the prompt with any existing values."""
    provider_spec = registry[provider.provider_type]
    config_type = instantiate_class_type(provider_spec.config_class)
    try:
        if provider.config:
            existing = config_type(**provider.config)
        else:
            existing = None
    except Exception:
        # the existing config no longer validates against the current schema; start fresh
        existing = None

    cfg = prompt_for_config(config_type, existing)
    return Provider(
        provider_id=provider.provider_id,
        provider_type=provider.provider_type,
        config=cfg.model_dump(),
    )

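# Illustrative sketch, not executed by this module: `configure_single_provider`
# is normally driven by `configure_api_providers` below, but it can be called
# directly. The provider type and ID here are hypothetical examples.
#
#   inference_registry = get_provider_registry()[Api.inference]
#   updated = configure_single_provider(
#       inference_registry,
#       Provider(provider_id="ollama", provider_type="remote::ollama", config={}),
#   )
#   # updated.config now holds the values entered at the interactive prompt
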
def configure_api_providers(config: StackRunConfig, build_spec: DistributionSpec) -> StackRunConfig:
    """Walk each API the stack serves and configure (or re-configure) its providers interactively."""
    is_nux = len(config.providers) == 0

    if is_nux:
        logger.info(
            textwrap.dedent(
                """
                Llama Stack is composed of several APIs working together. For each API served by the Stack,
                we need to configure the providers (implementations) you want to use for these APIs.
                """
            )
        )

    provider_registry = get_provider_registry()
    builtin_apis = [a.routing_table_api for a in builtin_automatically_routed_apis()]

    if config.apis:
        apis_to_serve = config.apis
    else:
        apis_to_serve = [a.value for a in Api if a not in (Api.telemetry, Api.inspect, Api.providers)]

    for api_str in apis_to_serve:
        api = Api(api_str)
        if api in builtin_apis:
            continue
        if api not in provider_registry:
            raise ValueError(f"Unknown API `{api_str}`")

        existing_providers = config.providers.get(api_str, [])
        if existing_providers:
            logger.info(f"Re-configuring existing providers for API `{api_str}`...")
            updated_providers = []
            for p in existing_providers:
                logger.info(f"> Configuring provider `({p.provider_type})`")
                updated_providers.append(configure_single_provider(provider_registry[api], p))
                logger.info("")
        else:
            # we are newly configuring this API
            plist = build_spec.providers.get(api_str, [])
            plist = plist if isinstance(plist, list) else [plist]

            if not plist:
                raise ValueError(f"No provider configured for API {api_str}?")

            logger.info(f"Configuring API `{api_str}`...")
            updated_providers = []
            for i, provider in enumerate(plist):
                if i >= 1:
                    others = ", ".join(p.provider_type for p in plist[i:])
                    logger.info(
                        f"Not configuring other providers ({others}) interactively. Please edit the resulting YAML directly.\n"
                    )
                    break

                logger.info(f"> Configuring provider `({provider.provider_type})`")
                pid = provider.provider_type.split("::")[-1]
                updated_providers.append(
                    configure_single_provider(
                        provider_registry[api],
                        Provider(
                            provider_id=(f"{pid}-{i:02d}" if len(plist) > 1 else pid),
                            provider_type=provider.provider_type,
                            config={},
                        ),
                    )
                )
                logger.info("")

        config.providers[api_str] = updated_providers

    return config

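# Naming sketch (hypothetical values): for a newly configured API, the provider
# ID is derived from the provider type, and an index suffix is appended only
# when the build spec lists more than one provider for that API:
#
#   "remote::ollama".split("::")[-1]   # -> "ollama"
#   # with len(plist) > 1, the first entry becomes "ollama-00"
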
def upgrade_from_routing_table(
    config_dict: dict[str, Any],
) -> dict[str, Any]:
    """Rewrite a legacy `routing_table`/`api_providers` config dict into the current `providers` layout."""

    def get_providers(entries):
        return [
            Provider(
                provider_id=(f"{entry['provider_type']}-{i:02d}" if len(entries) > 1 else entry["provider_type"]),
                provider_type=entry["provider_type"],
                config=entry["config"],
            )
            for i, entry in enumerate(entries)
        ]

    providers_by_api = {}

    routing_table = config_dict.get("routing_table", {})
    for api_str, entries in routing_table.items():
        providers = get_providers(entries)
        providers_by_api[api_str] = providers

    provider_map = config_dict.get("api_providers", config_dict.get("provider_map", {}))
    if provider_map:
        for api_str, provider in provider_map.items():
            if isinstance(provider, dict) and "provider_type" in provider:
                providers_by_api[api_str] = [
                    Provider(
                        provider_id=f"{provider['provider_type']}",
                        provider_type=provider["provider_type"],
                        config=provider["config"],
                    )
                ]

    config_dict["providers"] = providers_by_api

    config_dict.pop("routing_table", None)
    config_dict.pop("api_providers", None)
    config_dict.pop("provider_map", None)

    config_dict["apis"] = config_dict["apis_to_serve"]
    config_dict.pop("apis_to_serve", None)

    return config_dict

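# Shape sketch (hypothetical values): a pre-upgrade dict such as
#
#   {
#       "apis_to_serve": ["inference"],
#       "routing_table": {
#           "inference": [
#               {"provider_type": "remote::ollama", "config": {"url": "http://localhost:11434"}},
#           ],
#       },
#   }
#
# leaves `upgrade_from_routing_table` with the legacy keys replaced:
#
#   {
#       "apis": ["inference"],
#       "providers": {
#           "inference": [Provider(provider_id="remote::ollama", provider_type="remote::ollama", config={...})],
#       },
#   }
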
def parse_and_maybe_upgrade_config(config_dict: dict[str, Any]) -> StackRunConfig:
    """Validate a run config dict, upgrading it from the legacy layout first when necessary."""
    version = config_dict.get("version", None)
    if version == LLAMA_STACK_RUN_CONFIG_VERSION:
        processed_config_dict = replace_env_vars(config_dict)
        return StackRunConfig(**cast_image_name_to_string(processed_config_dict))

    if "routing_table" in config_dict:
        logger.info("Upgrading config...")
        config_dict = upgrade_from_routing_table(config_dict)

    config_dict["version"] = LLAMA_STACK_RUN_CONFIG_VERSION

    if not config_dict.get("external_providers_dir", None):
        config_dict["external_providers_dir"] = EXTERNAL_PROVIDERS_DIR

    processed_config_dict = replace_env_vars(config_dict)
    return StackRunConfig(**cast_image_name_to_string(processed_config_dict))

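# Usage sketch (assumes PyYAML and a local run.yaml; both are illustrative
# assumptions, not part of this module):
#
#   import yaml
#
#   with open("run.yaml") as f:
#       config_dict = yaml.safe_load(f)
#
#   run_config = parse_and_maybe_upgrade_config(config_dict)
#   # legacy `routing_table` files are upgraded in place; the result is a
#   # validated StackRunConfig with environment variables substituted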