Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-17 17:02:49 +00:00
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

chore: Enable keyword search for Milvus inline (#3073)
With https://github.com/milvus-io/milvus-lite/pull/294, Milvus Lite supports keyword search using BM25. When keyword search was introduced, it was explicitly disabled for inline Milvus. This PR removes the need for that check and enables `inline::milvus` for tests.

Run llama stack with `inline::milvus` enabled:
```
pytest tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes --stack-config=http://localhost:8321 --embedding-model=all-MiniLM-L6-v2 -v
```
```
INFO 2025-08-07 17:06:20,932 tests.integration.conftest:64 tests: Setting DISABLE_CODE_SANDBOX=1 for macOS
=========================================== test session starts ============================================
platform darwin -- Python 3.12.11, pytest-7.4.4, pluggy-1.5.0 -- /Users/vnarsing/miniconda3/envs/stack-client/bin/python
cachedir: .pytest_cache
metadata: {'Python': '3.12.11', 'Platform': 'macOS-14.7.6-arm64-arm-64bit', 'Packages': {'pytest': '7.4.4', 'pluggy': '1.5.0'}, 'Plugins': {'asyncio': '0.23.8', 'cov': '6.0.0', 'timeout': '2.2.0', 'socket': '0.7.0', 'html': '3.1.1', 'langsmith': '0.3.39', 'anyio': '4.8.0', 'metadata': '3.0.0'}}
rootdir: /Users/vnarsing/go/src/github/meta-llama/llama-stack
configfile: pyproject.toml
plugins: asyncio-0.23.8, cov-6.0.0, timeout-2.2.0, socket-0.7.0, html-3.1.1, langsmith-0.3.39, anyio-4.8.0, metadata-3.0.0
asyncio: mode=Mode.AUTO
collected 3 items

tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-vector] PASSED  [ 33%]
tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-keyword] PASSED [ 66%]
tests/integration/vector_io/test_openai_vector_stores.py::test_openai_vector_store_search_modes[None-None-all-MiniLM-L6-v2-None-384-hybrid] PASSED  [100%]
============================================ 3 passed in 4.75s =============================================
```
Signed-off-by: Varsha Prasad Narsing <varshaprasad96@gmail.com>
Co-authored-by: Francisco Arceo <arceofrancisco@gmail.com>

chore: Fixup main pre commit (#3204)

build: Bump version to 0.2.18

chore: Faster npm pre-commit (#3206)
Adds npm installation to pre-commit.yml and caches the ui; removes node installation during pre-commit.
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

checking in for tonight, wip moving to agents api
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

remove log
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

updated
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix: disable ui-prettier & ui-eslint (#3207)

chore(pre-commit): add pre-commit hook to enforce llama_stack logger usage (#3061)
This PR adds a pre-commit step that enforces use of the `llama_stack` logger. Various parts of the code base currently use different loggers; since a custom `llama_stack` logger exists and is already used in the codebase, it is better to standardize on it.
Signed-off-by: Mustafa Elbehery <melbeher@redhat.com>
Co-authored-by: Matthew Farrellee <matt@cs.wisc.edu>

fix: fix `openai_embeddings` for asymmetric embedding NIMs (#3205)
NVIDIA asymmetric embedding models (e.g., `nvidia/llama-3.2-nv-embedqa-1b-v2`) require an `input_type` parameter that is not present in the standard OpenAI embeddings API. This PR adds `input_type="query"` as the default and updates the documentation to suggest using the `embedding` API for passage embeddings. Resolves #2892. (A sketch of the idea follows after this commit list.)
```
pytest -s -v tests/integration/inference/test_openai_embeddings.py --stack-config="inference=nvidia" --embedding-model="nvidia/llama-3.2-nv-embedqa-1b-v2" --env NVIDIA_API_KEY={nvidia_api_key} --env NVIDIA_BASE_URL="https://integrate.api.nvidia.com"
```

cleaning up
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

updating session manager to cache messages locally
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix linter
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

more cleanup
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
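As a sketch of the `input_type` idea from the embeddings fix above: NVIDIA's OpenAI-compatible NIM endpoint accepts an extra `input_type` field in the request body, which the standard OpenAI client can pass via `extra_body`. This is illustration only, not llama-stack's provider wiring; the base URL and model name are taken from the commit.

```python
import os

from openai import OpenAI

# Sketch only: call the NVIDIA endpoint directly and pass the non-standard
# input_type field through the request body. llama-stack's provider defaults
# this to "query" for asymmetric embedding models.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.environ["NVIDIA_API_KEY"],
)

resp = client.embeddings.create(
    model="nvidia/llama-3.2-nv-embedqa-1b-v2",
    input=["What is keyword search?"],
    extra_body={"input_type": "query"},  # use "passage" when embedding documents
)
print(len(resp.data[0].embedding))
```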
198 lines
6.7 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


from collections.abc import Collection, Iterator, Sequence, Set
from pathlib import Path
from typing import (
    Literal,
    cast,
)

import tiktoken

from llama_stack.log import get_logger
from llama_stack.models.llama.tokenizer_utils import load_bpe_file

# The tiktoken tokenizer can handle <=400k chars without
# pyo3_runtime.PanicException.
TIKTOKEN_MAX_ENCODE_CHARS = 400_000

# https://github.com/openai/tiktoken/issues/195
# Here we iterate over subsequences and split if we exceed the limit
# of max consecutive non-whitespace or whitespace characters.
MAX_NO_WHITESPACES_CHARS = 25_000


_INSTANCE = None

logger = get_logger(name=__name__, category="models::llama")


class Tokenizer:
    """
    Tokenizing and encoding/decoding text using the Tiktoken tokenizer.
    """

    special_tokens: dict[str, int]

    num_reserved_special_tokens = 256

    pat_str = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"  # noqa: E501

    @classmethod
    def get_instance(cls):
        global _INSTANCE

        if _INSTANCE is None:
            _INSTANCE = Tokenizer(Path(__file__).parent / "tokenizer.model")
        return _INSTANCE

    def __init__(self, model_path: Path):
        """
        Initializes the Tokenizer with a Tiktoken model.

        Args:
            model_path (Path): The path to the Tiktoken model file.
        """
        if not model_path.exists():
            raise FileNotFoundError(f"Tokenizer model file not found: {model_path}")

        mergeable_ranks = load_bpe_file(model_path)
        num_base_tokens = len(mergeable_ranks)
        special_tokens = [
            "<|begin_of_text|>",
            "<|end_of_text|>",
            "<|reserved_special_token_0|>",
            "<|reserved_special_token_1|>",
            "<|finetune_right_pad_id|>",
            "<|step_id|>",
            "<|start_header_id|>",
            "<|end_header_id|>",
            "<|eom_id|>",  # end of message
            "<|eot_id|>",  # end of turn
            "<|python_tag|>",
            "<|image|>",
        ]
        reserved_tokens = [
            f"<|reserved_special_token_{2 + i}|>" for i in range(self.num_reserved_special_tokens - len(special_tokens))
        ]
        special_tokens = special_tokens + reserved_tokens

        self.special_tokens = {token: num_base_tokens + i for i, token in enumerate(special_tokens)}
        self.model = tiktoken.Encoding(
            name=model_path.name,
            pat_str=self.pat_str,
            mergeable_ranks=mergeable_ranks,
            special_tokens=self.special_tokens,
        )

        self.n_words: int = num_base_tokens + len(special_tokens)
        # BOS / EOS token IDs
        self.bos_id: int = self.special_tokens["<|begin_of_text|>"]
        self.eos_id: int = self.special_tokens["<|end_of_text|>"]
        self.eot_id: int = self.special_tokens["<|eot_id|>"]
        self.eom_id: int = self.special_tokens["<|eom_id|>"]
        self.python_tag_id = self.special_tokens["<|python_tag|>"]
        self.pad_id: int = self.special_tokens["<|finetune_right_pad_id|>"]
        self.stop_tokens = [
            self.eos_id,
            self.special_tokens["<|eom_id|>"],
            self.special_tokens["<|eot_id|>"],
        ]

    def encode(
        self,
        s: str,
        *,
        bos: bool,
        eos: bool,
        allowed_special: Literal["all"] | Set[str] | None = None,
        disallowed_special: Literal["all"] | Collection[str] = (),
    ) -> list[int]:
        """
        Encodes a string into a list of token IDs.

        Args:
            s (str): The input string to be encoded.
            bos (bool): Whether to prepend the beginning-of-sequence token.
            eos (bool): Whether to append the end-of-sequence token.
            allowed_special ("all"|set[str]): allowed special tokens in string
            disallowed_special ("all"|set[str]): special tokens that raise an error when in string

        Returns:
            list[int]: A list of token IDs.

        By default, setting disallowed_special=() encodes a string by ignoring
        special tokens. Specifically:
        - Setting `disallowed_special` to () will cause all text corresponding
          to special tokens to be encoded as natural text (instead of raising
          an error).
        - Setting `allowed_special` to "all" will cause all text corresponding
          to special tokens to be encoded as special tokens.
        """
        if allowed_special is None:
            allowed_special = set()
        assert type(s) is str

        substrs = (
            substr
            for i in range(0, len(s), TIKTOKEN_MAX_ENCODE_CHARS)
            for substr in self._split_whitespaces_or_nonwhitespaces(
                s[i : i + TIKTOKEN_MAX_ENCODE_CHARS], MAX_NO_WHITESPACES_CHARS
            )
        )
        t: list[int] = []
        for substr in substrs:
            t.extend(
                self.model.encode(
                    substr,
                    allowed_special=allowed_special,
                    disallowed_special=disallowed_special,
                )
            )
        if bos:
            t.insert(0, self.bos_id)
        if eos:
            t.append(self.eos_id)
        return t

    def decode(self, t: Sequence[int]) -> str:
        """
        Decodes a list of token IDs into a string.

        Args:
            t (Sequence[int]): The list of token IDs to be decoded.

        Returns:
            str: The decoded string.
        """
        # Typecast is safe here. Tiktoken doesn't do anything list-related with the sequence.
        return self.model.decode(cast(list[int], t))

    @staticmethod
    def _split_whitespaces_or_nonwhitespaces(s: str, max_consecutive_slice_len: int) -> Iterator[str]:
        """
        Splits the string `s` so that each substring contains no more than `max_consecutive_slice_len`
        consecutive whitespaces or consecutive non-whitespaces.
        """
        current_slice_len = 0
        current_slice_is_space = s[0].isspace() if len(s) > 0 else False
        slice_start = 0

        for i in range(len(s)):
            is_now_space = s[i].isspace()

            if current_slice_is_space ^ is_now_space:
                current_slice_len = 1
                current_slice_is_space = is_now_space
            else:
                current_slice_len += 1
                if current_slice_len > max_consecutive_slice_len:
                    yield s[slice_start:i]
                    slice_start = i
                    current_slice_len = 1
        yield s[slice_start:]
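For reference, a minimal usage sketch of the class above. The import path is an assumption; adjust it to wherever this module lives inside the `llama_stack` package. `get_instance()` expects the bundled `tokenizer.model` to sit next to the module.

```python
# Hypothetical import path; the module's actual location in llama_stack may differ.
from llama_stack.models.llama.llama3.tokenizer import Tokenizer

tok = Tokenizer.get_instance()

# Encode with a BOS token and no EOS token; by default (disallowed_special=()),
# special-token text appearing in the input is encoded as ordinary text.
ids = tok.encode("Hello, Llama!", bos=True, eos=False)
assert ids[0] == tok.bos_id

# Decode round-trips the ids; the BOS id decodes to its literal "<|begin_of_text|>" form.
print(tok.decode(ids))
```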