mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-07-13 08:36:09 +00:00

feat: Adding ChunkMetadata

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

parent 6fde601765
commit f90fce218e

13 changed files with 416 additions and 206 deletions
```diff
@@ -7,6 +7,7 @@ import base64
 import io
 import logging
 import re
+import time
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import Any
```
```diff
@@ -23,12 +24,13 @@ from llama_stack.apis.common.content_types import (
 )
 from llama_stack.apis.tools import RAGDocument
 from llama_stack.apis.vector_dbs import VectorDB
-from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
+from llama_stack.apis.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
 from llama_stack.models.llama.llama3.tokenizer import Tokenizer
 from llama_stack.providers.datatypes import Api
 from llama_stack.providers.utils.inference.prompt_adapter import (
     interleaved_content_as_str,
 )
+from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id
 
 log = logging.getLogger(__name__)
 
```
```diff
@@ -148,6 +150,10 @@ async def content_from_doc(doc: RAGDocument) -> str:
 def make_overlapped_chunks(
     document_id: str, text: str, window_len: int, overlap_len: int, metadata: dict[str, Any]
 ) -> list[Chunk]:
+    default_tokenizer = "DEFAULT_TIKTOKEN_TOKENIZER"
+    default_embedding_model = (
+        "DEFAULT_EMBEDDING_MODEL"  # This will be correctly updated in `VectorDBWithIndex.insert_chunks`
+    )
     tokenizer = Tokenizer.get_instance()
     tokens = tokenizer.encode(text, bos=False, eos=False)
     try:
```
```diff
@@ -166,11 +172,25 @@ def make_overlapped_chunks(
         chunk_metadata["token_count"] = len(toks)
         chunk_metadata["metadata_token_count"] = len(metadata_tokens)
 
+        backend_chunk_metadata = ChunkMetadata(
+            document_id=document_id,
+            chunk_id=generate_chunk_id(chunk, text),
+            source=metadata.get("source", None),
+            created_timestamp=metadata.get("created_timestamp", int(time.time())),
+            updated_timestamp=int(time.time()),
+            chunk_window=f"{i}-{i + len(toks)}",
+            chunk_tokenizer=default_tokenizer,
+            chunk_embedding_model=default_embedding_model,
+            content_token_count=len(toks),
+            metadata_token_count=len(metadata_tokens),
+        )
+
         # chunk is a string
         chunks.append(
             Chunk(
                 content=chunk,
                 metadata=chunk_metadata,
+                chunk_metadata=backend_chunk_metadata,
             )
         )
 
```
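For readers new to this API, here is a minimal sketch of the resulting dual-metadata shape, assuming a llama-stack build that includes this commit (the document ID, source name, and chunk content below are made up for illustration): `Chunk.metadata` stays the user-facing dict, while `Chunk.chunk_metadata` is the backend-owned, typed record.

```python
import time

from llama_stack.apis.vector_io import Chunk, ChunkMetadata
from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id

document_id = "doc-1"       # hypothetical document ID
chunk_text = "hello world"  # hypothetical chunk content

chunk = Chunk(
    content=chunk_text,
    # user-provided metadata, returned with query results as before
    metadata={"document_id": document_id, "source": "example.txt"},
    # backend-owned metadata introduced by this commit
    chunk_metadata=ChunkMetadata(
        document_id=document_id,
        chunk_id=generate_chunk_id(document_id, chunk_text),
        source="example.txt",
        created_timestamp=int(time.time()),
        updated_timestamp=int(time.time()),
        chunk_tokenizer="DEFAULT_TIKTOKEN_TOKENIZER",
        chunk_embedding_model="DEFAULT_EMBEDDING_MODEL",  # overwritten in insert_chunks
    ),
)
print(chunk.chunk_metadata.chunk_id)
```

Keeping the two channels separate means providers can add bookkeeping fields without colliding with user-supplied metadata keys.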
```diff
@@ -235,9 +255,13 @@ class VectorDBWithIndex:
     ) -> None:
         chunks_to_embed = []
         for i, c in enumerate(chunks):
+            # this should be done in `make_overlapped_chunks` but we do it here for convenience
             if c.embedding is None:
                 chunks_to_embed.append(c)
             else:
+                if c.chunk_metadata:
+                    c.chunk_metadata.chunk_embedding_model = self.vector_db.embedding_model
+                    c.chunk_metadata.chunk_embedding_dimension = self.vector_db.embedding_dimension
                 _validate_embedding(c.embedding, i, self.vector_db.embedding_dimension)
 
         if chunks_to_embed:
```
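The new `else` branch only stamps chunks that arrive with precomputed embeddings, replacing the placeholder values set in `make_overlapped_chunks`. A self-contained sketch of that stamping logic (the `Fake*` classes and `stamp_embedding_info` below are hypothetical stand-ins for illustration, not the real pydantic models or method):

```python
from dataclasses import dataclass

@dataclass
class FakeChunkMetadata:  # stand-in for ChunkMetadata
    chunk_embedding_model: str | None = None
    chunk_embedding_dimension: int | None = None

@dataclass
class FakeChunk:  # stand-in for Chunk
    embedding: list[float] | None
    chunk_metadata: FakeChunkMetadata | None

def stamp_embedding_info(chunks: list[FakeChunk], model: str, dimension: int) -> None:
    # mirror of the insert_chunks backfill: record which model/dimension
    # the vector DB actually uses for chunks that were embedded upstream
    for c in chunks:
        if c.embedding is not None and c.chunk_metadata:
            c.chunk_metadata.chunk_embedding_model = model
            c.chunk_metadata.chunk_embedding_dimension = dimension

chunks = [FakeChunk(embedding=[0.1, 0.2], chunk_metadata=FakeChunkMetadata())]
stamp_embedding_info(chunks, "all-MiniLM-L6-v2", 384)
print(chunks[0].chunk_metadata)  # model and dimension now reflect the vector DB
```

In the real method, `_validate_embedding` then checks each precomputed embedding against the vector DB's configured dimension.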
llama_stack/providers/utils/vector_io/__init__.py (new file, +5)

```diff
@@ -0,0 +1,5 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
```
llama_stack/providers/utils/vector_io/chunk_utils.py (new file, +42)

```diff
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import hashlib
+import logging
+import uuid
+
+from llama_stack.apis.vector_io import Chunk
+
+
+def generate_chunk_id(document_id: str, chunk_text: str) -> str:
+    """Generate a unique chunk ID using a hash of document ID and chunk text."""
+    hash_input = f"{document_id}:{chunk_text}".encode()
+    return str(uuid.UUID(hashlib.md5(hash_input).hexdigest()))
+
+
+def extract_chunk_id_from_metadata(chunk: Chunk) -> str | None:
+    """Extract existing chunk ID from metadata. This is for compatibility with older Chunks
+    that stored the document_id in the metadata and not in the ChunkMetadata."""
+    if chunk.chunk_metadata is not None and hasattr(chunk.chunk_metadata, "chunk_id"):
+        return chunk.chunk_metadata.chunk_id
+
+    if "chunk_id" in chunk.metadata:
+        return str(chunk.metadata["chunk_id"])
+
+    return None
+
+
+def extract_or_generate_chunk_id(chunk: Chunk) -> str:
+    """Extract existing chunk ID or generate a new one if not present. This is for compatibility with older Chunks
+    that stored the document_id in the metadata."""
+    stored_chunk_id = extract_chunk_id_from_metadata(chunk)
+    if stored_chunk_id:
+        return stored_chunk_id
+    elif "document_id" in chunk.metadata:
+        return generate_chunk_id(chunk.metadata["document_id"], str(chunk.content))
+    else:
+        logging.warning("Chunk has no ID or document_id in metadata. Generating random ID.")
+        return str(uuid.uuid4())
```
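A property of `generate_chunk_id` worth noting: it reinterprets the MD5 digest of `"{document_id}:{chunk_text}"` as a UUID, so the same inputs always produce the same chunk ID across calls, processes, and machines. A quick stdlib-only check, inlining the function from the new file above:

```python
import hashlib
import uuid

def generate_chunk_id(document_id: str, chunk_text: str) -> str:
    # MD5 yields 32 hex characters, exactly what uuid.UUID expects
    hash_input = f"{document_id}:{chunk_text}".encode()
    return str(uuid.UUID(hashlib.md5(hash_input).hexdigest()))

a = generate_chunk_id("doc-1", "hello world")
b = generate_chunk_id("doc-1", "hello world")
assert a == b  # deterministic: same document and text always map to the same ID
print(a)
```

Deterministic IDs let re-ingestion of an unchanged document overwrite rather than duplicate its chunks; `extract_or_generate_chunk_id` falls back to a random `uuid4` only when neither a stored chunk ID nor a `document_id` is available.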