Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-06 20:44:58 +00:00
feat: Adding support for get, update, delete for Vector Stores API
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
This commit is contained in: parent 0527c0fb15, commit 4c0eb47fc7
8 changed files with 1818 additions and 1 deletion
@@ -426,6 +426,74 @@ class VectorStoreFileDeleteResponse(BaseModel):
    deleted: bool = True


@json_schema_type
class VectorStoreChunkObject(BaseModel):
    """OpenAI Vector Store Chunk object.

    :param id: Unique identifier for the chunk
    :param object: Object type identifier, always "vector_store.file.chunk"
    :param created_at: Timestamp when the chunk was created
    :param vector_store_id: ID of the vector store containing this chunk
    :param file_id: ID of the file containing this chunk
    :param content: The content of the chunk, using the same format as the Chunk class
    :param metadata: Metadata associated with the chunk
    :param embedding: The embedding vector for the chunk
    """

    id: str
    object: str = "vector_store.file.chunk"
    created_at: int
    vector_store_id: str
    file_id: str
    content: InterleavedContent
    metadata: dict[str, Any] = Field(default_factory=dict)
    embedding: list[float] | None = None


@json_schema_type
class VectorStoreListChunksResponse(BaseModel):
    """Response from listing chunks in a vector store file.

    :param object: Object type identifier, always "list"
    :param data: List of vector store chunk objects
    :param first_id: (Optional) ID of the first chunk in the list, for pagination
    :param last_id: (Optional) ID of the last chunk in the list, for pagination
    :param has_more: Whether there are more chunks available beyond this page
    """

    object: str = "list"
    data: list[VectorStoreChunkObject]
    first_id: str | None = None
    last_id: str | None = None
    has_more: bool = False


@json_schema_type
class VectorStoreChunkUpdateRequest(BaseModel):
    """Request to update a vector store chunk.

    :param content: Updated content for the chunk
    :param metadata: Updated metadata for the chunk
    """

    content: InterleavedContent | None = None
    metadata: dict[str, Any] | None = None


@json_schema_type
class VectorStoreChunkDeleteResponse(BaseModel):
    """Response from deleting a vector store chunk.

    :param id: Unique identifier of the deleted chunk
    :param object: Object type identifier for the deletion response
    :param deleted: Whether the deletion operation was successful
    """

    id: str
    object: str = "vector_store.file.chunk.deleted"
    deleted: bool = True


class VectorDBStore(Protocol):
    def get_vector_db(self, vector_db_id: str) -> VectorDB | None: ...
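For orientation, a minimal usage sketch (not part of the commit) of how the new models compose. It assumes the classes above are importable from llama_stack.apis.vector_io and that a bare string is valid InterleavedContent; all IDs are placeholders.

import time

from llama_stack.apis.vector_io import (
    VectorStoreChunkObject,
    VectorStoreListChunksResponse,
)

chunk = VectorStoreChunkObject(
    id="chunk_123",  # placeholder chunk ID
    created_at=int(time.time()),
    vector_store_id="vs_abc",
    file_id="file_xyz",
    content="The quick brown fox...",  # plain string as InterleavedContent
    metadata={"page": 1},
)
page = VectorStoreListChunksResponse(
    data=[chunk],
    first_id=chunk.id,
    last_id=chunk.id,
    has_more=False,
)
print(page.model_dump_json(indent=2))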
@@ -638,6 +706,28 @@ class VectorIO(Protocol):
        """
        ...

    @webmethod(route="/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/chunks", method="GET")
    async def openai_list_vector_store_chunks(
        self,
        vector_store_id: str,
        file_id: str,
        limit: int | None = 20,
        order: str | None = "desc",
        after: str | None = None,
        before: str | None = None,
    ) -> VectorStoreListChunksResponse:
        """List chunks in a vector store file.

        :param vector_store_id: The ID of the vector store.
        :param file_id: The ID of the file.
        :param limit: Maximum number of chunks to return.
        :param order: Sort order by creation timestamp, "asc" or "desc".
        :param after: Pagination cursor; return chunks after this chunk ID.
        :param before: Pagination cursor; return chunks before this chunk ID.
        :returns: A VectorStoreListChunksResponse with the list of chunks.
        """
        ...

    @webmethod(route="/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/content", method="GET")
    async def openai_retrieve_vector_store_file_contents(
        self,
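A hypothetical client-side call against the new GET route (httpx, the default local server address, and all IDs are assumptions, not part of the commit):

import httpx

resp = httpx.get(
    "http://localhost:8321/openai/v1/vector_stores/vs_abc/files/file_xyz/chunks",
    params={"limit": 20, "order": "desc"},
)
resp.raise_for_status()
for chunk in resp.json()["data"]:
    print(chunk["id"], chunk["metadata"])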
@@ -681,3 +771,55 @@ class VectorIO(Protocol):
        :returns: A VectorStoreFileDeleteResponse indicating the deletion status.
        """
        ...

    @webmethod(route="/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/chunks/{chunk_id}", method="GET")
    async def openai_retrieve_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkObject:
        """Retrieve a specific chunk from a vector store file.

        :param vector_store_id: The ID of the vector store containing the chunk.
        :param file_id: The ID of the file containing the chunk.
        :param chunk_id: The ID of the chunk to retrieve.
        :returns: A VectorStoreChunkObject representing the chunk.
        """
        ...

    @webmethod(route="/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/chunks/{chunk_id}", method="POST")
    async def openai_update_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
        content: InterleavedContent | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> VectorStoreChunkObject:
        """Update a specific chunk in a vector store file.

        :param vector_store_id: The ID of the vector store containing the chunk.
        :param file_id: The ID of the file containing the chunk.
        :param chunk_id: The ID of the chunk to update.
        :param content: Updated content for the chunk.
        :param metadata: Updated metadata for the chunk.
        :returns: A VectorStoreChunkObject representing the updated chunk.
        """
        ...

    @webmethod(route="/openai/v1/vector_stores/{vector_store_id}/files/{file_id}/chunks/{chunk_id}", method="DELETE")
    async def openai_delete_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkDeleteResponse:
        """Delete a specific chunk from a vector store file.

        :param vector_store_id: The ID of the vector store containing the chunk.
        :param file_id: The ID of the file containing the chunk.
        :param chunk_id: The ID of the chunk to delete.
        :returns: A VectorStoreChunkDeleteResponse indicating the deletion status.
        """
        ...
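The three per-chunk operations share one path and differ only in HTTP method. A hypothetical httpx sketch (base URL, IDs, and request bodies are assumptions):

import httpx

base = (
    "http://localhost:8321/openai/v1/vector_stores/vs_abc"
    "/files/file_xyz/chunks/chunk_123"
)

chunk = httpx.get(base).json()  # retrieve
updated = httpx.post(  # update content and/or metadata
    base,
    json={"content": "revised text", "metadata": {"reviewed": True}},
).json()
deleted = httpx.delete(base).json()  # delete
assert deleted["deleted"] is True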
@@ -17,7 +17,9 @@ from llama_stack.apis.vector_io import (
    QueryChunksResponse,
    SearchRankingOptions,
    VectorIO,
    VectorStoreChunkDeleteResponse,
    VectorStoreChunkingStrategy,
    VectorStoreChunkObject,
    VectorStoreDeleteResponse,
    VectorStoreFileContentsResponse,
    VectorStoreFileDeleteResponse,

@@ -341,6 +343,68 @@ class VectorIORouter(VectorIO):
            file_id=file_id,
        )

    async def openai_retrieve_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkObject:
        logger.debug(f"VectorIORouter.openai_retrieve_vector_store_chunk: {vector_store_id}, {file_id}, {chunk_id}")
        return await self.routing_table.openai_retrieve_vector_store_chunk(
            vector_store_id=vector_store_id,
            file_id=file_id,
            chunk_id=chunk_id,
        )

    async def openai_update_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
        content: InterleavedContent | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> VectorStoreChunkObject:
        logger.debug(f"VectorIORouter.openai_update_vector_store_chunk: {vector_store_id}, {file_id}, {chunk_id}")
        return await self.routing_table.openai_update_vector_store_chunk(
            vector_store_id=vector_store_id,
            file_id=file_id,
            chunk_id=chunk_id,
            content=content,
            metadata=metadata,
        )

    async def openai_delete_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkDeleteResponse:
        logger.debug(f"VectorIORouter.openai_delete_vector_store_chunk: {vector_store_id}, {file_id}, {chunk_id}")
        return await self.routing_table.openai_delete_vector_store_chunk(
            vector_store_id=vector_store_id,
            file_id=file_id,
            chunk_id=chunk_id,
        )

    async def openai_list_vector_store_chunks(
        self,
        vector_store_id: str,
        file_id: str,
        limit: int | None = 20,
        order: str | None = "desc",
        after: str | None = None,
        before: str | None = None,
    ) -> VectorStoreListChunksResponse:
        logger.debug(f"VectorIORouter.openai_list_vector_store_chunks: {vector_store_id}, {file_id}")
        return await self.routing_table.openai_list_vector_store_chunks(
            vector_store_id=vector_store_id,
            file_id=file_id,
            limit=limit,
            order=order,
            after=after,
            before=before,
        )

    async def health(self) -> dict[str, HealthResponse]:
        health_statuses = {}
        timeout = 1  # increasing the timeout to 1 second for health checks
@@ -13,13 +13,17 @@ from llama_stack.apis.models import ModelType
from llama_stack.apis.resource import ResourceType
from llama_stack.apis.vector_dbs import ListVectorDBsResponse, VectorDB, VectorDBs
from llama_stack.apis.vector_io.vector_io import (
    InterleavedContent,
    SearchRankingOptions,
    VectorStoreChunkDeleteResponse,
    VectorStoreChunkingStrategy,
    VectorStoreChunkObject,
    VectorStoreDeleteResponse,
    VectorStoreFileContentsResponse,
    VectorStoreFileDeleteResponse,
    VectorStoreFileObject,
    VectorStoreFileStatus,
    VectorStoreListChunksResponse,
    VectorStoreObject,
    VectorStoreSearchResponsePage,
)

@@ -227,3 +231,69 @@ class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
            vector_store_id=vector_store_id,
            file_id=file_id,
        )

    async def openai_retrieve_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkObject:
        await self.assert_action_allowed("read", "vector_db", vector_store_id)
        provider = await self.get_provider_impl(vector_store_id)
        return await provider.openai_retrieve_vector_store_chunk(
            vector_store_id=vector_store_id,
            file_id=file_id,
            chunk_id=chunk_id,
        )

    async def openai_update_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
        content: InterleavedContent | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> VectorStoreChunkObject:
        await self.assert_action_allowed("update", "vector_db", vector_store_id)
        provider = await self.get_provider_impl(vector_store_id)
        return await provider.openai_update_vector_store_chunk(
            vector_store_id=vector_store_id,
            file_id=file_id,
            chunk_id=chunk_id,
            content=content,
            metadata=metadata,
        )

    async def openai_delete_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkDeleteResponse:
        await self.assert_action_allowed("delete", "vector_db", vector_store_id)
        provider = await self.get_provider_impl(vector_store_id)
        return await provider.openai_delete_vector_store_chunk(
            vector_store_id=vector_store_id,
            file_id=file_id,
            chunk_id=chunk_id,
        )

    async def openai_list_vector_store_chunks(
        self,
        vector_store_id: str,
        file_id: str,
        limit: int | None = 20,
        order: str | None = "desc",
        after: str | None = None,
        before: str | None = None,
    ) -> VectorStoreListChunksResponse:
        await self.assert_action_allowed("read", "vector_db", vector_store_id)
        provider = await self.get_provider_impl(vector_store_id)
        return await provider.openai_list_vector_store_chunks(
            vector_store_id=vector_store_id,
            file_id=file_id,
            limit=limit,
            order=order,
            after=after,
            before=before,
        )
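Each routing-table method above repeats the same three steps: an access check, provider resolution, then a forward. Distilled as a sketch (the helper name _forward_chunk_op is hypothetical; assert_action_allowed and get_provider_impl are the methods used above):

async def _forward_chunk_op(self, action: str, vector_store_id: str, method_name: str, **kwargs):
    # check access to the vector store, resolve its provider, then delegate
    await self.assert_action_allowed(action, "vector_db", vector_store_id)
    provider = await self.get_provider_impl(vector_store_id)
    return await getattr(provider, method_name)(vector_store_id=vector_store_id, **kwargs)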
@@ -15,14 +15,17 @@ from typing import Any

from llama_stack.apis.common.errors import VectorStoreNotFoundError
from llama_stack.apis.files import Files, OpenAIFileObject
from llama_stack.apis.inference import InterleavedContent
from llama_stack.apis.vector_dbs import VectorDB
from llama_stack.apis.vector_io import (
    Chunk,
    QueryChunksResponse,
    SearchRankingOptions,
    VectorStoreChunkDeleteResponse,
    VectorStoreChunkingStrategy,
    VectorStoreChunkingStrategyAuto,
    VectorStoreChunkingStrategyStatic,
    VectorStoreChunkObject,
    VectorStoreContent,
    VectorStoreDeleteResponse,
    VectorStoreFileContentsResponse,

@@ -31,6 +34,7 @@ from llama_stack.apis.vector_io import (
    VectorStoreFileLastError,
    VectorStoreFileObject,
    VectorStoreFileStatus,
    VectorStoreListChunksResponse,
    VectorStoreListFilesResponse,
    VectorStoreListResponse,
    VectorStoreObject,
@@ -109,7 +113,14 @@ class OpenAIVectorStoreMixin(ABC):
        assert self.kvstore
        meta_key = f"{OPENAI_VECTOR_STORES_FILES_PREFIX}{store_id}:{file_id}"
        await self.kvstore.set(key=meta_key, value=json.dumps(file_info))

        # delete the old chunk entries first so the new contents fully replace them
        contents_prefix = f"{OPENAI_VECTOR_STORES_FILES_CONTENTS_PREFIX}{store_id}:{file_id}:"
        end_key = f"{contents_prefix}\xff"
        old_keys = await self.kvstore.keys_in_range(contents_prefix, end_key)
        for old_key in old_keys:
            await self.kvstore.delete(old_key)

        for idx, chunk in enumerate(file_contents):
            await self.kvstore.set(key=f"{contents_prefix}{idx}", value=json.dumps(chunk))
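A minimal sketch of the lexicographic key range this scan relies on (the literal prefix value and IDs are placeholders; only the layout matters):

contents_prefix = "openai_vector_stores_files_contents:vs_abc:file_xyz:"
end_key = contents_prefix + "\xff"  # "\xff" sorts above any printable suffix
keys = [f"{contents_prefix}{i}" for i in range(3)]
# every chunk index for this (store, file) pair falls inside [prefix, end_key)
assert all(contents_prefix <= k < end_key for k in keys)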
@@ -791,3 +802,233 @@ class OpenAIVectorStoreMixin(ABC):
            id=file_id,
            deleted=True,
        )

    async def openai_retrieve_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkObject:
        """Retrieve a specific chunk from a vector store file."""
        if vector_store_id not in self.openai_vector_stores:
            raise VectorStoreNotFoundError(vector_store_id)

        store_info = self.openai_vector_stores[vector_store_id]
        if file_id not in store_info["file_ids"]:
            raise ValueError(f"File {file_id} not found in vector store {vector_store_id}")

        dict_chunks = await self._load_openai_vector_store_file_contents(vector_store_id, file_id)
        chunks = [Chunk.model_validate(c) for c in dict_chunks]

        target_chunk = None
        for chunk in chunks:
            if chunk.chunk_id == chunk_id:
                target_chunk = chunk
                break

        if target_chunk is None:
            raise ValueError(f"Chunk {chunk_id} not found in file {file_id}")

        file_info = await self._load_openai_vector_store_file(vector_store_id, file_id)

        return VectorStoreChunkObject(
            id=chunk_id,
            created_at=file_info.get("created_at", int(time.time())),
            vector_store_id=vector_store_id,
            file_id=file_id,
            content=target_chunk.content,
            metadata=target_chunk.metadata,
            embedding=target_chunk.embedding,
        )
    async def openai_update_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
        content: InterleavedContent | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> VectorStoreChunkObject:
        """Update a specific chunk in a vector store file."""
        if vector_store_id not in self.openai_vector_stores:
            raise VectorStoreNotFoundError(vector_store_id)

        store_info = self.openai_vector_stores[vector_store_id]
        if file_id not in store_info["file_ids"]:
            raise ValueError(f"File {file_id} not found in vector store {vector_store_id}")

        dict_chunks = await self._load_openai_vector_store_file_contents(vector_store_id, file_id)
        chunks = [Chunk.model_validate(c) for c in dict_chunks]

        target_chunk_index = None
        for i, chunk in enumerate(chunks):
            if chunk.chunk_id == chunk_id:
                target_chunk_index = i
                break

        if target_chunk_index is None:
            raise ValueError(f"Chunk {chunk_id} not found in file {file_id}")

        # apply both updates before re-indexing so the vector store copy stays in sync
        target_chunk = chunks[target_chunk_index]
        if content is not None:
            target_chunk.content = content
        if metadata is not None:
            target_chunk.metadata.update(metadata)

        if content is not None:
            # content changed: delete the old chunk and re-insert to re-embed it
            await self.delete_chunks(vector_store_id, [chunk_id])
            await self.insert_chunks(vector_store_id, [target_chunk])

        chunks[target_chunk_index] = target_chunk
        dict_chunks = [c.model_dump() for c in chunks]
        file_info = await self._load_openai_vector_store_file(vector_store_id, file_id)
        await self._save_openai_vector_store_file(vector_store_id, file_id, file_info, dict_chunks)

        return VectorStoreChunkObject(
            id=chunk_id,
            created_at=file_info.get("created_at", int(time.time())),
            vector_store_id=vector_store_id,
            file_id=file_id,
            content=target_chunk.content,
            metadata=target_chunk.metadata,
            embedding=target_chunk.embedding,
        )
    async def openai_delete_vector_store_chunk(
        self,
        vector_store_id: str,
        file_id: str,
        chunk_id: str,
    ) -> VectorStoreChunkDeleteResponse:
        """Delete a specific chunk from a vector store file."""
        if vector_store_id not in self.openai_vector_stores:
            raise VectorStoreNotFoundError(vector_store_id)

        store_info = self.openai_vector_stores[vector_store_id]
        if file_id not in store_info["file_ids"]:
            raise ValueError(f"File {file_id} not found in vector store {vector_store_id}")

        dict_chunks = await self._load_openai_vector_store_file_contents(vector_store_id, file_id)
        chunks = [Chunk.model_validate(c) for c in dict_chunks]

        target_chunk_index = None
        for i, chunk in enumerate(chunks):
            if chunk.chunk_id == chunk_id:
                target_chunk_index = i
                break

        if target_chunk_index is None:
            raise ValueError(f"Chunk {chunk_id} not found in file {file_id}")

        # remove from the vector store index, then from the persisted file contents
        await self.delete_chunks(vector_store_id, [chunk_id])

        dict_chunks.pop(target_chunk_index)
        file_info = await self._load_openai_vector_store_file(vector_store_id, file_id)
        await self._save_openai_vector_store_file(vector_store_id, file_id, file_info, dict_chunks)

        return VectorStoreChunkDeleteResponse(
            id=chunk_id,
            deleted=True,
        )
    async def openai_list_vector_store_chunks(
        self,
        vector_store_id: str,
        file_id: str,
        limit: int | None = 20,
        order: str | None = "desc",
        after: str | None = None,
        before: str | None = None,
    ) -> VectorStoreListChunksResponse:
        """List chunks in a vector store file."""
        if vector_store_id not in self.openai_vector_stores:
            raise VectorStoreNotFoundError(vector_store_id)

        store_info = self.openai_vector_stores[vector_store_id]
        if file_id not in store_info["file_ids"]:
            raise ValueError(f"File {file_id} not found in vector store {vector_store_id}")

        dict_chunks = await self._load_openai_vector_store_file_contents(vector_store_id, file_id)
        file_info = await self._load_openai_vector_store_file(vector_store_id, file_id)

        chunk_ids = []
        for dict_chunk in dict_chunks:
            chunk = Chunk.model_validate(dict_chunk)
            if chunk.chunk_id:
                chunk_ids.append(chunk.chunk_id)

        # TODO: add an abstract query_all_chunks() method to properly filter by
        # file_id and vector_db_id; the wildcard query below is a temporary hack
        chunks = []
        if chunk_ids:
            try:
                file_filter = {"type": "eq", "key": "file_id", "value": file_id}

                query_result = await self.query_chunks(
                    vector_db_id=vector_store_id,
                    query="*",  # wildcard query to get all chunks
                    params={
                        "max_chunks": len(chunk_ids) * 2,
                        "score_threshold": 0.0,
                        "filters": file_filter,
                    },
                )

                chunk_id_set = set(chunk_ids)
                chunks = [chunk for chunk in query_result.chunks if chunk.chunk_id in chunk_id_set]
            except Exception as e:
                logger.warning(f"Failed to query vector database for chunks: {e}")
                # fall back to the KV store copy if the vector DB query fails
                chunks = [Chunk.model_validate(c) for c in dict_chunks]

        chunk_objects = []
        for chunk in chunks:
            chunk_obj = VectorStoreChunkObject(
                id=chunk.chunk_id,
                created_at=file_info.get("created_at", int(time.time())),
                vector_store_id=vector_store_id,
                file_id=file_id,
                content=chunk.content,
                metadata=chunk.metadata,
                embedding=chunk.embedding,
            )
            chunk_objects.append(chunk_obj)

        if order == "desc":
            chunk_objects.sort(key=lambda x: x.created_at, reverse=True)
        else:
            chunk_objects.sort(key=lambda x: x.created_at)

        start_idx = 0
        end_idx = len(chunk_objects)

        if after:
            # start just past the 'after' cursor chunk
            for i, chunk_obj in enumerate(chunk_objects):
                if chunk_obj.id == after:
                    start_idx = i + 1
                    break

        if before:
            # stop just before the 'before' cursor chunk
            for i, chunk_obj in enumerate(chunk_objects):
                if chunk_obj.id == before:
                    end_idx = i
                    break

        if limit:
            if end_idx - start_idx > limit:
                end_idx = start_idx + limit

        paginated_chunks = chunk_objects[start_idx:end_idx]

        first_id = paginated_chunks[0].id if paginated_chunks else None
        last_id = paginated_chunks[-1].id if paginated_chunks else None
        has_more = end_idx < len(chunk_objects)

        return VectorStoreListChunksResponse(
            data=paginated_chunks,
            first_id=first_id,
            last_id=last_id,
            has_more=has_more,
        )
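The cursor arithmetic above, mirrored as a standalone sketch (pure Python, placeholder IDs):

ids = ["c1", "c2", "c3", "c4", "c5"]  # chunk IDs, already sorted
after, before, limit = "c1", "c5", 2

start = ids.index(after) + 1 if after in ids else 0      # exclusive lower cursor
end = ids.index(before) if before in ids else len(ids)   # exclusive upper cursor
end = min(end, start + limit)

page = ids[start:end]        # -> ["c2", "c3"]
has_more = end < len(ids)    # -> True: "c4" remains before the 'before' cursor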
@@ -1,9 +1,11 @@
"use client";

import { useRouter } from "next/navigation";
import type { VectorStore } from "llama-stack-client/resources/vector-stores/vector-stores";
import type { VectorStoreFile } from "llama-stack-client/resources/vector-stores/files";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Skeleton } from "@/components/ui/skeleton";
import { Button } from "@/components/ui/button";
import {
  DetailLoadingView,
  DetailErrorView,

@@ -42,6 +44,11 @@ export function VectorStoreDetailView({
  id,
}: VectorStoreDetailViewProps) {
  const title = "Vector Store Details";
  const router = useRouter();

  const handleFileClick = (fileId: string) => {
    router.push(`/logs/vector-stores/${id}/files/${fileId}`);
  };

  if (errorStore) {
    return <DetailErrorView title={title} id={id} error={errorStore} />;

@@ -80,7 +87,15 @@ export function VectorStoreDetailView({
        <TableBody>
          {files.map((file) => (
            <TableRow key={file.id}>
              <TableCell>{file.id}</TableCell>
              <TableCell>
                <Button
                  variant="link"
                  className="p-0 h-auto font-mono text-blue-600 hover:text-blue-800 dark:text-blue-400 dark:hover:text-blue-300"
                  onClick={() => handleFileClick(file.id)}
                >
                  {file.id}
                </Button>
              </TableCell>
              <TableCell>{file.status}</TableCell>
              <TableCell>
                {new Date(file.created_at * 1000).toLocaleString()}