From f298ae9d3d88dcfc6a6abe65764bf8e76ee76871 Mon Sep 17 00:00:00 2001
From: Swapna Lekkala
Date: Tue, 30 Sep 2025 11:59:51 -0700
Subject: [PATCH] fix tests

---
 llama_stack/core/routers/vector_io.py         | 69 +++++++++++++++++--
 .../utils/memory/openai_vector_store_mixin.py | 20 ++++--
 2 files changed, 81 insertions(+), 8 deletions(-)

diff --git a/llama_stack/core/routers/vector_io.py b/llama_stack/core/routers/vector_io.py
index 786b0e391..0e3f9d8d9 100644
--- a/llama_stack/core/routers/vector_io.py
+++ b/llama_stack/core/routers/vector_io.py
@@ -8,9 +8,7 @@ import asyncio
 import uuid
 from typing import Any
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
+from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.models import ModelType
 from llama_stack.apis.vector_io import (
     Chunk,
@@ -19,9 +17,11 @@ from llama_stack.apis.vector_io import (
     VectorIO,
     VectorStoreChunkingStrategy,
     VectorStoreDeleteResponse,
+    VectorStoreFileBatchObject,
     VectorStoreFileContentsResponse,
     VectorStoreFileDeleteResponse,
     VectorStoreFileObject,
+    VectorStoreFilesListInBatchResponse,
     VectorStoreFileStatus,
     VectorStoreListResponse,
     VectorStoreObject,
@@ -193,7 +193,10 @@
             all_stores = all_stores[after_index + 1 :]
 
         if before:
-            before_index = next((i for i, store in enumerate(all_stores) if store.id == before), len(all_stores))
+            before_index = next(
+                (i for i, store in enumerate(all_stores) if store.id == before),
+                len(all_stores),
+            )
             all_stores = all_stores[:before_index]
 
         # Apply limit
@@ -363,3 +366,61 @@
                     status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}"
                 )
         return health_statuses
+
+    async def openai_create_vector_store_file_batch(
+        self,
+        vector_store_id: str,
+        file_ids: list[str],
+        attributes: dict[str, Any] | None = None,
+        chunking_strategy: VectorStoreChunkingStrategy | None = None,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_create_vector_store_file_batch: {vector_store_id}, {len(file_ids)} files")
+        return await self.routing_table.openai_create_vector_store_file_batch(
+            vector_store_id=vector_store_id,
+            file_ids=file_ids,
+            attributes=attributes,
+            chunking_strategy=chunking_strategy,
+        )
+
+    async def openai_retrieve_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_retrieve_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_retrieve_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+        )
+
+    async def openai_list_files_in_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        filter: str | None = None,
+        limit: int | None = 20,
+        order: str | None = "desc",
+    ) -> VectorStoreFilesListInBatchResponse:
+        logger.debug(f"VectorIORouter.openai_list_files_in_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_list_files_in_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            after=after,
+            before=before,
+            filter=filter,
+            limit=limit,
+            order=order,
+        )
+
+    async def openai_cancel_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_cancel_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_cancel_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+        )
diff --git a/llama_stack/providers/utils/memory/openai_vector_store_mixin.py b/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
index ca3f47986..36432767f 100644
--- a/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
+++ b/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
@@ -109,7 +109,11 @@ class OpenAIVectorStoreMixin(ABC):
         self.openai_vector_stores.pop(store_id, None)
 
     async def _save_openai_vector_store_file(
-        self, store_id: str, file_id: str, file_info: dict[str, Any], file_contents: list[dict[str, Any]]
+        self,
+        store_id: str,
+        file_id: str,
+        file_info: dict[str, Any],
+        file_contents: list[dict[str, Any]],
     ) -> None:
         """Save vector store file metadata to persistent storage."""
         assert self.kvstore
@@ -303,7 +307,10 @@ class OpenAIVectorStoreMixin(ABC):
             all_stores = all_stores[after_index + 1 :]
 
         if before:
-            before_index = next((i for i, store in enumerate(all_stores) if store["id"] == before), len(all_stores))
+            before_index = next(
+                (i for i, store in enumerate(all_stores) if store["id"] == before),
+                len(all_stores),
+            )
             all_stores = all_stores[:before_index]
 
         # Apply limit
@@ -399,7 +406,9 @@ class OpenAIVectorStoreMixin(ABC):
         max_num_results: int | None = 10,
         ranking_options: SearchRankingOptions | None = None,
         rewrite_query: bool | None = False,
-        search_mode: str | None = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
+        search_mode: (
+            str | None
+        ) = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
     ) -> VectorStoreSearchResponsePage:
         """Search for chunks in a vector store."""
         max_num_results = max_num_results or 10
@@ -687,7 +696,10 @@ class OpenAIVectorStoreMixin(ABC):
             file_objects = file_objects[after_index + 1 :]
 
         if before:
-            before_index = next((i for i, file in enumerate(file_objects) if file.id == before), len(file_objects))
+            before_index = next(
+                (i for i, file in enumerate(file_objects) if file.id == before),
+                len(file_objects),
+            )
             file_objects = file_objects[:before_index]
 
         # Apply limit
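
A rough usage sketch of the flow these router methods are meant to support
(create a file batch, poll it, then page through its files). This is not part
of the patch: `router` stands in for a configured VectorIORouter, and the
`id`/`status` attributes on the batch object and the `data` attribute on the
list response are assumed to mirror the OpenAI file-batch shapes.

    import asyncio

    async def index_files(router, vector_store_id: str, file_ids: list[str]) -> None:
        # Kick off server-side indexing of several files in one call.
        batch = await router.openai_create_vector_store_file_batch(
            vector_store_id=vector_store_id,
            file_ids=file_ids,
        )

        # Poll until the batch settles; status values are assumed to follow
        # OpenAI's ("in_progress", "completed", "failed", "cancelled").
        while batch.status == "in_progress":
            await asyncio.sleep(1)
            batch = await router.openai_retrieve_vector_store_file_batch(
                batch_id=batch.id,
                vector_store_id=vector_store_id,
            )

        # Page through the files that were processed as part of the batch.
        page = await router.openai_list_files_in_vector_store_file_batch(
            batch_id=batch.id,
            vector_store_id=vector_store_id,
            limit=20,
        )
        for file_object in page.data:
            print(file_object.id, file_object.status)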