fix tests

Swapna Lekkala 2025-09-30 11:59:51 -07:00
parent de84c7bda7
commit f298ae9d3d
2 changed files with 81 additions and 8 deletions


@@ -8,9 +8,7 @@ import asyncio
 import uuid
 from typing import Any

-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
+from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.models import ModelType
 from llama_stack.apis.vector_io import (
     Chunk,
@@ -19,9 +17,11 @@ from llama_stack.apis.vector_io import (
     VectorIO,
     VectorStoreChunkingStrategy,
     VectorStoreDeleteResponse,
+    VectorStoreFileBatchObject,
     VectorStoreFileContentsResponse,
     VectorStoreFileDeleteResponse,
     VectorStoreFileObject,
+    VectorStoreFilesListInBatchResponse,
     VectorStoreFileStatus,
     VectorStoreListResponse,
     VectorStoreObject,
@@ -193,7 +193,10 @@ class VectorIORouter(VectorIO):
             all_stores = all_stores[after_index + 1 :]

         if before:
-            before_index = next((i for i, store in enumerate(all_stores) if store.id == before), len(all_stores))
+            before_index = next(
+                (i for i, store in enumerate(all_stores) if store.id == before),
+                len(all_stores),
+            )
             all_stores = all_stores[:before_index]

         # Apply limit
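The reflow above is cosmetic, but the underlying pattern is worth noting: `next()` over a generator with an explicit default implements cursor-style pagination without raising `StopIteration` when the cursor is unknown. The same reflow appears twice more in OpenAIVectorStoreMixin below. A minimal standalone sketch of the pattern (the `Store` dataclass and sample data here are hypothetical, not from this commit):

    from dataclasses import dataclass

    @dataclass
    class Store:
        id: str

    stores = [Store("vs_1"), Store("vs_2"), Store("vs_3")]

    def page_before(items: list[Store], before: str) -> list[Store]:
        # next() scans lazily; the default (len(items)) means an unknown
        # cursor leaves the list untruncated instead of raising StopIteration.
        before_index = next(
            (i for i, s in enumerate(items) if s.id == before),
            len(items),
        )
        return items[:before_index]

    assert [s.id for s in page_before(stores, "vs_3")] == ["vs_1", "vs_2"]
    assert [s.id for s in page_before(stores, "missing")] == ["vs_1", "vs_2", "vs_3"]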
@@ -363,3 +366,61 @@ class VectorIORouter(VectorIO):
                     status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}"
                 )
         return health_statuses
+
+    async def openai_create_vector_store_file_batch(
+        self,
+        vector_store_id: str,
+        file_ids: list[str],
+        attributes: dict[str, Any] | None = None,
+        chunking_strategy: VectorStoreChunkingStrategy | None = None,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_create_vector_store_file_batch: {vector_store_id}, {len(file_ids)} files")
+        return await self.routing_table.openai_create_vector_store_file_batch(
+            vector_store_id=vector_store_id,
+            file_ids=file_ids,
+            attributes=attributes,
+            chunking_strategy=chunking_strategy,
+        )
+
+    async def openai_retrieve_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_retrieve_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_retrieve_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+        )
+
+    async def openai_list_files_in_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        filter: str | None = None,
+        limit: int | None = 20,
+        order: str | None = "desc",
+    ) -> VectorStoreFilesListInBatchResponse:
+        logger.debug(f"VectorIORouter.openai_list_files_in_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_list_files_in_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            after=after,
+            before=before,
+            filter=filter,
+            limit=limit,
+            order=order,
+        )
+
+    async def openai_cancel_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_cancel_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_cancel_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+        )
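Taken together, the four new router methods mirror the OpenAI file-batch lifecycle: create a batch, poll it, list its files, and optionally cancel it. A hedged sketch of how a caller might drive them (the `router` object and IDs are hypothetical placeholders; field names like `batch.status` and `files.data` follow the OpenAI vector-store batch schema and are assumptions, not taken from this commit):

    import asyncio

    async def drive_file_batch(router, vector_store_id: str, file_ids: list[str]) -> None:
        # Create a batch that indexes several files into one vector store.
        batch = await router.openai_create_vector_store_file_batch(
            vector_store_id=vector_store_id,
            file_ids=file_ids,
        )

        # Poll until the batch reaches a terminal status (assumed
        # "in_progress" marker per the OpenAI-compatible schema).
        while batch.status == "in_progress":
            await asyncio.sleep(1)
            batch = await router.openai_retrieve_vector_store_file_batch(
                batch_id=batch.id,
                vector_store_id=vector_store_id,
            )

        # Page through the files processed as part of the batch.
        files = await router.openai_list_files_in_vector_store_file_batch(
            batch_id=batch.id,
            vector_store_id=vector_store_id,
            limit=20,
            order="desc",
        )
        for f in files.data:
            print(f.id, f.status)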


@@ -109,7 +109,11 @@ class OpenAIVectorStoreMixin(ABC):
         self.openai_vector_stores.pop(store_id, None)

     async def _save_openai_vector_store_file(
-        self, store_id: str, file_id: str, file_info: dict[str, Any], file_contents: list[dict[str, Any]]
+        self,
+        store_id: str,
+        file_id: str,
+        file_info: dict[str, Any],
+        file_contents: list[dict[str, Any]],
     ) -> None:
         """Save vector store file metadata to persistent storage."""
         assert self.kvstore
@@ -303,7 +307,10 @@ class OpenAIVectorStoreMixin(ABC):
             all_stores = all_stores[after_index + 1 :]

         if before:
-            before_index = next((i for i, store in enumerate(all_stores) if store["id"] == before), len(all_stores))
+            before_index = next(
+                (i for i, store in enumerate(all_stores) if store["id"] == before),
+                len(all_stores),
+            )
             all_stores = all_stores[:before_index]

         # Apply limit
@@ -399,7 +406,9 @@ class OpenAIVectorStoreMixin(ABC):
         max_num_results: int | None = 10,
         ranking_options: SearchRankingOptions | None = None,
         rewrite_query: bool | None = False,
-        search_mode: str | None = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
+        search_mode: (
+            str | None
+        ) = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
     ) -> VectorStoreSearchResponsePage:
         """Search for chunks in a vector store."""
         max_num_results = max_num_results or 10
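The inline comment on `search_mode` is the one substantive note in this hunk: a `Literal` would express the contract more tightly, but per the comment the OpenAPI schema generator cannot translate it, so plain `str` is used and value validation is left to runtime. A hypothetical sketch of the stricter annotation the comment alludes to (the allowed values are illustrative, not taken from this commit):

    from typing import Literal

    # Hypothetical: the tighter annotation the comment alludes to.
    SearchMode = Literal["vector", "keyword", "hybrid"]

    async def search(search_mode: SearchMode = "vector") -> None:
        # Some OpenAPI schema generators fail to translate Literal into an
        # enum schema, which is why the real signature falls back to str.
        ...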
@@ -687,7 +696,10 @@ class OpenAIVectorStoreMixin(ABC):
             file_objects = file_objects[after_index + 1 :]

         if before:
-            before_index = next((i for i, file in enumerate(file_objects) if file.id == before), len(file_objects))
+            before_index = next(
+                (i for i, file in enumerate(file_objects) if file.id == before),
+                len(file_objects),
+            )
             file_objects = file_objects[:before_index]

         # Apply limit