Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-03 19:57:35 +00:00
Commit f298ae9d3d (parent de84c7bda7): fix tests

2 changed files with 81 additions and 8 deletions
File 1 of 2:

@@ -8,9 +8,7 @@ import asyncio
 import uuid
 from typing import Any
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
+from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.models import ModelType
 from llama_stack.apis.vector_io import (
     Chunk,
@@ -19,9 +17,11 @@ from llama_stack.apis.vector_io import (
     VectorIO,
     VectorStoreChunkingStrategy,
     VectorStoreDeleteResponse,
+    VectorStoreFileBatchObject,
     VectorStoreFileContentsResponse,
     VectorStoreFileDeleteResponse,
     VectorStoreFileObject,
+    VectorStoreFilesListInBatchResponse,
     VectorStoreFileStatus,
     VectorStoreListResponse,
     VectorStoreObject,
@@ -193,7 +193,10 @@ class VectorIORouter(VectorIO):
             all_stores = all_stores[after_index + 1 :]
 
         if before:
-            before_index = next((i for i, store in enumerate(all_stores) if store.id == before), len(all_stores))
+            before_index = next(
+                (i for i, store in enumerate(all_stores) if store.id == before),
+                len(all_stores),
+            )
             all_stores = all_stores[:before_index]
 
         # Apply limit
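Note: the change above is purely a reflow of the cursor-pagination idiom: `next(...)` finds the index of the `before` cursor and falls back to `len(all_stores)` (keep everything) when the cursor is absent. A minimal standalone sketch of the same idiom; the `Store` type, `paginate` helper, and sample data are illustrative, not from the repo:

    from dataclasses import dataclass

    @dataclass
    class Store:
        id: str

    def paginate(stores: list[Store], after: str | None, before: str | None, limit: int) -> list[Store]:
        # Drop everything up to and including the `after` cursor;
        # a missing cursor yields -1, and -1 + 1 == 0 keeps the whole list.
        if after:
            after_index = next((i for i, s in enumerate(stores) if s.id == after), -1)
            stores = stores[after_index + 1 :]
        # Keep everything strictly before the `before` cursor;
        # the default of len(stores) keeps the whole list.
        if before:
            before_index = next((i for i, s in enumerate(stores) if s.id == before), len(stores))
            stores = stores[:before_index]
        return stores[:limit]

    stores = [Store("a"), Store("b"), Store("c"), Store("d")]
    assert [s.id for s in paginate(stores, after="a", before="d", limit=10)] == ["b", "c"]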
@@ -363,3 +366,61 @@
                 status=HealthStatus.ERROR, message=f"Health check failed: {str(e)}"
             )
         return health_statuses
+
+    async def openai_create_vector_store_file_batch(
+        self,
+        vector_store_id: str,
+        file_ids: list[str],
+        attributes: dict[str, Any] | None = None,
+        chunking_strategy: VectorStoreChunkingStrategy | None = None,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_create_vector_store_file_batch: {vector_store_id}, {len(file_ids)} files")
+        return await self.routing_table.openai_create_vector_store_file_batch(
+            vector_store_id=vector_store_id,
+            file_ids=file_ids,
+            attributes=attributes,
+            chunking_strategy=chunking_strategy,
+        )
+
+    async def openai_retrieve_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_retrieve_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_retrieve_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+        )
+
+    async def openai_list_files_in_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+        after: str | None = None,
+        before: str | None = None,
+        filter: str | None = None,
+        limit: int | None = 20,
+        order: str | None = "desc",
+    ) -> VectorStoreFilesListInBatchResponse:
+        logger.debug(f"VectorIORouter.openai_list_files_in_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_list_files_in_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+            after=after,
+            before=before,
+            filter=filter,
+            limit=limit,
+            order=order,
+        )
+
+    async def openai_cancel_vector_store_file_batch(
+        self,
+        batch_id: str,
+        vector_store_id: str,
+    ) -> VectorStoreFileBatchObject:
+        logger.debug(f"VectorIORouter.openai_cancel_vector_store_file_batch: {batch_id}, {vector_store_id}")
+        return await self.routing_table.openai_cancel_vector_store_file_batch(
+            batch_id=batch_id,
+            vector_store_id=vector_store_id,
+        )
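Note: the four methods above are thin delegations to the routing table. A minimal sketch of driving this surface end to end, assuming an already-constructed `router: VectorIORouter`; the `ingest_and_wait` helper, placeholder IDs, and 1-second poll interval are illustrative, and the `status`, `id`, and `data` fields follow the OpenAI-compatible batch and list objects:

    import asyncio

    async def ingest_and_wait(router, vector_store_id: str, file_ids: list[str]) -> list[str]:
        # Kick off server-side indexing of all files in one batch.
        batch = await router.openai_create_vector_store_file_batch(
            vector_store_id=vector_store_id,
            file_ids=file_ids,
        )
        # Poll until the batch leaves its in-progress state.
        while batch.status == "in_progress":
            await asyncio.sleep(1)
            batch = await router.openai_retrieve_vector_store_file_batch(
                batch_id=batch.id,
                vector_store_id=vector_store_id,
            )
        # List only the files that finished indexing.
        page = await router.openai_list_files_in_vector_store_file_batch(
            batch_id=batch.id,
            vector_store_id=vector_store_id,
            filter="completed",
        )
        return [f.id for f in page.data]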
File 2 of 2:

@@ -109,7 +109,11 @@ class OpenAIVectorStoreMixin(ABC):
         self.openai_vector_stores.pop(store_id, None)
 
     async def _save_openai_vector_store_file(
-        self, store_id: str, file_id: str, file_info: dict[str, Any], file_contents: list[dict[str, Any]]
+        self,
+        store_id: str,
+        file_id: str,
+        file_info: dict[str, Any],
+        file_contents: list[dict[str, Any]],
     ) -> None:
         """Save vector store file metadata to persistent storage."""
         assert self.kvstore
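Note: the hunk above only reflows the signature; the body persists file metadata through `self.kvstore`. A rough sketch of that persistence pattern, assuming an async key-value interface with `set(key, value)`; the key layout and helper name are hypothetical, not the mixin's actual scheme:

    import json

    async def save_file_entry(kvstore, store_id: str, file_id: str,
                              file_info: dict, file_contents: list[dict]) -> None:
        # Hypothetical key layout: metadata and contents live under separate
        # keys so listing files never loads their (potentially large) contents.
        await kvstore.set(f"vector_store_file:{store_id}:{file_id}", json.dumps(file_info))
        await kvstore.set(f"vector_store_file_contents:{store_id}:{file_id}", json.dumps(file_contents))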
@@ -303,7 +307,10 @@ class OpenAIVectorStoreMixin(ABC):
             all_stores = all_stores[after_index + 1 :]
 
         if before:
-            before_index = next((i for i, store in enumerate(all_stores) if store["id"] == before), len(all_stores))
+            before_index = next(
+                (i for i, store in enumerate(all_stores) if store["id"] == before),
+                len(all_stores),
+            )
             all_stores = all_stores[:before_index]
 
         # Apply limit
@@ -399,7 +406,9 @@
         max_num_results: int | None = 10,
         ranking_options: SearchRankingOptions | None = None,
         rewrite_query: bool | None = False,
-        search_mode: str | None = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
+        search_mode: (
+            str | None
+        ) = "vector",  # Using str instead of Literal due to OpenAPI schema generator limitations
     ) -> VectorStoreSearchResponsePage:
         """Search for chunks in a vector store."""
         max_num_results = max_num_results or 10
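Note: the trailing comment on `search_mode` is why the parameter stays a plain `str`: a `Literal` type would be more precise but trips the OpenAPI schema generator. An illustrative sketch of the trade-off ("vector" is the only mode the diff itself confirms; "keyword" and "hybrid" are examples), including the runtime guard a plain `str` needs instead:

    from typing import Literal

    # What the annotation could express if the schema generator allowed it.
    SearchMode = Literal["vector", "keyword", "hybrid"]

    def validate_search_mode(mode: str | None) -> str:
        # Runtime check standing in for the static guarantee Literal would give.
        allowed = ("vector", "keyword", "hybrid")
        if mode is not None and mode not in allowed:
            raise ValueError(f"search_mode must be one of {allowed}, got {mode!r}")
        return mode or "vector"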
@@ -687,7 +696,10 @@
             file_objects = file_objects[after_index + 1 :]
 
         if before:
-            before_index = next((i for i, file in enumerate(file_objects) if file.id == before), len(file_objects))
+            before_index = next(
+                (i for i, file in enumerate(file_objects) if file.id == before),
+                len(file_objects),
+            )
             file_objects = file_objects[:before_index]
 
         # Apply limit