Merge 0fb6fecedb into sapling-pr-archive-ehhuang

commit 0a328f0878
3 changed files with 15 additions and 6 deletions
@@ -260,7 +260,7 @@ class VectorStoreSearchResponsePage(BaseModel):
     """

     object: str = "vector_store.search_results.page"
-    search_query: str
+    search_query: list[str]
    data: list[VectorStoreSearchResponse]
     has_more: bool = False
     next_page: str | None = None
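
For consumers, the visible effect of this first hunk is that search_query is now a list of query strings rather than a single string. A minimal sketch of the adjustment, using a hypothetical model trimmed to the fields shown above (the real class lives in llama_stack.apis.vector_io and also carries the data field):

# Hypothetical, trimmed sketch of the page model after this change.
from pydantic import BaseModel


class VectorStoreSearchResponsePage(BaseModel):
    object: str = "vector_store.search_results.page"
    search_query: list[str]  # was: search_query: str
    has_more: bool = False
    next_page: str | None = None


page = VectorStoreSearchResponsePage(search_query=["test query"])
assert page.search_query == ["test query"]  # old callers compared against "test query"
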
@@ -478,7 +478,7 @@ class OpenAICreateVectorStoreRequestWithExtraBody(BaseModel, extra="allow"):
     name: str | None = None
     file_ids: list[str] | None = None
     expires_after: dict[str, Any] | None = None
-    chunking_strategy: dict[str, Any] | None = None
+    chunking_strategy: VectorStoreChunkingStrategy | None = None
     metadata: dict[str, Any] | None = None


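Typing chunking_strategy as VectorStoreChunkingStrategy instead of dict[str, Any] moves validation to request-construction time. A hedged sketch of the static variant, with shapes inferred from the imports and router code in the hunks below; the field names mirror the diff, while the token defaults are assumptions, not taken from this commit:

# Hypothetical stand-ins for the real classes in llama_stack.apis.vector_io.
from typing import Literal
from pydantic import BaseModel


class VectorStoreChunkingStrategyStaticConfig(BaseModel):
    # Assumed defaults for illustration; not specified in this diff.
    max_chunk_size_tokens: int = 800
    chunk_overlap_tokens: int = 400


class VectorStoreChunkingStrategyStatic(BaseModel):
    type: Literal["static"] = "static"
    static: VectorStoreChunkingStrategyStaticConfig = VectorStoreChunkingStrategyStaticConfig()


strategy = VectorStoreChunkingStrategyStatic()
assert strategy.type == "static"
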
@@ -20,6 +20,8 @@ from llama_stack.apis.vector_io import (
     SearchRankingOptions,
     VectorIO,
     VectorStoreChunkingStrategy,
+    VectorStoreChunkingStrategyStatic,
+    VectorStoreChunkingStrategyStaticConfig,
     VectorStoreDeleteResponse,
     VectorStoreFileBatchObject,
     VectorStoreFileContentsResponse,
@@ -167,6 +169,13 @@ class VectorIORouter(VectorIO):
         if embedding_dimension is not None:
             params.model_extra["embedding_dimension"] = embedding_dimension

+        # Set chunking strategy explicitly if not provided
+        if params.chunking_strategy is None or params.chunking_strategy.type == "auto":
+            # actualize the chunking strategy to static
+            params.chunking_strategy = VectorStoreChunkingStrategyStatic(
+                static=VectorStoreChunkingStrategyStaticConfig()
+            )
+
         return await provider.openai_create_vector_store(params)

     async def openai_list_vector_stores(
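
The router hunk above is the behavioral core of the change: a missing or "auto" chunking strategy is rewritten into an explicit static one before the request reaches any provider, so providers never have to interpret "auto" themselves. A self-contained sketch of that normalization, reusing the hypothetical models from the earlier sketch:

# Sketch of the normalization VectorIORouter now performs (hypothetical helper;
# in the diff this logic is inline in openai_create_vector_store).
def resolve_chunking_strategy(strategy):
    if strategy is None or strategy.type == "auto":
        # Mirrors the diff: actualize None/"auto" into explicit static defaults.
        return VectorStoreChunkingStrategyStatic(
            static=VectorStoreChunkingStrategyStaticConfig()
        )
    return strategy


assert resolve_chunking_strategy(None).type == "static"
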
@@ -350,7 +350,7 @@ def test_openai_vector_store_search_empty(
     assert search_response is not None
     assert hasattr(search_response, "data")
     assert len(search_response.data) == 0  # Empty store should return no results
-    assert search_response.search_query == "test query"
+    assert search_response.search_query == ["test query"]
     assert search_response.has_more is False


@@ -679,7 +679,7 @@ def test_openai_vector_store_attach_file(
     assert file_attach_response.id == file.id
     assert file_attach_response.vector_store_id == vector_store.id
     assert file_attach_response.status == "completed"
-    assert file_attach_response.chunking_strategy.type == "auto"
+    assert file_attach_response.chunking_strategy.type == "static"
     assert file_attach_response.created_at > 0
     assert not file_attach_response.last_error

@@ -815,8 +815,8 @@ def test_openai_vector_store_list_files(
     assert set(file_ids) == {file.id for file in files_list.data}
     assert files_list.data[0].object == "vector_store.file"
     assert files_list.data[0].vector_store_id == vector_store.id
-    assert files_list.data[0].status == "completed"
-    assert files_list.data[0].chunking_strategy.type == "auto"
+    assert files_list.data[0].status in ["completed", "in_progress"]
+    assert files_list.data[0].chunking_strategy.type == "static"
     assert files_list.data[0].created_at > 0
     assert files_list.first_id == files_list.data[0].id
     assert not files_list.data[0].last_error
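
Taken together, the test updates track the new default end to end: a file attached without an explicit strategy now reports chunking_strategy.type == "static" rather than "auto", search_query comes back as a list, and the list-files test additionally tolerates "in_progress" alongside "completed", presumably because file ingestion can still be in flight when the listing is fetched.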