Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-03 18:00:36 +00:00)
fix: actualize chunking strategy in vector store create API
# What does this PR do?

When a vector store is created with no chunking strategy, or with the `auto` strategy, it is now resolved ("actualized") to an explicit static strategy with the default static config. The VectorIORouter rewrites `params.chunking_strategy` before delegating to the provider, and OpenAIVectorStoreMixin persists the resolved strategy (via `model_dump()`) in the store metadata, so subsequent reads report `static` instead of `auto`.

## Test Plan

Updated the existing vector store integration tests (`test_openai_vector_store_attach_file`, `test_openai_vector_store_list_files`) to expect the resolved `static` chunking strategy on attached and listed files.
parent 9d5c34af27, commit e57c813a5e
3 changed files with 18 additions and 4 deletions
VectorIORouter (file 1 of 3):

@@ -20,6 +20,8 @@ from llama_stack.apis.vector_io import (
     SearchRankingOptions,
     VectorIO,
     VectorStoreChunkingStrategy,
+    VectorStoreChunkingStrategyStatic,
+    VectorStoreChunkingStrategyStaticConfig,
     VectorStoreDeleteResponse,
     VectorStoreFileBatchObject,
     VectorStoreFileContentsResponse,

@@ -167,6 +169,13 @@ class VectorIORouter(VectorIO):
         if embedding_dimension is not None:
             params.model_extra["embedding_dimension"] = embedding_dimension

+        # Set chunking strategy explicitly if not provided
+        if params.chunking_strategy is None or params.chunking_strategy.type == "auto":
+            # actualize the chunking strategy to static
+            params.chunking_strategy = VectorStoreChunkingStrategyStatic(
+                static=VectorStoreChunkingStrategyStaticConfig()
+            )
+
         return await provider.openai_create_vector_store(params)

     async def openai_list_vector_stores(
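The two hunks above change the router: when `openai_create_vector_store` receives no chunking strategy, or the `auto` one, it rewrites `params.chunking_strategy` to an explicit static strategy before delegating to the provider. Below is a minimal, self-contained sketch of that resolution rule; the `ChunkingStrategy*` classes are simplified stand-ins for the `llama_stack.apis.vector_io` models, and their field names and defaults are assumptions for illustration only.

```python
# Simplified stand-ins for the vector_io chunking-strategy models (field names and
# defaults are illustrative, not the real llama_stack definitions).
from typing import Literal, Optional, Union

from pydantic import BaseModel, Field


class ChunkingStrategyAuto(BaseModel):
    type: Literal["auto"] = "auto"


class ChunkingStrategyStaticConfig(BaseModel):
    max_chunk_size_tokens: int = 800     # assumed default
    chunk_overlap_tokens: int = 400      # assumed default


class ChunkingStrategyStatic(BaseModel):
    type: Literal["static"] = "static"
    static: ChunkingStrategyStaticConfig = Field(default_factory=ChunkingStrategyStaticConfig)


ChunkingStrategy = Union[ChunkingStrategyAuto, ChunkingStrategyStatic]


def actualize(strategy: Optional[ChunkingStrategy]) -> ChunkingStrategy:
    """Mirror of the router rule: missing or 'auto' becomes an explicit static strategy."""
    if strategy is None or strategy.type == "auto":
        return ChunkingStrategyStatic(static=ChunkingStrategyStaticConfig())
    return strategy


# The three cases the router can see:
assert actualize(None).type == "static"
assert actualize(ChunkingStrategyAuto()).type == "static"
assert actualize(ChunkingStrategyStatic()).type == "static"   # explicit static passes through
```

The same rule appears again in the mixin below, so the resolved strategy is also what gets persisted.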
OpenAIVectorStoreMixin (file 2 of 3):

@@ -26,6 +26,7 @@ from llama_stack.apis.vector_io import (
     VectorStoreChunkingStrategy,
     VectorStoreChunkingStrategyAuto,
     VectorStoreChunkingStrategyStatic,
+    VectorStoreChunkingStrategyStaticConfig,
     VectorStoreContent,
     VectorStoreDeleteResponse,
     VectorStoreFileBatchObject,

@@ -414,6 +415,10 @@ class OpenAIVectorStoreMixin(ABC):
             in_progress=0,
             total=0,
         )
+        if not params.chunking_strategy or params.chunking_strategy.type == "auto":
+            chunking_strategy = VectorStoreChunkingStrategyStatic(static=VectorStoreChunkingStrategyStaticConfig())
+        else:
+            chunking_strategy = params.chunking_strategy
         store_info: dict[str, Any] = {
             "id": vector_store_id,
             "object": "vector_store",

@@ -426,7 +431,7 @@ class OpenAIVectorStoreMixin(ABC):
             "expires_at": None,
             "last_active_at": created_at,
             "file_ids": [],
-            "chunking_strategy": params.chunking_strategy,
+            "chunking_strategy": chunking_strategy.model_dump(),
         }

         # Add provider information to metadata if provided
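The mixin applies the same resolution when it builds `store_info`, and it now stores `chunking_strategy.model_dump()` rather than the raw `params.chunking_strategy`. A rough sketch of why that matters for persistence, again with illustrative stand-in models rather than the real ones:

```python
# Why store model_dump() in store_info: the resolved strategy becomes a plain,
# JSON-serializable dict, so the persisted record explicitly says "static".
# (Stand-in model; field names and defaults are assumptions for illustration.)
import json
from typing import Literal

from pydantic import BaseModel, Field


class StaticConfig(BaseModel):
    max_chunk_size_tokens: int = 800     # assumed default
    chunk_overlap_tokens: int = 400      # assumed default


class StaticStrategy(BaseModel):
    type: Literal["static"] = "static"
    static: StaticConfig = Field(default_factory=StaticConfig)


resolved = StaticStrategy()              # what a None/"auto" request resolves to

store_info = {
    "id": "vs_123",                      # illustrative id
    "object": "vector_store",
    "chunking_strategy": resolved.model_dump(),   # dict, not a model instance
}

# The record round-trips through JSON and still reports the explicit strategy.
persisted = json.loads(json.dumps(store_info))
assert persisted["chunking_strategy"]["type"] == "static"
```

Under the old code the stored value was `None` for the default case, or a model object that would not survive JSON serialization as-is if the record is dumped to JSON; the one-line change in the last hunk above addresses both.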
Integration tests (file 3 of 3):

@@ -679,7 +679,7 @@ def test_openai_vector_store_attach_file(
     assert file_attach_response.id == file.id
     assert file_attach_response.vector_store_id == vector_store.id
     assert file_attach_response.status == "completed"
-    assert file_attach_response.chunking_strategy.type == "auto"
+    assert file_attach_response.chunking_strategy.type == "static"
     assert file_attach_response.created_at > 0
     assert not file_attach_response.last_error

@@ -815,8 +815,8 @@ def test_openai_vector_store_list_files(
     assert set(file_ids) == {file.id for file in files_list.data}
     assert files_list.data[0].object == "vector_store.file"
     assert files_list.data[0].vector_store_id == vector_store.id
-    assert files_list.data[0].status == "completed"
-    assert files_list.data[0].chunking_strategy.type == "auto"
+    assert files_list.data[0].status in ["completed", "in_progress"]
+    assert files_list.data[0].chunking_strategy.type == "static"
     assert files_list.data[0].created_at > 0
     assert files_list.first_id == files_list.data[0].id
     assert not files_list.data[0].last_error
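The test updates above capture the user-visible effect: files attached to a store created without a chunking strategy now report `static` instead of `auto`. A hedged end-to-end sketch of that flow against a Llama Stack server's OpenAI-compatible endpoint follows; the `base_url`, API key, file path, and store name are placeholders, and on older OpenAI SDK versions the vector store calls live under `client.beta.vector_stores` instead.

```python
# Usage sketch mirroring the updated tests (all endpoint details are placeholders).
from openai import OpenAI

# Assumed local Llama Stack server exposing the OpenAI-compatible API.
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="not-needed")

with open("example.txt", "rb") as f:                 # placeholder document
    uploaded = client.files.create(file=f, purpose="assistants")

# No chunking_strategy passed: the server now resolves it to an explicit static one.
store = client.vector_stores.create(name="demo-store")

attached = client.vector_stores.files.create(
    vector_store_id=store.id,
    file_id=uploaded.id,
)

print(attached.chunking_strategy.type)               # expected: "static", not "auto"
```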