Mirror of https://github.com/meta-llama/llama-stack.git
commit fee32f2248 (parent febae72092)

    minor fixes

    Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

3 changed files with 13 additions and 21 deletions
@@ -41,7 +41,6 @@ VECTOR_INDEX_PREFIX = f"vector_index:milvus:{VERSION}::"
 OPENAI_VECTOR_STORES_PREFIX = f"openai_vector_stores:milvus:{VERSION}::"
 OPENAI_VECTOR_STORES_FILES_PREFIX = f"openai_vector_stores_files:milvus:{VERSION}::"
 OPENAI_VECTOR_STORES_FILES_CONTENTS_PREFIX = f"openai_vector_stores_files_contents:milvus:{VERSION}::"
-from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id
 
 
 class MilvusIndex(EmbeddingIndex):
@@ -252,16 +251,6 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
         key = f"{OPENAI_VECTOR_STORES_PREFIX}{store_id}"
         await self.kvstore.delete(key)
 
-    async def _save_openai_vector_store_file(
-        self, store_id: str, file_id: str, file_info: dict[str, Any], file_contents: list[dict[str, Any]]
-    ) -> None:
-        """Save vector store file metadata to Milvus database."""
-        assert self.kvstore is not None
-        key = f"{OPENAI_VECTOR_STORES_FILES_PREFIX}{store_id}:{file_id}"
-        await self.kvstore.set(key=key, value=json.dumps(file_info))
-        content_key = f"{OPENAI_VECTOR_STORES_FILES_CONTENTS_PREFIX}{store_id}:{file_id}"
-        await self.kvstore.set(key=content_key, value=json.dumps(file_contents))
-
     async def _load_openai_vector_stores(self) -> dict[str, dict[str, Any]]:
         """Load all vector store metadata from persistent storage."""
         assert self.kvstore is not None
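
Note: the method deleted above appears to be a duplicate of the `_save_openai_vector_store_file` definition edited in the next hunk. Python keeps only the last definition of a name in a class body, so the earlier kvstore-backed copy was unreachable dead code. A minimal sketch of that shadowing behavior (illustrative names, not from the repo):

    class Demo:
        def save(self) -> str:
            return "first definition"

        def save(self) -> str:  # same name: silently replaces the method above
            return "second definition"

    print(Demo().save())  # prints "second definition"; the first is dead code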
@@ -274,6 +263,12 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
         self, store_id: str, file_id: str, file_info: dict[str, Any], file_contents: list[dict[str, Any]]
     ) -> None:
         """Save vector store file metadata to Milvus database."""
+        if store_id not in self.openai_vector_stores:
+            store_info = await self._load_openai_vector_stores(store_id)
+            if not store_info:
+                logger.error(f"OpenAI vector store {store_id} not found")
+                raise ValueError(f"No vector store found with id {store_id}")
+
         try:
             if not await asyncio.to_thread(self.client.has_collection, "openai_vector_store_files"):
                 file_schema = MilvusClient.create_schema(
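
Note: the guard added above calls `self._load_openai_vector_stores(store_id)`, but the context in the previous hunk shows that method declared without a `store_id` parameter, so the call as written would raise a TypeError unless the signature was changed elsewhere. A hedged sketch of an equivalent guard against the no-argument signature (an assumption, not the committed code):

    if store_id not in self.openai_vector_stores:
        # refresh the in-memory cache from persistent storage, then re-check
        self.openai_vector_stores = await self._load_openai_vector_stores()
        if store_id not in self.openai_vector_stores:
            logger.error(f"OpenAI vector store {store_id} not found")
            raise ValueError(f"No vector store found with id {store_id}")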
@@ -314,7 +309,6 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
                     schema=content_schema,
                 )
 
-            # Save file metadata
             file_data = [
                 {
                     "store_file_id": f"{store_id}_{file_id}",
@@ -332,7 +326,7 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
             # Save file contents
             contents_data = [
                 {
-                    "chunk_id": generate_chunk_id(file_id, content.get("chunk_id", None)),
+                    "chunk_id": content.get("chunk_metadata").get("chunk_id"),
                     "store_file_id": f"{store_id}_{file_id}",
                     "store_id": store_id,
                     "file_id": file_id,
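
Note: this change is what lets the first hunk drop the `generate_chunk_id` import: the chunk ID is now read from the chunk's persisted `chunk_metadata` rather than re-derived. One caveat: `content.get("chunk_metadata")` returns None when the key is missing, so the chained `.get("chunk_id")` would raise an AttributeError. A defensive variant (a sketch, not the committed code):

    # An empty-dict fallback makes a missing "chunk_metadata" yield None
    # rather than raising AttributeError on the chained .get() call.
    chunk_id = (content.get("chunk_metadata") or {}).get("chunk_id")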
@@ -355,7 +349,7 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
             if not await asyncio.to_thread(self.client.has_collection, "openai_vector_store_files"):
                 return {}
 
-            query_filter = f"store_id == '{store_id}' AND file_id == '{file_id}'"
+            query_filter = f"store_file_id == '{store_id}_{file_id}'"
             results = await asyncio.to_thread(
                 self.client.query,
                 collection_name="openai_vector_store_files",
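
Note: the metadata lookup now filters on the composite `store_file_id` value that `_save_openai_vector_store_file` writes as f"{store_id}_{file_id}", collapsing the two-clause AND into a single equality test. Side by side (made-up values):

    store_id, file_id = "vs_abc", "file_123"
    old_filter = f"store_id == '{store_id}' AND file_id == '{file_id}'"
    new_filter = f"store_file_id == '{store_id}_{file_id}'"
    # Both select the same rows as long as store_file_id was written
    # consistently at save time; the new form queries a single field.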
@@ -380,14 +374,15 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
             if not await asyncio.to_thread(self.client.has_collection, "openai_vector_store_files_contents"):
                 return []
 
-            query_filter = f"store_id == '{store_id}' AND file_id == '{file_id}'"
+            query_filter = (
+                f"store_id == '{store_id}' AND file_id == '{file_id}' AND store_file_id == '{store_id}_{file_id}'"
+            )
             results = await asyncio.to_thread(
                 self.client.query,
                 collection_name="openai_vector_store_files_contents",
                 filter=query_filter,
                 output_fields=["chunk_id", "store_id", "file_id", "content"],
             )
-            print(f"\nresults from milvus = {results}\n")
 
             contents = []
             for result in results:
@@ -398,8 +393,6 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
                     logger.error(f"Failed to decode content for store {store_id}, file {file_id}: {e}")
             return contents
         except Exception as e:
-            print(f"failed {e}")
-
             logger.error(f"Error loading openai vector store file contents for {file_id} in store {store_id}: {e}")
             return []
 
@@ -428,7 +421,6 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
 
     async def _delete_openai_vector_store_file_from_storage(self, store_id: str, file_id: str) -> None:
         """Delete vector store file metadata from Milvus database."""
-        print("milvus is trying to delete stuff")
         try:
             if not await asyncio.to_thread(self.client.has_collection, "openai_vector_store_files"):
                 return
@@ -31,7 +31,7 @@ def skip_if_provider_doesnt_support_openai_vector_stores(client_with_models):
 def skip_if_provider_doesnt_support_openai_vector_store_files_api(client_with_models):
     vector_io_providers = [p for p in client_with_models.providers.list() if p.api == "vector_io"]
     for p in vector_io_providers:
-        if p.provider_type in ["inline::faiss", "inline::sqlite-vec", "inlihne::milvus"]:
+        if p.provider_type in ["inline::faiss", "inline::sqlite-vec", "inline::milvus"]:
             return
 
     pytest.skip("OpenAI vector stores are not supported by any provider")
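
Note: this one-character fix matters because the provider check is an exact string match: with the `inlihne::milvus` misspelling, `inline::milvus` providers never matched, the early return was skipped, and the helper fell through to pytest.skip. Illustration (assumed values):

    allowed = ["inline::faiss", "inline::sqlite-vec", "inlihne::milvus"]  # before the fix
    assert "inline::milvus" not in allowed  # exact match fails, so Milvus tests were skipped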
@@ -524,7 +524,6 @@ def test_openai_vector_store_attach_files_on_creation(compat_client_with_empty_s
     file_ids = valid_file_ids + [failed_file_id]
     num_failed = len(file_ids) - len(valid_file_ids)
 
-    # Create a vector store
     vector_store = compat_client.vector_stores.create(
         name="test_store",
         file_ids=file_ids,
@@ -296,6 +296,7 @@ def test_response_non_streaming_file_search(
     if isinstance(openai_client, LlamaStackAsLibraryClient):
         pytest.skip("Responses API file search is not yet supported in library client.")
 
+    print(f"list models = {openai_client.models.list()}")
     test_name_base = get_base_test_name(request)
     if should_skip_test(verification_config, provider, model, test_name_base):
         pytest.skip(f"Skipping {test_name_base} for model {model} on provider {provider} based on config.")