updates for tonight

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
Author: Francisco Javier Arceo, 2025-07-27 23:55:22 -04:00
Parent: bc461567f6
Commit: f4ea02f084
9 changed files with 72 additions and 49 deletions


@@ -37,15 +37,15 @@ See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
-| `weaviate_api_key` | `<class 'str'>` | No | PydanticUndefined | The API key for the Weaviate instance |
-| `weaviate_cluster_url` | `<class 'str'>` | No | PydanticUndefined | The URL of the Weaviate cluster |
+| `weaviate_api_key` | `str \| None` | No | | The API key for the Weaviate instance |
+| `weaviate_cluster_url` | `str \| None` | No | | The URL of the Weaviate cluster |
 | `kvstore` | `utils.kvstore.config.RedisKVStoreConfig \| utils.kvstore.config.SqliteKVStoreConfig \| utils.kvstore.config.PostgresKVStoreConfig \| utils.kvstore.config.MongoDBKVStoreConfig, annotation=NoneType, required=False, default='sqlite', discriminator='type'` | No | | Config for KV store backend (SQLite only for now) |

 ## Sample Configuration

 ```yaml
-weaviate_api_key: dummy-api-key-for-testing
-weaviate_cluster_url: http://localhost:8080
+weaviate_api_key: null
+weaviate_cluster_url: null
 kvstore:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:=~/.llama/dummy}/weaviate_registry.db


@@ -16,7 +16,7 @@ from pydantic import BaseModel, Field
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
-from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id
+from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
 from llama_stack.schema_utils import json_schema_type, webmethod
 from llama_stack.strong_typing.schema import register_schema


@@ -7,7 +7,6 @@
 import asyncio
 import logging
 import os
-import re
 from typing import Any

 from numpy.typing import NDArray
@@ -30,6 +29,7 @@ from llama_stack.providers.utils.memory.vector_store import (
     EmbeddingIndex,
     VectorDBWithIndex,
 )
+from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name

 from .config import MilvusVectorIOConfig as RemoteMilvusVectorIOConfig
@@ -43,14 +43,6 @@ OPENAI_VECTOR_STORES_FILES_PREFIX = f"openai_vector_stores_files:milvus:{VERSION
 OPENAI_VECTOR_STORES_FILES_CONTENTS_PREFIX = f"openai_vector_stores_files_contents:milvus:{VERSION}::"

-def sanitize_collection_name(name: str) -> str:
-    """
-    Sanitize collection name to ensure it only contains numbers, letters, and underscores.
-    Any other characters are replaced with underscores.
-    """
-    return re.sub(r"[^a-zA-Z0-9_]", "_", name)
-
 class MilvusIndex(EmbeddingIndex):
     def __init__(
         self, client: MilvusClient, collection_name: str, consistency_level="Strong", kvstore: KVStore | None = None


@@ -17,15 +17,15 @@ from llama_stack.schema_utils import json_schema_type
 @json_schema_type
 class WeaviateVectorIOConfig(BaseModel):
-    weaviate_api_key: str = Field(description="The API key for the Weaviate instance")
-    weaviate_cluster_url: str = Field(description="The URL of the Weaviate cluster")
+    weaviate_api_key: str | None = Field(description="The API key for the Weaviate instance", default=None)
+    weaviate_cluster_url: str | None = Field(description="The URL of the Weaviate cluster", default=None)
     kvstore: KVStoreConfig | None = Field(description="Config for KV store backend (SQLite only for now)", default=None)

     @classmethod
     def sample_run_config(cls, __distro_dir__: str, **kwargs: Any) -> dict[str, Any]:
         return {
-            "weaviate_api_key": "dummy-api-key-for-testing",
-            "weaviate_cluster_url": "http://localhost:8080",
+            "weaviate_api_key": None,
+            "weaviate_cluster_url": None,
             "kvstore": SqliteKVStoreConfig.sample_run_config(
                 __distro_dir__=__distro_dir__,
                 db_name="weaviate_registry.db",
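For context, a minimal sketch of what the relaxed config allows. The field definitions mirror the diff above, but the standalone model and the example values here are illustrative only, not the provider's actual import path or credentials:

```python
# Illustrative stand-in mirroring the fields changed above: both connection
# settings are now optional, so a bare config is valid for local development.
from pydantic import BaseModel, Field


class WeaviateConfigSketch(BaseModel):
    weaviate_api_key: str | None = Field(description="The API key for the Weaviate instance", default=None)
    weaviate_cluster_url: str | None = Field(description="The URL of the Weaviate cluster", default=None)


local = WeaviateConfigSketch()  # no API key / URL -> the adapter falls back to a local Weaviate
cloud = WeaviateConfigSketch(
    weaviate_api_key="example-key",                           # hypothetical credentials
    weaviate_cluster_url="https://example.weaviate.network",  # hypothetical cluster URL
)
print(local.weaviate_cluster_url)  # None
print(cloud.weaviate_cluster_url)  # https://example.weaviate.network
```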


@@ -26,6 +26,7 @@ from llama_stack.providers.utils.memory.vector_store import (
     EmbeddingIndex,
     VectorDBWithIndex,
 )
+from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name

 from .config import WeaviateVectorIOConfig
@@ -42,9 +43,12 @@ OPENAI_VECTOR_STORES_FILES_CONTENTS_PREFIX = f"openai_vector_stores_files_conten
 class WeaviateIndex(EmbeddingIndex):
     def __init__(self, client: weaviate.Client, collection_name: str, kvstore: KVStore | None = None):
         self.client = client
-        self.collection_name = collection_name
+        self.collection_name = sanitize_collection_name(collection_name, weaviate_format=True)
         self.kvstore = kvstore

+    async def initialize(self):
+        pass
+
     async def add_chunks(self, chunks: list[Chunk], embeddings: NDArray):
         assert len(chunks) == len(embeddings), (
             f"Chunk length {len(chunks)} does not match embedding length {len(embeddings)}"
@@ -68,7 +72,8 @@ class WeaviateIndex(EmbeddingIndex):
         collection.data.insert_many(data_objects)

     async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
-        collection = self.client.collections.get(self.collection_name)
+        sanitized_collection_name = sanitize_collection_name(self.collection_name)
+        collection = self.client.collections.get(sanitized_collection_name)
         results = collection.query.near_vector(
             near_vector=embedding.tolist(),
@@ -93,7 +98,8 @@ class WeaviateIndex(EmbeddingIndex):
         return QueryChunksResponse(chunks=chunks, scores=scores)

     async def delete(self, chunk_ids: list[str]) -> None:
-        collection = self.client.collections.get(self.collection_name)
+        sanitized_collection_name = sanitize_collection_name(self.collection_name)
+        collection = self.client.collections.get(sanitized_collection_name)
         collection.data.delete_many(where=Filter.by_property("id").contains_any(chunk_ids))

     async def query_keyword(
@@ -139,8 +145,7 @@ class WeaviateVectorIOAdapter(
         self.metadata_collection_name = "openai_vector_stores_metadata"

     def _get_client(self) -> weaviate.Client:
-        # if self.config.test_environment:
-        if True:
+        if self.config.weaviate_cluster_url is None:
             key = "local::test"
             client = weaviate.connect_to_local(
                 host="localhost",
@@ -160,8 +165,8 @@ class WeaviateVectorIOAdapter(
     async def initialize(self) -> None:
         """Set up KV store and load existing vector DBs and OpenAI vector stores."""
-        # Initialize KV store for metadata
-        if self.kvstore is not None:
+        # Initialize KV store for metadata if configured
+        if self.config.kvstore is not None:
             self.kvstore = await kvstore_impl(self.config.kvstore)
         else:
             self.kvstore = None
@@ -194,11 +199,12 @@ class WeaviateVectorIOAdapter(
         vector_db: VectorDB,
     ) -> None:
         client = self._get_client()
+        sanitized_collection_name = sanitize_collection_name(vector_db.identifier)
         # Create collection if it doesn't exist
-        if not client.collections.exists(vector_db.identifier):
+        if not client.collections.exists(sanitized_collection_name):
+            print(f"creating collection {vector_db}")
             client.collections.create(
-                name=vector_db.identifier,
+                name=sanitized_collection_name,
                 vectorizer_config=wvc.config.Configure.Vectorizer.none(),
                 properties=[
                     wvc.config.Property(
@@ -208,37 +214,40 @@ class WeaviateVectorIOAdapter(
                 ],
             )

-        self.cache[vector_db.identifier] = VectorDBWithIndex(
+        self.cache[sanitized_collection_name] = VectorDBWithIndex(
             vector_db,
-            WeaviateIndex(client=client, collection_name=vector_db.identifier),
+            WeaviateIndex(client=client, collection_name=sanitized_collection_name),
             self.inference_api,
         )

     async def unregister_vector_db(self, vector_db_id: str) -> None:
-        if vector_db_id not in self.cache:
-            log.warning(f"Vector DB {vector_db_id} not found")
+        sanitized_collection_name = sanitize_collection_name(vector_db_id)
+        if sanitized_collection_name not in self.cache:
+            log.warning(f"Vector DB {sanitized_collection_name} not found")
             return
-        await self.cache[vector_db_id].index.delete()
-        del self.cache[vector_db_id]
+        await self.cache[sanitized_collection_name].index.delete()
+        del self.cache[sanitized_collection_name]

     async def _get_and_cache_vector_db_index(self, vector_db_id: str) -> VectorDBWithIndex | None:
-        if vector_db_id in self.cache:
-            return self.cache[vector_db_id]
+        sanitized_collection_name = sanitize_collection_name(vector_db_id)
+        if sanitized_collection_name in self.cache:
+            return self.cache[sanitized_collection_name]

-        vector_db = await self.vector_db_store.get_vector_db(vector_db_id)
+        vector_db = await self.vector_db_store.get_vector_db(sanitized_collection_name)
         if not vector_db:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise ValueError(f"Vector DB {sanitized_collection_name} not found")

         client = self._get_client()
         if not client.collections.exists(vector_db.identifier):
-            raise ValueError(f"Collection with name `{vector_db.identifier}` not found")
+            raise ValueError(f"Collection with name `{sanitized_collection_name}` not found")

         index = VectorDBWithIndex(
             vector_db=vector_db,
-            index=WeaviateIndex(client=client, collection_name=vector_db.identifier),
+            index=WeaviateIndex(client=client, collection_name=sanitized_collection_name),
             inference_api=self.inference_api,
         )
-        self.cache[vector_db_id] = index
+        self.cache[sanitized_collection_name] = index
         return index

     async def insert_chunks(
@@ -247,9 +256,10 @@ class WeaviateVectorIOAdapter(
         chunks: list[Chunk],
         ttl_seconds: int | None = None,
     ) -> None:
-        index = await self._get_and_cache_vector_db_index(vector_db_id)
+        sanitized_collection_name = sanitize_collection_name(vector_db_id)
+        index = await self._get_and_cache_vector_db_index(sanitized_collection_name)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise ValueError(f"Vector DB {sanitized_collection_name} not found")

         await index.insert_chunks(chunks)
@@ -259,8 +269,9 @@ class WeaviateVectorIOAdapter(
         query: InterleavedContent,
         params: dict[str, Any] | None = None,
     ) -> QueryChunksResponse:
-        index = await self._get_and_cache_vector_db_index(vector_db_id)
+        sanitized_collection_name = sanitize_collection_name(vector_db_id)
+        index = await self._get_and_cache_vector_db_index(sanitized_collection_name)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise ValueError(f"Vector DB {sanitized_collection_name} not found")

         return await index.query_chunks(query, params)
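To make the renaming scheme above concrete, here is a small sketch with a hypothetical identifier showing how a vector DB id is mapped before it reaches Weaviate (the import path is the one added in this commit; the id itself is made up):

```python
# Sketch only: how the adapter's two sanitization modes apply to one identifier.
from llama_stack.providers.utils.vector_io.vector_utils import sanitize_collection_name

vector_db_id = "vs_demo-1234"  # hypothetical registry identifier

# The adapter methods above key the cache on the default sanitization...
cache_key = sanitize_collection_name(vector_db_id)                            # "vs_demo_1234"
# ...while WeaviateIndex.__init__ applies the Weaviate-specific form.
weaviate_name = sanitize_collection_name(vector_db_id, weaviate_format=True)  # "Vsdemo1234"
```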


@@ -30,7 +30,7 @@ from llama_stack.providers.datatypes import Api
 from llama_stack.providers.utils.inference.prompt_adapter import (
     interleaved_content_as_str,
 )
-from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id
+from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id

 log = logging.getLogger(__name__)


@@ -5,6 +5,7 @@
 # the root directory of this source tree.

 import hashlib
+import re
 import uuid
@@ -19,3 +20,22 @@ def generate_chunk_id(document_id: str, chunk_text: str, chunk_window: str | Non
     if chunk_window:
         hash_input += f":{chunk_window}".encode()
     return str(uuid.UUID(hashlib.md5(hash_input, usedforsecurity=False).hexdigest()))
+
+
+def proper_case(s: str) -> str:
+    """Convert a string to proper case (first letter uppercase, rest lowercase)."""
+    return s[0].upper() + s[1:].lower() if s else s
+
+
+def sanitize_collection_name(name: str, weaviate_format=False) -> str:
+    """
+    Sanitize collection name to ensure it only contains numbers, letters, and underscores.
+    Any other characters are replaced with underscores.
+    """
+    print(f"Sanitizing collection name: {name} (Weaviate format: {weaviate_format})")
+    if not weaviate_format:
+        s = re.sub(r"[^a-zA-Z0-9_]", "_", name)
+    else:
+        s = proper_case(re.sub(r"[^a-zA-Z0-9]", "", name))
+    print(f"Sanitized collection name from: {name} to: {s}")
+    return s
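A quick behavioral check of the new helpers, with the expected values derived from the implementation above (the import path is the one introduced in this commit; note the helpers also emit their own debug prints when called):

```python
from llama_stack.providers.utils.vector_io.vector_utils import proper_case, sanitize_collection_name

# Default mode: anything outside [a-zA-Z0-9_] becomes an underscore.
print(sanitize_collection_name("openai_vector_store.files-2"))
# -> "openai_vector_store_files_2"

# Weaviate mode: disallowed characters are dropped and the result is proper-cased,
# since Weaviate collection (class) names are expected to start with an uppercase letter.
print(sanitize_collection_name("openai_vector_store.files-2", weaviate_format=True))
# -> "Openaivectorstorefiles2"

print(proper_case("weaviate"))  # -> "Weaviate"
```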


@@ -110,11 +110,11 @@ def test_openai_create_vector_store(compat_client_with_empty_stores, client_with
     # Create a vector store
     vector_store = client.vector_stores.create(
-        name="test_vector_store", metadata={"purpose": "testing", "environment": "integration"}
+        name="Vs_test_vector_store", metadata={"purpose": "testing", "environment": "integration"}
     )

     assert vector_store is not None
-    assert vector_store.name == "test_vector_store"
+    assert vector_store.name == "Vs_test_vector_store"
     assert vector_store.object == "vector_store"
     assert vector_store.status in ["completed", "in_progress"]
     assert vector_store.metadata["purpose"] == "testing"


@@ -5,7 +5,7 @@
 # the root directory of this source tree.

 from llama_stack.apis.vector_io import Chunk, ChunkMetadata
-from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id
+from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id

 # This test is a unit test for the chunk_utils.py helpers. This should only contain
 # tests which are specific to this file. More general (API-level) tests should be placed in