Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-15 14:08:00 +00:00)

Commit 363dc83041 (parent 61bffe8b56): "moar"
5 changed files with 9 additions and 4 deletions
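The hunks below (the per-file headers were lost in the mirror view) touch a build-time dependency helper, the Qdrant and Weaviate vector-IO providers, a shared vector-store utility module, and an OpenAI vector-store integration test.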
@@ -91,7 +91,7 @@ def get_provider_dependencies(
 def print_pip_install_help(config: BuildConfig):
-    normal_deps, special_deps = get_provider_dependencies(config)
+    normal_deps, special_deps, _ = get_provider_dependencies(config)
 
     cprint(
         f"Please install needed dependencies using the following commands:\n\nuv pip install {' '.join(normal_deps)}",
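get_provider_dependencies now returns a three-element tuple, so this caller must unpack all three values even though it only uses the first two. A minimal sketch of the contract change; the stub below is illustrative, and the diff does not show what the third element carries:

def get_provider_dependencies(config):
    # Stub mirroring the new 3-tuple return shape; the third element's
    # contents are not shown in this diff, so an empty list stands in.
    return (["uvicorn", "fastapi"], ["torch --extra-index-url ..."], [])

normal_deps, special_deps, _ = get_provider_dependencies(None)  # new: third value discarded
# normal_deps, special_deps = get_provider_dependencies(None)   # old: ValueError: too many values to unpack
print(f"uv pip install {' '.join(normal_deps)}")  # the command shown to users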
@@ -266,7 +266,9 @@ class QdrantVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolP
     ) -> VectorStoreFileObject:
         # Qdrant doesn't allow multiple clients to access the same storage path simultaneously.
         async with self._qdrant_lock:
-            await super().openai_attach_file_to_vector_store(vector_store_id, file_id, attributes, chunking_strategy)
+            return await super().openai_attach_file_to_vector_store(
+                vector_store_id, file_id, attributes, chunking_strategy
+            )
 
     async def delete_chunks(self, store_id: str, chunks_for_deletion: list[ChunkForDeletion]) -> None:
         """Delete chunks from a Qdrant vector store."""
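The substantive fix in this hunk is the return: the old code awaited the superclass call inside the lock but dropped its result, so a method annotated -> VectorStoreFileObject always returned None. A self-contained sketch of the pattern, with simplified names rather than the adapter's real signature:

import asyncio

class Base:
    async def attach(self, vector_store_id: str, file_id: str) -> str:
        return f"file-object:{vector_store_id}/{file_id}"

class Adapter(Base):
    def __init__(self) -> None:
        # Serialize access: Qdrant's local storage rejects concurrent clients.
        self._lock = asyncio.Lock()

    async def attach(self, vector_store_id: str, file_id: str) -> str:
        async with self._lock:
            # Without `return`, this coroutine would implicitly return None
            # and the caller would lose the file object.
            return await super().attach(vector_store_id, file_id)

print(asyncio.run(Adapter().attach("vs_1", "file_1")))  # file-object:vs_1/file_1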
@@ -68,6 +68,7 @@ class WeaviateIndex(EmbeddingIndex):
             data_objects.append(
                 wvc.data.DataObject(
                     properties={
+                        "chunk_id": chunk.chunk_id,
                         "chunk_content": chunk.model_dump_json(),
                     },
                     vector=embeddings[i].tolist(),
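Storing chunk_id as its own property is what makes the property-based delete filter in the next hunk work; before this, the id lived only inside the serialized JSON in chunk_content, where Weaviate filters cannot reach it. A hedged sketch of the resulting insert path with the v4 weaviate-client; the local connection and the "chunks" collection name are assumptions:

import weaviate
import weaviate.classes as wvc

client = weaviate.connect_to_local()  # assumes a local Weaviate instance
collection = client.collections.get("chunks")  # illustrative collection name
collection.data.insert_many([
    wvc.data.DataObject(
        properties={
            "chunk_id": "chunk-0001",               # now filterable on its own
            "chunk_content": '{"content": "..."}',  # full chunk, serialized
        },
        vector=[0.1, 0.2, 0.3],
    )
])
client.close()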
@@ -84,7 +85,7 @@ class WeaviateIndex(EmbeddingIndex):
         sanitized_collection_name = sanitize_collection_name(self.collection_name, weaviate_format=True)
         collection = self.client.collections.get(sanitized_collection_name)
         chunk_ids = [chunk.chunk_id for chunk in chunks_for_deletion]
-        collection.data.delete_many(where=Filter.by_property("id").contains_any(chunk_ids))
+        collection.data.delete_many(where=Filter.by_property("chunk_id").contains_any(chunk_ids))
 
     async def query_vector(self, embedding: NDArray, k: int, score_threshold: float) -> QueryChunksResponse:
         sanitized_collection_name = sanitize_collection_name(self.collection_name, weaviate_format=True)
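The old filter queried a property named "id", which the chunks were never stored under (Weaviate keeps the object UUID separate from user-defined properties), so deletions matched nothing. With chunk_id now a real property, the filter can target it. A sketch under the same assumptions as above:

import weaviate
from weaviate.classes.query import Filter

client = weaviate.connect_to_local()  # assumes a local Weaviate instance
collection = client.collections.get("chunks")  # illustrative collection name

chunk_ids = ["chunk-0001", "chunk-0002"]
collection.data.delete_many(where=Filter.by_property("chunk_id").contains_any(chunk_ids))

# If the ids were actual Weaviate object UUIDs, the native id filter would
# be the alternative:
# collection.data.delete_many(where=Filter.by_id().contains_any(chunk_ids))
client.close()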
@@ -37,6 +37,7 @@ from llama_stack.apis.vector_io import (
     VectorStoreSearchResponse,
     VectorStoreSearchResponsePage,
 )
+from llama_stack.log import get_logger
 from llama_stack.providers.utils.kvstore.api import KVStore
 from llama_stack.providers.utils.memory.vector_store import (
     ChunkForDeletion,
@@ -44,7 +45,7 @@ from llama_stack.providers.utils.memory.vector_store import (
     make_overlapped_chunks,
 )
 
-logger = logging.getLogger(__name__)
+logger = get_logger(__name__, category="vector_io")
 
 # Constants for OpenAI vector stores
 CHUNK_MULTIPLIER = 5
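Paired with the get_logger import in the previous hunk, this swaps the stdlib logger for llama_stack's category-aware wrapper, so vector-IO records can be filtered per subsystem. A sketch of the before/after; the wrapper's behavior beyond what this diff shows is an assumption:

# Old: plain stdlib logger, no subsystem tag.
# import logging
# logger = logging.getLogger(__name__)

# New: llama_stack's wrapper tags records with a category ("vector_io" here),
# which the project uses to scope log levels.
from llama_stack.log import get_logger

logger = get_logger(__name__, category="vector_io")
logger.info("Deleted %d chunks", 2)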
@@ -592,6 +592,7 @@ def test_openai_vector_store_list_files(compat_client_with_empty_stores, client_
             vector_store_id=vector_store.id,
             file_id=file.id,
         )
+        assert response is not None
         assert response.status == "completed", (
             f"Failed to attach file {file.id} to vector store {vector_store.id}: {response=}"
         )
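The new None-check ties back to the Qdrant fix above: a provider that forgets to return the file object would otherwise make response.status fail with an opaque AttributeError. A sketch of the failure mode this guards against, using toy types rather than the real client objects:

class FileObject:  # toy stand-in for VectorStoreFileObject
    status = "completed"

def check(response: FileObject | None) -> None:
    assert response is not None, "attach returned None"  # clear message here...
    assert response.status == "completed"                # ...instead of AttributeError here

check(FileObject())  # passes
# check(None)        # AssertionError: attach returned None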