Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-12 20:12:33 +00:00
format
Commit c7eed3ef80 (parent 44f104baae)
16 changed files with 35 additions and 21 deletions
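Two mechanical changes account for the entire diff below. First, in every touched module the "from llama_stack.apis.vector_stores import ..." line moves below the corresponding vector_io import, restoring sorted order within the import block. Second, statements that overflow the formatter's line-length limit are re-wrapped. A minimal sketch of the ordering rule, assuming plain lexicographic sorting of module paths (the isort/ruff-style behavior this commit appears to apply):

# Why vector_stores moves below vector_io: lexicographic comparison of the
# module paths hits "i" < "s" at the first character where they differ.
modules = [
    "llama_stack.apis.vector_stores",
    "llama_stack.apis.vector_io",
]
print(sorted(modules))
# ['llama_stack.apis.vector_io', 'llama_stack.apis.vector_stores']
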
@@ -23,8 +23,8 @@ from llama_stack.apis.scoring import Scoring
 from llama_stack.apis.scoring_functions import ScoringFn, ScoringFnInput
 from llama_stack.apis.shields import Shield, ShieldInput
 from llama_stack.apis.tools import ToolGroup, ToolGroupInput, ToolRuntime
-from llama_stack.apis.vector_stores import VectorStore, VectorStoreInput
 from llama_stack.apis.vector_io import VectorIO
+from llama_stack.apis.vector_stores import VectorStore, VectorStoreInput
 from llama_stack.core.access_control.datatypes import AccessRule
 from llama_stack.core.storage.datatypes import (
     KVStoreReference,

@@ -29,8 +29,8 @@ from llama_stack.apis.scoring_functions import ScoringFunctions
 from llama_stack.apis.shields import Shields
 from llama_stack.apis.telemetry import Telemetry
 from llama_stack.apis.tools import ToolGroups, ToolRuntime
-from llama_stack.apis.vector_stores import VectorStores
 from llama_stack.apis.vector_io import VectorIO
+from llama_stack.apis.vector_stores import VectorStores
 from llama_stack.apis.version import LLAMA_STACK_API_V1ALPHA
 from llama_stack.core.client import get_client_impl
 from llama_stack.core.datatypes import (

@@ -17,8 +17,8 @@ from numpy.typing import NDArray
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import HealthResponse, HealthStatus, VectorStoresProtocolPrivate
 from llama_stack.providers.utils.kvstore import kvstore_impl

@@ -17,8 +17,8 @@ from numpy.typing import NDArray
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import VectorStoresProtocolPrivate
 from llama_stack.providers.utils.kvstore import kvstore_impl

@@ -412,7 +412,9 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorStoresProtocolPrivate):
         return [v.vector_store for v in self.cache.values()]
 
     async def register_vector_store(self, vector_store: VectorStore) -> None:
-        index = await SQLiteVecIndex.create(vector_store.embedding_dimension, self.config.db_path, vector_store.identifier)
+        index = await SQLiteVecIndex.create(
+            vector_store.embedding_dimension, self.config.db_path, vector_store.identifier
+        )
         self.cache[vector_store.identifier] = VectorStoreWithIndex(vector_store, index, self.inference_api)
 
     async def _get_and_cache_vector_store_index(self, vector_store_id: str) -> VectorStoreWithIndex | None:

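The hunk above is the first of the line-wrapping fixes: a call that overflows the limit has its arguments moved to an indented continuation line, with the closing parenthesis on a line of its own. A hedged sketch of the convention, assuming a black/ruff-style formatter; the helper below is hypothetical, purely for illustration:

# Hypothetical helper, present only to make the sketch self-contained.
def create_index(embedding_dimension: int, db_path: str, identifier: str) -> tuple[int, str, str]:
    return (embedding_dimension, db_path, identifier)

# A call that fits within the limit stays on one line.
index = create_index(384, "/tmp/vec.db", "store-1")

# The wrapped form the formatter emits when the one-line call would be too
# long: arguments on one indented continuation line, closing parenthesis
# on its own line, mirroring the SQLiteVecIndex.create hunk above.
index = create_index(
    384, "/tmp/vec.db", "store-1"
)
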
@@ -13,8 +13,8 @@ from numpy.typing import NDArray
 
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import VectorStoresProtocolPrivate
 from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig

@@ -14,8 +14,8 @@ from pymilvus import AnnSearchRequest, DataType, Function, FunctionType, MilvusClient
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import VectorStoresProtocolPrivate
 from llama_stack.providers.inline.vector_io.milvus import MilvusVectorIOConfig as InlineMilvusVectorIOConfig

@@ -16,8 +16,8 @@ from pydantic import BaseModel, TypeAdapter
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import VectorStoresProtocolPrivate
 from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str

@@ -16,7 +16,6 @@ from qdrant_client.models import PointStruct
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import (
     Chunk,
     QueryChunksResponse,
@@ -24,6 +23,7 @@ from llama_stack.apis.vector_io import (
     VectorStoreChunkingStrategy,
     VectorStoreFileObject,
 )
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import VectorStoresProtocolPrivate
 from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig

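Where vector_io is imported with a parenthesized multi-line block, as in this Qdrant file, the move produces two hunks: the old vector_stores line is deleted above the "from llama_stack.apis.vector_io import (" block and re-added below its closing parenthesis, so git reports a deletion and an addition several lines apart rather than an adjacent swap. The same two-hunk pattern appears in two later files.
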
@@ -171,7 +171,9 @@ class QdrantVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorStoresProtocolPrivate):
 
         for vector_store_data in stored_vector_stores:
             vector_store = VectorStore.model_validate_json(vector_store_data)
-            index = VectorStoreWithIndex(vector_store, QdrantIndex(self.client, vector_store.identifier), self.inference_api)
+            index = VectorStoreWithIndex(
+                vector_store, QdrantIndex(self.client, vector_store.identifier), self.inference_api
+            )
             self.cache[vector_store.identifier] = index
         self.openai_vector_stores = await self._load_openai_vector_stores()
 

@@ -186,7 +188,9 @@ class QdrantVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorStoresProtocolPrivate):
         await self.kvstore.set(key=key, value=vector_store.model_dump_json())
 
         index = VectorStoreWithIndex(
-            vector_store=vector_store, index=QdrantIndex(self.client, vector_store.identifier), inference_api=self.inference_api
+            vector_store=vector_store,
+            index=QdrantIndex(self.client, vector_store.identifier),
+            inference_api=self.inference_api,
         )
 
         self.cache[vector_store.identifier] = index

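The second Qdrant hunk shows the other wrapping style: when the keyword arguments will not fit on a single continuation line, each one gets its own line, with a trailing comma after the last. A sketch with hypothetical names, assuming black/ruff's magic-trailing-comma behavior (once the trailing comma is present, the formatter keeps the one-argument-per-line layout on later runs):

# Hypothetical constructor illustrating one-keyword-per-line wrapping.
def build_entry(vector_store: str, index: str, inference_api: str) -> dict[str, str]:
    return {"vector_store": vector_store, "index": index, "inference_api": inference_api}

entry = build_entry(
    vector_store="vs_123",
    index="qdrant_index_vs_123",
    inference_api="inference",
)
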
@@ -16,8 +16,8 @@ from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import VectorStoresProtocolPrivate

@@ -17,7 +17,6 @@ from pydantic import TypeAdapter
 
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files, OpenAIFileObject
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import (
     Chunk,
     OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
@@ -43,6 +42,7 @@ from llama_stack.apis.vector_io import (
     VectorStoreSearchResponse,
     VectorStoreSearchResponsePage,
 )
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.core.id_generation import generate_object_id
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.kvstore.api import KVStore

@@ -23,8 +23,8 @@ from llama_stack.apis.common.content_types import (
 )
 from llama_stack.apis.inference import OpenAIEmbeddingsRequestWithExtraBody
 from llama_stack.apis.tools import RAGDocument
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.log import get_logger
 from llama_stack.models.llama.llama3.tokenizer import Tokenizer
 from llama_stack.providers.datatypes import Api

@@ -49,7 +49,9 @@ def client_with_empty_registry(client_with_models):
 
 
 @vector_provider_wrapper
-def test_vector_store_retrieve(client_with_empty_registry, embedding_model_id, embedding_dimension, vector_io_provider_id):
+def test_vector_store_retrieve(
+    client_with_empty_registry, embedding_model_id, embedding_dimension, vector_io_provider_id
+):
     vector_store_name = "test_vector_store"
     create_response = client_with_empty_registry.vector_stores.create(
         name=vector_store_name,

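Function definitions get the same treatment as calls: the parameter list drops to an indented continuation line and the closing "):" lands on its own line. A sketch (fixture names copied from the hunk; the body is a placeholder):

# Wrapped-signature style as applied to the two test functions below.
def test_example(
    client_with_empty_registry, embedding_model_id, embedding_dimension, vector_io_provider_id
):
    assert embedding_dimension > 0  # placeholder body for illustration
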
@@ -69,7 +71,9 @@ def test_vector_store_retrieve(client_with_empty_registry, embedding_model_id, embedding_dimension, vector_io_provider_id):
 
 
 @vector_provider_wrapper
-def test_vector_store_register(client_with_empty_registry, embedding_model_id, embedding_dimension, vector_io_provider_id):
+def test_vector_store_register(
+    client_with_empty_registry, embedding_model_id, embedding_dimension, vector_io_provider_id
+):
     vector_store_name = "test_vector_store"
     response = client_with_empty_registry.vector_stores.create(
         name=vector_store_name,

@@ -10,8 +10,8 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import numpy as np
 import pytest
 
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter

@@ -11,8 +11,8 @@ import numpy as np
 import pytest
 
 from llama_stack.apis.files import Files
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.providers.datatypes import HealthStatus
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.inline.vector_io.faiss.faiss import (

@@ -12,7 +12,6 @@ import numpy as np
 import pytest
 
 from llama_stack.apis.common.errors import VectorStoreNotFoundError
-from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.apis.vector_io import (
     Chunk,
     OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
@@ -21,6 +20,7 @@ from llama_stack.apis.vector_io import (
     VectorStoreChunkingStrategyAuto,
     VectorStoreFileObject,
 )
+from llama_stack.apis.vector_stores import VectorStore
 from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
 
 # This test is a unit test for the inline VectorIO providers. This should only contain

@@ -134,7 +134,9 @@ async def test_duplicate_provider_registration(cached_disk_dist_registry):
         provider_resource_id="test_vector_store_2",
         provider_id="baz",  # Same provider_id
     )
-    with pytest.raises(ValueError, match="Object of type 'vector_store' and identifier 'test_vector_store_2' already exists"):
+    with pytest.raises(
+        ValueError, match="Object of type 'vector_store' and identifier 'test_vector_store_2' already exists"
+    ):
         await cached_disk_dist_registry.register(duplicate_vector_store)
 
     result = await cached_disk_dist_registry.get("vector_store", "test_vector_store_2")

@@ -289,7 +291,9 @@ async def test_double_registration_different_objects(disk_dist_registry):
     assert result1 is True
 
     # Second registration with different data should fail
-    with pytest.raises(ValueError, match="Object of type 'vector_store' and identifier 'test_vector_store' already exists"):
+    with pytest.raises(
+        ValueError, match="Object of type 'vector_store' and identifier 'test_vector_store' already exists"
+    ):
         await disk_dist_registry.register(vector_store2)
 
     # Verify original object is unchanged

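A reading aid for the last two hunks: the match= argument to pytest.raises is a regular expression applied with re.search to the string form of the exception, not an exact string, so the long wrapped patterns above only need to match a substring. A minimal self-contained check:

import pytest

def test_match_is_a_regex_search() -> None:
    # match= is re.search'd against str(exc); a substring pattern passes.
    with pytest.raises(ValueError, match="already exists"):
        raise ValueError("Object of type 'vector_store' and identifier 'x' already exists")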