Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-12 13:00:39 +00:00)
chore: standardize vector store not found error (#2968)
# What does this PR do?

1. Creates a new `VectorStoreNotFoundError` class
2. Implements the new class where appropriate

Relates to #2379

Signed-off-by: Nathan Weinberg <nweinber@redhat.com>
This commit is contained in:
parent 272a3e9937
commit cd5c6a2fcd
9 changed files with 46 additions and 31 deletions
`llama_stack/apis/common/errors.py`: add the new `VectorStoreNotFoundError` class next to the existing not-found errors.

```diff
@@ -26,6 +26,14 @@ class ModelNotFoundError(ValueError):
         super().__init__(message)
 
 
+class VectorStoreNotFoundError(ValueError):
+    """raised when Llama Stack cannot find a referenced vector store"""
+
+    def __init__(self, vector_store_name: str) -> None:
+        message = f"Vector store '{vector_store_name}' not found. Use client.vector_dbs.list() to list available vector stores."
+        super().__init__(message)
+
+
 class DatasetNotFoundError(ValueError):
     """raised when Llama Stack cannot find a referenced dataset"""
```
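Since `VectorStoreNotFoundError` subclasses `ValueError`, existing `except ValueError` handlers continue to work. A minimal sketch of the raise/catch behavior (the `lookup` helper and the `"my-docs"` id are hypothetical, not part of this PR):

```python
from llama_stack.apis.common.errors import VectorStoreNotFoundError

def lookup(stores: dict, store_id: str):
    # Mirrors the membership-check-then-raise pattern this PR applies across providers
    if store_id not in stores:
        raise VectorStoreNotFoundError(store_id)
    return stores[store_id]

try:
    lookup({}, "my-docs")
except ValueError as exc:
    # Catches VectorStoreNotFoundError too, since it subclasses ValueError
    print(exc)
    # -> Vector store 'my-docs' not found. Use client.vector_dbs.list() to list available vector stores.
```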
VectorDBs routing table (`VectorDBsRoutingTable`): import the new error and raise it from the lookup and unregister paths.

```diff
@@ -8,7 +8,7 @@ from typing import Any
 from pydantic import TypeAdapter
 
-from llama_stack.apis.common.errors import ModelNotFoundError
+from llama_stack.apis.common.errors import ModelNotFoundError, VectorStoreNotFoundError
 from llama_stack.apis.models import ModelType
 from llama_stack.apis.resource import ResourceType
 from llama_stack.apis.vector_dbs import ListVectorDBsResponse, VectorDB, VectorDBs
@@ -40,7 +40,7 @@ class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
     async def get_vector_db(self, vector_db_id: str) -> VectorDB:
         vector_db = await self.get_object_by_identifier("vector_db", vector_db_id)
         if vector_db is None:
-            raise ValueError(f"Vector DB '{vector_db_id}' not found")
+            raise VectorStoreNotFoundError(vector_db_id)
         return vector_db
 
     async def register_vector_db(
@@ -85,7 +85,7 @@ class VectorDBsRoutingTable(CommonRoutingTableImpl, VectorDBs):
     async def unregister_vector_db(self, vector_db_id: str) -> None:
         existing_vector_db = await self.get_vector_db(vector_db_id)
         if existing_vector_db is None:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
         await self.unregister_object(existing_vector_db)
 
     async def openai_retrieve_vector_store(
```
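With the routing table raising the standardized error, callers of `get_vector_db` can branch on the exception type instead of string-matching `ValueError` messages. A hedged sketch of a call site (`routing_table` stands in for a `VectorDBsRoutingTable` instance; the helper is illustrative only):

```python
from llama_stack.apis.common.errors import VectorStoreNotFoundError
from llama_stack.apis.vector_dbs import VectorDB

async def fetch_db(routing_table, vector_db_id: str) -> VectorDB | None:
    try:
        return await routing_table.get_vector_db(vector_db_id)
    except VectorStoreNotFoundError:
        # Same exception type no matter which provider backs the store
        return None
```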
Faiss provider (`FaissVectorIOAdapter`): import the new error and use it in `query_chunks`.

```diff
@@ -15,6 +15,7 @@ import faiss
 import numpy as np
 from numpy.typing import NDArray
 
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
@@ -285,7 +286,7 @@ class FaissVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     ) -> QueryChunksResponse:
         index = self.cache.get(vector_db_id)
         if index is None:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         return await index.query_chunks(query, params)
```
SQLite-vec provider (`SQLiteVecVectorIOAdapter`): replace all four `ValueError` raises.

```diff
@@ -15,6 +15,7 @@ import numpy as np
 import sqlite_vec
 from numpy.typing import NDArray
 
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files
 from llama_stack.apis.inference import Inference
 from llama_stack.apis.vector_dbs import VectorDB
@@ -508,11 +509,11 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
             return self.cache[vector_db_id]
 
         if self.vector_db_store is None:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         vector_db = self.vector_db_store.get_vector_db(vector_db_id)
         if not vector_db:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         index = VectorDBWithIndex(
             vector_db=vector_db,
@@ -537,7 +538,7 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     async def insert_chunks(self, vector_db_id: str, chunks: list[Chunk], ttl_seconds: int | None = None) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
         # The VectorDBWithIndex helper is expected to compute embeddings via the inference_api
         # and then call our index's add_chunks.
         await index.insert_chunks(chunks)
@@ -547,14 +548,14 @@ class SQLiteVecVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
         return await index.query_chunks(query, params)
 
     async def delete_chunks(self, store_id: str, chunk_ids: list[str]) -> None:
         """Delete a chunk from a sqlite_vec index."""
         index = await self._get_and_cache_vector_db_index(store_id)
         if not index:
-            raise ValueError(f"Vector DB {store_id} not found")
+            raise VectorStoreNotFoundError(store_id)
 
         for chunk_id in chunk_ids:
             # Use the index's delete_chunk method
```
Milvus provider (`MilvusVectorIOAdapter`): same substitution across the cache lookup, insert, query, and delete paths.

```diff
@@ -13,6 +13,7 @@ from typing import Any
 from numpy.typing import NDArray
 from pymilvus import DataType, Function, FunctionType, MilvusClient
 
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files.files import Files
 from llama_stack.apis.inference import Inference, InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
@@ -329,11 +330,11 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
             return self.cache[vector_db_id]
 
         if self.vector_db_store is None:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         vector_db = await self.vector_db_store.get_vector_db(vector_db_id)
         if not vector_db:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         index = VectorDBWithIndex(
             vector_db=vector_db,
@@ -356,7 +357,7 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         await index.insert_chunks(chunks)
@@ -368,7 +369,7 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         if params and params.get("mode") == "keyword":
             # Check if this is inline Milvus (Milvus-Lite)
@@ -384,7 +385,7 @@ class MilvusVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
         """Delete a chunk from a milvus vector store."""
         index = await self._get_and_cache_vector_db_index(store_id)
         if not index:
-            raise ValueError(f"Vector DB {store_id} not found")
+            raise VectorStoreNotFoundError(store_id)
 
         for chunk_id in chunk_ids:
             # Use the index's delete_chunk method
```
PGVector provider (`PGVectorVectorIOAdapter`): import the new error and use it in `delete_chunks`.

```diff
@@ -13,6 +13,7 @@ from psycopg2 import sql
 from psycopg2.extras import Json, execute_values
 from pydantic import BaseModel, TypeAdapter
 
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files.files import Files
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
@@ -275,7 +276,7 @@ class PGVectorVectorIOAdapter(OpenAIVectorStoreMixin, VectorIO, VectorDBsProtocolPrivate):
         """Delete a chunk from a PostgreSQL vector store."""
         index = await self._get_and_cache_vector_db_index(store_id)
         if not index:
-            raise ValueError(f"Vector DB {store_id} not found")
+            raise VectorStoreNotFoundError(store_id)
 
         for chunk_id in chunk_ids:
             # Use the index's delete_chunk method
```
Qdrant provider (`QdrantVectorIOAdapter`): same substitution in the lookup, insert, and query paths.

```diff
@@ -12,6 +12,7 @@ from numpy.typing import NDArray
 from qdrant_client import AsyncQdrantClient, models
 from qdrant_client.models import PointStruct
 
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import (
@@ -173,7 +174,7 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
 
         vector_db = await self.vector_db_store.get_vector_db(vector_db_id)
         if not vector_db:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         index = VectorDBWithIndex(
             vector_db=vector_db,
@@ -191,7 +192,7 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         await index.insert_chunks(chunks)
@@ -203,7 +204,7 @@ class QdrantVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         return await index.query_chunks(query, params)
```
Weaviate provider (`WeaviateVectorIOAdapter`): same substitution in the lookup, insert, and query paths.

```diff
@@ -14,6 +14,7 @@ from weaviate.classes.init import Auth
 from weaviate.classes.query import Filter
 
 from llama_stack.apis.common.content_types import InterleavedContent
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files.files import Files
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
@@ -212,7 +213,7 @@ class WeaviateVectorIOAdapter(
 
         vector_db = await self.vector_db_store.get_vector_db(vector_db_id)
         if not vector_db:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         client = self._get_client()
         if not client.collections.exists(vector_db.identifier):
@@ -234,7 +235,7 @@ class WeaviateVectorIOAdapter(
     ) -> None:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         await index.insert_chunks(chunks)
@@ -246,7 +247,7 @@ class WeaviateVectorIOAdapter(
     ) -> QueryChunksResponse:
         index = await self._get_and_cache_vector_db_index(vector_db_id)
         if not index:
-            raise ValueError(f"Vector DB {vector_db_id} not found")
+            raise VectorStoreNotFoundError(vector_db_id)
 
         return await index.query_chunks(query, params)
```
OpenAI-compatible vector store mixin (`OpenAIVectorStoreMixin`): replace the `Vector store ... not found` raises across the retrieve, modify, delete, search, and file operations.

```diff
@@ -13,6 +13,7 @@ import uuid
 from abc import ABC, abstractmethod
 from typing import Any
 
+from llama_stack.apis.common.errors import VectorStoreNotFoundError
 from llama_stack.apis.files import Files, OpenAIFileObject
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import (
@@ -322,7 +323,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreObject:
         """Retrieves a vector store."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         store_info = self.openai_vector_stores[vector_store_id]
         return VectorStoreObject(**store_info)
@@ -336,7 +337,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreObject:
         """Modifies a vector store."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         store_info = self.openai_vector_stores[vector_store_id].copy()
@@ -365,7 +366,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreDeleteResponse:
         """Delete a vector store."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         # Delete from persistent storage (provider-specific)
         await self._delete_openai_vector_store_from_storage(vector_store_id)
@@ -403,7 +404,7 @@ class OpenAIVectorStoreMixin(ABC):
             raise ValueError(f"search_mode must be one of {valid_modes}, got {search_mode}")
 
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         if isinstance(query, list):
             search_query = " ".join(query)
@@ -556,7 +557,7 @@ class OpenAIVectorStoreMixin(ABC):
         chunking_strategy: VectorStoreChunkingStrategy | None = None,
     ) -> VectorStoreFileObject:
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         attributes = attributes or {}
         chunking_strategy = chunking_strategy or VectorStoreChunkingStrategyAuto()
@@ -661,7 +662,7 @@ class OpenAIVectorStoreMixin(ABC):
         order = order or "desc"
 
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         store_info = self.openai_vector_stores[vector_store_id]
@@ -709,7 +710,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreFileObject:
         """Retrieves a vector store file."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         store_info = self.openai_vector_stores[vector_store_id]
         if file_id not in store_info["file_ids"]:
@@ -725,7 +726,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreFileContentsResponse:
         """Retrieves the contents of a vector store file."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         file_info = await self._load_openai_vector_store_file(vector_store_id, file_id)
         dict_chunks = await self._load_openai_vector_store_file_contents(vector_store_id, file_id)
@@ -748,7 +749,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreFileObject:
         """Updates a vector store file."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         store_info = self.openai_vector_stores[vector_store_id]
         if file_id not in store_info["file_ids"]:
@@ -766,7 +767,7 @@ class OpenAIVectorStoreMixin(ABC):
     ) -> VectorStoreFileDeleteResponse:
        """Deletes a vector store file."""
         if vector_store_id not in self.openai_vector_stores:
-            raise ValueError(f"Vector store {vector_store_id} not found")
+            raise VectorStoreNotFoundError(vector_store_id)
 
         dict_chunks = await self._load_openai_vector_store_file_contents(vector_store_id, file_id)
         chunks = [Chunk.model_validate(c) for c in dict_chunks]
```
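A small sanity test could lock in this contract; the test below is a hypothetical addition, not part of the commit:

```python
from llama_stack.apis.common.errors import VectorStoreNotFoundError

def test_vector_store_not_found_error_contract():
    err = VectorStoreNotFoundError("vs_123")
    assert isinstance(err, ValueError)  # backward compatible with old handlers
    assert "vs_123" in str(err)  # message names the missing store
    assert "client.vector_dbs.list()" in str(err)  # and points at the listing API
```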