diff --git a/src/llama_stack/providers/remote/vector_io/elasticsearch/elasticsearch.py b/src/llama_stack/providers/remote/vector_io/elasticsearch/elasticsearch.py
index 85042413a..aa3a986dc 100644
--- a/src/llama_stack/providers/remote/vector_io/elasticsearch/elasticsearch.py
+++ b/src/llama_stack/providers/remote/vector_io/elasticsearch/elasticsearch.py
@@ -10,20 +10,21 @@
 from elasticsearch import AsyncElasticsearch
 from elasticsearch.helpers import async_bulk
 from numpy.typing import NDArray
 
-from llama_stack.apis.common.errors import VectorStoreNotFoundError
-from llama_stack.apis.files import Files
-from llama_stack.apis.inference import Inference, InterleavedContent
-from llama_stack.apis.vector_io import (
-    Chunk,
-    QueryChunksResponse,
-    VectorIO,
-)
-from llama_stack.apis.vector_stores import VectorStore
+from llama_stack.core.storage.kvstore import kvstore_impl
 from llama_stack.log import get_logger
-from llama_stack.providers.datatypes import VectorStoresProtocolPrivate
-from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
 from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
+from llama_stack_api import (
+    Chunk,
+    Files,
+    Inference,
+    InterleavedContent,
+    QueryChunksResponse,
+    VectorIO,
+    VectorStore,
+    VectorStoreNotFoundError,
+    VectorStoresProtocolPrivate,
+)
 
 from .config import ElasticsearchVectorIOConfig