refactor(storage): remove core storage api shims and fix imports

Ashwin Bharambe 2025-11-18 11:04:57 -08:00
parent 90f07dad9e
commit 7e78d5220a
43 changed files with 148 additions and 107 deletions
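In short: the per-package `api.py` shims under `llama_stack.core.storage.{kvstore,sqlstore}` are removed, callers import the protocol classes from `llama_stack_api.internal.*` instead, and the affected import blocks are re-sorted. A one-line summary of the path change, as it appears across the hunks below (the old absolute paths are reconstructed from the deleted shim files and relative imports shown in this diff):

```python
# Old (removed shims):
#   from llama_stack.core.storage.kvstore.api import KVStore
#   from llama_stack.core.storage.sqlstore.api import ColumnDefinition, ColumnType, SqlStore

# New (canonical internal interfaces):
from llama_stack_api.internal.kvstore import KVStore
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore
```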

View file

@ -11,9 +11,9 @@ from typing import Any, Literal
from pydantic import BaseModel, TypeAdapter
from llama_stack.core.datatypes import AccessRule, StackRunConfig
from llama_stack.log import get_logger
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.log import get_logger
from llama_stack_api import (
Conversation,
ConversationDeletedResource,

View file

@ -11,8 +11,8 @@ from datetime import UTC, datetime, timedelta
from starlette.types import ASGIApp, Receive, Scope, Send
from llama_stack.core.storage.datatypes import KVStoreReference, StorageBackendType
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore.kvstore import _KVSTORE_BACKENDS, kvstore_impl
from llama_stack.log import get_logger
from llama_stack_api.internal.kvstore import KVStore
logger = get_logger(name=__name__, category="core::server")

View file

@ -4,4 +4,6 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.internal.kvstore import KVStore as KVStore
from .kvstore import * # noqa: F401, F403

View file

@ -1,9 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.internal.kvstore import KVStore
__all__ = ["KVStore"]
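For reference, the compatibility surface after these two hunks: the deleted `api.py` shim used to re-export `KVStore`, while the package `__init__` (previous hunk) now re-exports it directly from `llama_stack_api.internal.kvstore`, so existing package-level imports elsewhere in this diff keep working. A small sketch, assuming the first hunk is `llama_stack/core/storage/kvstore/__init__.py`:

```python
# Canonical location of the protocol:
from llama_stack_api.internal.kvstore import KVStore

# Package-level re-export kept by the __init__ hunk above, used by the registry,
# batches provider, and other callers later in this diff:
from llama_stack.core.storage.kvstore import KVStore as PackageKVStore

assert KVStore is PackageKVStore
```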

View file

@ -13,11 +13,19 @@ from __future__ import annotations
import asyncio
from collections import defaultdict
from datetime import datetime
from typing import cast
from llama_stack.core.storage.datatypes import KVStoreReference, StorageBackendConfig, StorageBackendType
from llama_stack.core.storage.datatypes import KVStoreReference, StorageBackendConfig
from llama_stack_api.internal.kvstore import KVStore
from .api import KVStore
from .config import KVStoreConfig
from .config import (
KVStoreConfig,
MongoDBKVStoreConfig,
PostgresKVStoreConfig,
RedisKVStoreConfig,
SqliteKVStoreConfig,
)
def kvstore_dependencies():
@ -33,7 +41,7 @@ def kvstore_dependencies():
class InmemoryKVStoreImpl(KVStore):
def __init__(self):
self._store = {}
self._store: dict[str, str] = {}
async def initialize(self) -> None:
pass
@ -41,7 +49,7 @@ class InmemoryKVStoreImpl(KVStore):
async def get(self, key: str) -> str | None:
return self._store.get(key)
async def set(self, key: str, value: str) -> None:
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
self._store[key] = value
async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
@ -70,7 +78,8 @@ def register_kvstore_backends(backends: dict[str, StorageBackendConfig]) -> None
_KVSTORE_INSTANCES.clear()
_KVSTORE_LOCKS.clear()
for name, cfg in backends.items():
_KVSTORE_BACKENDS[name] = cfg
typed_cfg = cast(KVStoreConfig, cfg)
_KVSTORE_BACKENDS[name] = typed_cfg
async def kvstore_impl(reference: KVStoreReference) -> KVStore:
@ -94,19 +103,20 @@ async def kvstore_impl(reference: KVStoreReference) -> KVStore:
config = backend_config.model_copy()
config.namespace = reference.namespace
if config.type == StorageBackendType.KV_REDIS.value:
impl: KVStore
if isinstance(config, RedisKVStoreConfig):
from .redis import RedisKVStoreImpl
impl = RedisKVStoreImpl(config)
elif config.type == StorageBackendType.KV_SQLITE.value:
elif isinstance(config, SqliteKVStoreConfig):
from .sqlite import SqliteKVStoreImpl
impl = SqliteKVStoreImpl(config)
elif config.type == StorageBackendType.KV_POSTGRES.value:
elif isinstance(config, PostgresKVStoreConfig):
from .postgres import PostgresKVStoreImpl
impl = PostgresKVStoreImpl(config)
elif config.type == StorageBackendType.KV_MONGODB.value:
elif isinstance(config, MongoDBKVStoreConfig):
from .mongodb import MongoDBKVStoreImpl
impl = MongoDBKVStoreImpl(config)
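Taken together, the hunks above make `kvstore_impl()` dispatch on the concrete config class via `isinstance` rather than comparing `config.type` strings, have `register_kvstore_backends()` cast each backend config before caching it, and bring the in-memory store's `set()` in line with the `KVStore` interface by accepting an optional expiration. A minimal end-to-end sketch; `db_path=` on `SqliteKVStoreConfig` and `backend=` on `KVStoreReference` are assumed field names for illustration (the real definitions live in `llama_stack.core.storage.datatypes`):

```python
import asyncio

from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends


async def main() -> None:
    # Register a named backend, then resolve a namespaced store against it.
    register_kvstore_backends({"kv_default": SqliteKVStoreConfig(db_path="/tmp/kv.db")})
    store = await kvstore_impl(KVStoreReference(backend="kv_default", namespace="registry"))

    await store.set("k", "v")  # set() also accepts an optional expiration datetime
    assert await store.get("k") == "v"


asyncio.run(main())
```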

View file

@ -9,8 +9,8 @@ from datetime import datetime
from pymongo import AsyncMongoClient
from pymongo.asynchronous.collection import AsyncCollection
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import KVStore
from llama_stack.log import get_logger
from ..config import MongoDBKVStoreConfig

View file

@ -7,11 +7,12 @@
from datetime import datetime
import psycopg2
from psycopg2.extensions import connection as PGConnection
from psycopg2.extras import DictCursor
from llama_stack.log import get_logger
from llama_stack_api.internal.kvstore import KVStore
from ..api import KVStore
from ..config import PostgresKVStoreConfig
log = get_logger(name=__name__, category="providers::utils")
@ -20,12 +21,12 @@ log = get_logger(name=__name__, category="providers::utils")
class PostgresKVStoreImpl(KVStore):
def __init__(self, config: PostgresKVStoreConfig):
self.config = config
self.conn = None
self.cursor = None
self._conn: PGConnection | None = None
self._cursor: DictCursor | None = None
async def initialize(self) -> None:
try:
self.conn = psycopg2.connect(
self._conn = psycopg2.connect(
host=self.config.host,
port=self.config.port,
database=self.config.db,
@ -34,11 +35,11 @@ class PostgresKVStoreImpl(KVStore):
sslmode=self.config.ssl_mode,
sslrootcert=self.config.ca_cert_path,
)
self.conn.autocommit = True
self.cursor = self.conn.cursor(cursor_factory=DictCursor)
self._conn.autocommit = True
self._cursor = self._conn.cursor(cursor_factory=DictCursor)
# Create table if it doesn't exist
self.cursor.execute(
self._cursor.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.config.table_name} (
key TEXT PRIMARY KEY,
@ -51,6 +52,11 @@ class PostgresKVStoreImpl(KVStore):
log.exception("Could not connect to PostgreSQL database server")
raise RuntimeError("Could not connect to PostgreSQL database server") from e
def _cursor_or_raise(self) -> DictCursor:
if self._cursor is None:
raise RuntimeError("Postgres client not initialized")
return self._cursor
def _namespaced_key(self, key: str) -> str:
if not self.config.namespace:
return key
@ -58,7 +64,8 @@ class PostgresKVStoreImpl(KVStore):
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
key = self._namespaced_key(key)
self.cursor.execute(
cursor = self._cursor_or_raise()
cursor.execute(
f"""
INSERT INTO {self.config.table_name} (key, value, expiration)
VALUES (%s, %s, %s)
@ -70,7 +77,8 @@ class PostgresKVStoreImpl(KVStore):
async def get(self, key: str) -> str | None:
key = self._namespaced_key(key)
self.cursor.execute(
cursor = self._cursor_or_raise()
cursor.execute(
f"""
SELECT value FROM {self.config.table_name}
WHERE key = %s
@ -78,12 +86,13 @@ class PostgresKVStoreImpl(KVStore):
""",
(key,),
)
result = self.cursor.fetchone()
result = cursor.fetchone()
return result[0] if result else None
async def delete(self, key: str) -> None:
key = self._namespaced_key(key)
self.cursor.execute(
cursor = self._cursor_or_raise()
cursor.execute(
f"DELETE FROM {self.config.table_name} WHERE key = %s",
(key,),
)
@ -92,7 +101,8 @@ class PostgresKVStoreImpl(KVStore):
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
self.cursor.execute(
cursor = self._cursor_or_raise()
cursor.execute(
f"""
SELECT value FROM {self.config.table_name}
WHERE key >= %s AND key < %s
@ -101,14 +111,15 @@ class PostgresKVStoreImpl(KVStore):
""",
(start_key, end_key),
)
return [row[0] for row in self.cursor.fetchall()]
return [row[0] for row in cursor.fetchall()]
async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
self.cursor.execute(
cursor = self._cursor_or_raise()
cursor.execute(
f"SELECT key FROM {self.config.table_name} WHERE key >= %s AND key < %s",
(start_key, end_key),
)
return [row[0] for row in self.cursor.fetchall()]
return [row[0] for row in cursor.fetchall()]
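The Postgres store now keeps its connection and cursor as typed optionals (`_conn`, `_cursor`) and routes every query through `_cursor_or_raise()`, so an uninitialized store fails with a clear error and call sites never touch an `Optional` cursor. The same pattern in isolation, as a generic illustration (not code from this diff):

```python
from __future__ import annotations


class Connection:
    """Placeholder stand-in for a DB connection (illustration only)."""

    def execute(self, sql: str) -> None:
        print(f"executing: {sql}")


class Client:
    def __init__(self) -> None:
        self._conn: Connection | None = None  # stays Optional until initialize()

    def initialize(self) -> None:
        self._conn = Connection()

    def _conn_or_raise(self) -> Connection:
        if self._conn is None:
            raise RuntimeError("client not initialized")
        return self._conn  # narrowed to Connection, so call sites avoid Optional access

    def query(self) -> None:
        self._conn_or_raise().execute("SELECT 1")
```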

View file

@ -8,16 +8,23 @@ from datetime import datetime
from redis.asyncio import Redis
from ..api import KVStore
from llama_stack_api.internal.kvstore import KVStore
from ..config import RedisKVStoreConfig
class RedisKVStoreImpl(KVStore):
def __init__(self, config: RedisKVStoreConfig):
self.config = config
self._redis: Redis | None = None
async def initialize(self) -> None:
self.redis = Redis.from_url(self.config.url)
self._redis = Redis.from_url(self.config.url)
def _client(self) -> Redis:
if self._redis is None:
raise RuntimeError("Redis client not initialized")
return self._redis
def _namespaced_key(self, key: str) -> str:
if not self.config.namespace:
@ -26,30 +33,37 @@ class RedisKVStoreImpl(KVStore):
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
key = self._namespaced_key(key)
await self.redis.set(key, value)
client = self._client()
await client.set(key, value)
if expiration:
await self.redis.expireat(key, expiration)
await client.expireat(key, expiration)
async def get(self, key: str) -> str | None:
key = self._namespaced_key(key)
value = await self.redis.get(key)
client = self._client()
value = await client.get(key)
if value is None:
return None
await self.redis.ttl(key)
await client.ttl(key)
if isinstance(value, bytes):
return value.decode("utf-8")
if isinstance(value, str):
return value
return str(value)
async def delete(self, key: str) -> None:
key = self._namespaced_key(key)
await self.redis.delete(key)
await self._client().delete(key)
async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
client = self._client()
cursor = 0
pattern = start_key + "*" # Match all keys starting with start_key prefix
matching_keys = []
matching_keys: list[str | bytes] = []
while True:
cursor, keys = await self.redis.scan(cursor, match=pattern, count=1000)
cursor, keys = await client.scan(cursor, match=pattern, count=1000)
for key in keys:
key_str = key.decode("utf-8") if isinstance(key, bytes) else key
@ -61,7 +75,7 @@ class RedisKVStoreImpl(KVStore):
# Then fetch all values in a single MGET call
if matching_keys:
values = await self.redis.mget(matching_keys)
values = await client.mget(matching_keys)
return [
value.decode("utf-8") if isinstance(value, bytes) else value for value in values if value is not None
]
@ -70,7 +84,18 @@ class RedisKVStoreImpl(KVStore):
async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
"""Get all keys in the given range."""
matching_keys = await self.redis.zrangebylex(self.namespace, f"[{start_key}", f"[{end_key}")
if not matching_keys:
return []
return [k.decode("utf-8") for k in matching_keys]
start_key = self._namespaced_key(start_key)
end_key = self._namespaced_key(end_key)
client = self._client()
cursor = 0
pattern = start_key + "*"
result: list[str] = []
while True:
cursor, keys = await client.scan(cursor, match=pattern, count=1000)
for key in keys:
key_str = key.decode("utf-8") if isinstance(key, bytes) else str(key)
if start_key <= key_str <= end_key:
result.append(key_str)
if cursor == 0:
break
return result
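The Redis store gets the same treatment (`_redis` optional plus a `_client()` guard), and `keys_in_range()` is reimplemented with a prefix `SCAN` plus an in-process lexicographic filter, mirroring `values_in_range()`; the previous `zrangebylex` version queried a sorted set named `self.namespace`, which the `set()` path shown here never appears to populate. A usage sketch that works against any `KVStore` backend (key names are illustrative):

```python
from llama_stack_api.internal.kvstore import KVStore


async def list_job_keys(store: KVStore) -> list[str]:
    await store.set("job:001", "queued")
    await store.set("job:002", "running")
    # Both range methods walk the namespaced lexicographic range [start_key, end_key].
    return await store.keys_in_range("job:000", "job:999")
```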

View file

@ -10,8 +10,8 @@ from datetime import datetime
import aiosqlite
from llama_stack.log import get_logger
from llama_stack_api.internal.kvstore import KVStore
from ..api import KVStore
from ..config import SqliteKVStoreConfig
logger = get_logger(name=__name__, category="providers::utils")

View file

@ -4,4 +4,14 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.internal.sqlstore import (
ColumnDefinition as ColumnDefinition,
)
from llama_stack_api.internal.sqlstore import (
ColumnType as ColumnType,
)
from llama_stack_api.internal.sqlstore import (
SqlStore as SqlStore,
)
from .sqlstore import * # noqa: F401,F403

View file

@ -1,10 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api import PaginatedResponse
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore
__all__ = ["ColumnDefinition", "ColumnType", "SqlStore", "PaginatedResponse"]

View file

@ -14,8 +14,8 @@ from llama_stack.core.datatypes import User
from llama_stack.core.request_headers import get_authenticated_user
from llama_stack.core.storage.datatypes import StorageBackendType
from llama_stack.log import get_logger
from .api import ColumnDefinition, ColumnType, PaginatedResponse, SqlStore
from llama_stack_api import PaginatedResponse
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore
logger = get_logger(name=__name__, category="providers::utils")

View file

@ -29,8 +29,7 @@ from sqlalchemy.sql.elements import ColumnElement
from llama_stack.core.storage.datatypes import SqlAlchemySqlStoreConfig
from llama_stack.log import get_logger
from llama_stack_api import PaginatedResponse
from .api import ColumnDefinition, ColumnType, SqlStore
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore
logger = get_logger(name=__name__, category="providers::utils")

View file

@ -16,8 +16,7 @@ from llama_stack.core.storage.datatypes import (
StorageBackendConfig,
StorageBackendType,
)
from .api import SqlStore
from llama_stack_api.internal.sqlstore import SqlStore
sql_store_pip_packages = ["sqlalchemy[asyncio]", "aiosqlite", "asyncpg"]

View file

@ -12,8 +12,8 @@ import pydantic
from llama_stack.core.datatypes import RoutableObjectWithProvider
from llama_stack.core.storage.datatypes import KVStoreReference
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import KVStore, kvstore_impl
from llama_stack.log import get_logger
logger = get_logger(__name__, category="core::registry")

View file

@ -17,6 +17,8 @@ from llama_stack.core.datatypes import (
ToolGroupInput,
VectorStoresConfig,
)
from llama_stack.core.storage.kvstore.config import PostgresKVStoreConfig
from llama_stack.core.storage.sqlstore.sqlstore import PostgresSqlStoreConfig
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.distributions.template import DistributionTemplate, RunConfigSettings
from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig
@ -35,8 +37,6 @@ from llama_stack.providers.remote.vector_io.pgvector.config import (
)
from llama_stack.providers.remote.vector_io.qdrant.config import QdrantVectorIOConfig
from llama_stack.providers.remote.vector_io.weaviate.config import WeaviateVectorIOConfig
from llama_stack.core.storage.kvstore.config import PostgresKVStoreConfig
from llama_stack.core.storage.sqlstore.sqlstore import PostgresSqlStoreConfig
from llama_stack_api import RemoteProviderSpec

View file

@ -35,13 +35,13 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageBackendType,
)
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
from llama_stack.core.storage.kvstore.config import get_pip_packages as get_kv_pip_packages
from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
from llama_stack.core.storage.sqlstore.sqlstore import get_pip_packages as get_sql_pip_packages
from llama_stack.core.utils.dynamic import instantiate_class_type
from llama_stack.core.utils.image_types import LlamaStackImageType
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
from llama_stack_api import DatasetPurpose, ModelType

View file

@ -6,8 +6,8 @@
from llama_stack.core.datatypes import AccessRule
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import InmemoryKVStoreImpl, kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from llama_stack_api import (
Agents,

View file

@ -16,8 +16,8 @@ from typing import Any, Literal
from openai.types.batch import BatchError, Errors
from pydantic import BaseModel
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import KVStore
from llama_stack.log import get_logger
from llama_stack_api import (
Batches,
BatchObject,

View file

@ -5,8 +5,8 @@
# the root directory of this source tree.
from typing import Any
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
from llama_stack.providers.utils.pagination import paginate_records
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse

View file

@ -8,8 +8,8 @@ from typing import Any
from tqdm import tqdm
from llama_stack.providers.utils.common.data_schema_validator import ColumnName
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.common.data_schema_validator import ColumnName
from llama_stack_api import (
Agents,
Benchmark,

View file

@ -13,10 +13,10 @@ from fastapi import Depends, File, Form, Response, UploadFile
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.id_generation import generate_object_id
from llama_stack.log import get_logger
from llama_stack.providers.utils.files.form_data import parse_expires_after
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.files.form_data import parse_expires_after
from llama_stack_api import (
ExpiresAfter,
Files,

View file

@ -14,8 +14,8 @@ import faiss # type: ignore[import-untyped]
import numpy as np
from numpy.typing import NDArray
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack_api import (

View file

@ -14,8 +14,8 @@ import numpy as np
import sqlite_vec # type: ignore[import-untyped]
from numpy.typing import NDArray
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
RERANKER_TYPE_RRF,

View file

@ -10,9 +10,9 @@ from typing import Annotated, Any
from fastapi import Depends, File, Form, Response, UploadFile
from llama_stack.core.datatypes import AccessRule
from llama_stack.providers.utils.files.form_data import parse_expires_after
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.providers.utils.files.form_data import parse_expires_after
from llama_stack_api import (
ExpiresAfter,
Files,

View file

@ -19,9 +19,9 @@ if TYPE_CHECKING:
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.id_generation import generate_object_id
from llama_stack.providers.utils.files.form_data import parse_expires_after
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.providers.utils.files.form_data import parse_expires_after
from llama_stack_api import (
ExpiresAfter,
Files,

View file

@ -11,9 +11,9 @@ from urllib.parse import urlparse
import chromadb
from numpy.typing import NDArray
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.inline.vector_io.chroma import ChromaVectorIOConfig as InlineChromaVectorIOConfig
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack_api import (

View file

@ -11,9 +11,9 @@ from typing import Any
from numpy.typing import NDArray
from pymilvus import AnnSearchRequest, DataType, Function, FunctionType, MilvusClient, RRFRanker, WeightedRanker
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.inline.vector_io.milvus import MilvusVectorIOConfig as InlineMilvusVectorIOConfig
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
RERANKER_TYPE_WEIGHTED,

View file

@ -13,9 +13,9 @@ from psycopg2 import sql
from psycopg2.extras import Json, execute_values
from pydantic import BaseModel, TypeAdapter
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack.providers.utils.vector_io.vector_utils import WeightedInMemoryAggregator, sanitize_collection_name

View file

@ -13,9 +13,9 @@ from numpy.typing import NDArray
from qdrant_client import AsyncQdrantClient, models
from qdrant_client.models import PointStruct
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.inline.vector_io.qdrant import QdrantVectorIOConfig as InlineQdrantVectorIOConfig
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import ChunkForDeletion, EmbeddingIndex, VectorStoreWithIndex
from llama_stack_api import (

View file

@ -13,8 +13,8 @@ from weaviate.classes.init import Auth
from weaviate.classes.query import Filter, HybridFusion
from llama_stack.core.request_headers import NeedsRequestProviderData
from llama_stack.log import get_logger
from llama_stack.core.storage.kvstore import kvstore_impl
from llama_stack.log import get_logger
from llama_stack.providers.utils.memory.openai_vector_store_mixin import OpenAIVectorStoreMixin
from llama_stack.providers.utils.memory.vector_store import (
RERANKER_TYPE_RRF,

View file

@ -10,6 +10,8 @@ from sqlalchemy.exc import IntegrityError
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.storage.datatypes import InferenceStoreReference, StorageBackendType
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import _SQLSTORE_BACKENDS, sqlstore_impl
from llama_stack.log import get_logger
from llama_stack_api import (
ListOpenAIChatCompletionResponse,
@ -18,10 +20,7 @@ from llama_stack_api import (
OpenAIMessageParam,
Order,
)
from llama_stack.core.storage.sqlstore.api import ColumnDefinition, ColumnType
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import _SQLSTORE_BACKENDS, sqlstore_impl
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
logger = get_logger(name=__name__, category="inference")

View file

@ -6,6 +6,8 @@
from llama_stack.core.datatypes import AccessRule
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqlStoreReference
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack.log import get_logger
from llama_stack_api import (
ListOpenAIResponseInputItem,
@ -17,10 +19,7 @@ from llama_stack_api import (
OpenAIResponseObjectWithInput,
Order,
)
from llama_stack.core.storage.sqlstore.api import ColumnDefinition, ColumnType
from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
from llama_stack.core.storage.sqlstore.sqlstore import sqlstore_impl
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
logger = get_logger(name=__name__, category="openai_responses")

View file

@ -1,3 +1,9 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
# Internal subpackage for shared interfaces that are not part of the public API.
__all__: list[str] = []

View file

@ -9,11 +9,11 @@ import pytest
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.files.localfs import (
LocalfsFilesImpl,
LocalfsFilesImplConfig,
)
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError

View file

@ -6,9 +6,9 @@
import pytest
from llama_stack.core.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
from llama_stack.core.storage.kvstore.sqlite import SqliteKVStoreImpl
from llama_stack.core.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
@pytest.fixture(scope="function")

View file

@ -17,6 +17,7 @@ from openai.types.chat.chat_completion_chunk import (
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
OpenAIResponsesImpl,
)
@ -24,7 +25,6 @@ from llama_stack.providers.utils.responses.responses_store import (
ResponsesStore,
_OpenAIResponseObjectWithInputAndMessages,
)
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api.agents import Order
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,

View file

@ -13,9 +13,9 @@ from unittest.mock import AsyncMock
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.providers.inline.batches.reference.batches import ReferenceBatchesImpl
from llama_stack.providers.inline.batches.reference.config import ReferenceBatchesImplConfig
from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
@pytest.fixture

View file

@ -9,8 +9,8 @@ import pytest
from moto import mock_aws
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
from llama_stack.providers.remote.files.s3 import S3FilesImplConfig, get_adapter_impl
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.remote.files.s3 import S3FilesImplConfig, get_adapter_impl
class MockUploadFile:

View file

@ -11,13 +11,13 @@ import numpy as np
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
from llama_stack.core.storage.kvstore import register_kvstore_backends
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter
from llama_stack.providers.inline.vector_io.sqlite_vec import SQLiteVectorIOConfig
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteVecIndex, SQLiteVecVectorIOAdapter
from llama_stack.providers.remote.vector_io.pgvector.config import PGVectorVectorIOConfig
from llama_stack.providers.remote.vector_io.pgvector.pgvector import PGVectorIndex, PGVectorVectorIOAdapter
from llama_stack.core.storage.kvstore import register_kvstore_backends
from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore
EMBEDDING_DIMENSION = 768

View file

@ -9,12 +9,12 @@ import pytest
from llama_stack.core.datatypes import VectorStoreWithOwner
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.core.store.registry import (
KEY_FORMAT,
CachedDiskDistributionRegistry,
DiskDistributionRegistry,
)
from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack_api import Model, VectorStore

View file

@ -9,8 +9,8 @@ import time
import pytest
from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
from llama_stack.providers.utils.inference.inference_store import InferenceStore
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.inference.inference_store import InferenceStore
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,

View file

@ -11,8 +11,8 @@ from uuid import uuid4
import pytest
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order