refactor: Add ProviderContext for a flexible storage directory

- Introduce ProviderContext class to decouple provider storage paths from absolute paths
- Add a storage_dir attribute to StackRunConfig so the directory can be supplied from the CLI
- Implement storage directory resolution with prioritized fallbacks (sketched below):
  1. CLI option (--state-directory)
  2. Environment variable (LLAMA_STACK_STATE_DIR)
  3. Default distribution directory
- Standardize provider signatures to follow the (context, config, deps) pattern
- Update provider implementations to use the new context-based approach
- Add comprehensive tests to verify state directory resolution
Roland Huß 2025-05-12 11:44:21 +02:00
parent dd07c7a5b5
commit e6c9aebe47
41 changed files with 242 additions and 81 deletions
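
A minimal sketch of the resolution order described above. The helper name, its parameters, and the default base directory are illustrative assumptions; only ProviderContext, storage_dir, --state-directory, and LLAMA_STACK_STATE_DIR come from this change:

import os
from pathlib import Path

# Hypothetical default base directory; the actual default distribution directory may differ.
DEFAULT_DISTRIBS_BASE_DIR = Path.home() / ".llama" / "distributions"


def resolve_storage_dir(state_directory: str | None, distro_name: str) -> Path:
    """Resolve the provider storage directory with prioritized fallbacks."""
    # 1. CLI option (--state-directory) takes precedence when provided.
    if state_directory:
        return Path(state_directory)
    # 2. Otherwise honor the LLAMA_STACK_STATE_DIR environment variable.
    env_dir = os.environ.get("LLAMA_STACK_STATE_DIR")
    if env_dir:
        return Path(env_dir)
    # 3. Fall back to the default distribution directory.
    return DEFAULT_DISTRIBS_BASE_DIR / distro_name

Providers receive the resolved directory through ProviderContext.storage_dir and, as in the sqlite_vec adapter below, resolve any relative paths from their config against it.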


@@ -6,16 +6,17 @@
 from typing import Any
-from llama_stack.providers.datatypes import Api
+from llama_stack.providers.datatypes import Api, ProviderContext
 from .config import ChromaVectorIOConfig
-async def get_provider_impl(config: ChromaVectorIOConfig, deps: dict[Api, Any]):
+async def get_provider_impl(context: ProviderContext, config: ChromaVectorIOConfig, deps: dict[Api, Any]):
     from llama_stack.providers.remote.vector_io.chroma.chroma import (
         ChromaVectorIOAdapter,
     )
+    # Pass config directly since ChromaVectorIOAdapter doesn't accept context
     impl = ChromaVectorIOAdapter(config, deps[Api.inference])
     await impl.initialize()
     return impl


@@ -6,16 +6,16 @@
 from typing import Any
-from llama_stack.providers.datatypes import Api
+from llama_stack.providers.datatypes import Api, ProviderContext
 from .config import FaissVectorIOConfig
-async def get_provider_impl(config: FaissVectorIOConfig, deps: dict[Api, Any]):
+async def get_provider_impl(context: ProviderContext, config: FaissVectorIOConfig, deps: dict[Api, Any]):
     from .faiss import FaissVectorIOAdapter
     assert isinstance(config, FaissVectorIOConfig), f"Unexpected config type: {type(config)}"
-    impl = FaissVectorIOAdapter(config, deps[Api.inference])
+    impl = FaissVectorIOAdapter(context, config, deps[Api.inference])
     await impl.initialize()
     return impl


@@ -19,7 +19,7 @@ from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.inference.inference import Inference
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
-from llama_stack.providers.datatypes import VectorDBsProtocolPrivate
+from llama_stack.providers.datatypes import ProviderContext, VectorDBsProtocolPrivate
 from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.kvstore.api import KVStore
 from llama_stack.providers.utils.memory.vector_store import (
@@ -114,9 +114,11 @@ class FaissIndex(EmbeddingIndex):
 class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
-    def __init__(self, config: FaissVectorIOConfig, inference_api: Inference) -> None:
+    def __init__(self, context: ProviderContext, config: FaissVectorIOConfig, inference_api: Inference) -> None:
+        self.context = context
         self.config = config
         self.inference_api = inference_api
+        self.storage_dir = context.storage_dir if context else None
         self.cache: dict[str, VectorDBWithIndex] = {}
         self.kvstore: KVStore | None = None


@@ -6,14 +6,15 @@
 from typing import Any
-from llama_stack.providers.datatypes import Api
+from llama_stack.providers.datatypes import Api, ProviderContext
 from .config import MilvusVectorIOConfig
-async def get_provider_impl(config: MilvusVectorIOConfig, deps: dict[Api, Any]):
+async def get_provider_impl(context: ProviderContext, config: MilvusVectorIOConfig, deps: dict[Api, Any]):
     from llama_stack.providers.remote.vector_io.milvus.milvus import MilvusVectorIOAdapter
+    # Pass config directly since MilvusVectorIOAdapter doesn't accept context
     impl = MilvusVectorIOAdapter(config, deps[Api.inference])
     await impl.initialize()
     return impl


@@ -6,15 +6,15 @@
 from typing import Any
-from llama_stack.providers.datatypes import Api
+from llama_stack.providers.datatypes import Api, ProviderContext
 from .config import SQLiteVectorIOConfig
-async def get_provider_impl(config: SQLiteVectorIOConfig, deps: dict[Api, Any]):
+async def get_provider_impl(context: ProviderContext, config: SQLiteVectorIOConfig, deps: dict[Api, Any]):
     from .sqlite_vec import SQLiteVecVectorIOAdapter
     assert isinstance(config, SQLiteVectorIOConfig), f"Unexpected config type: {type(config)}"
-    impl = SQLiteVecVectorIOAdapter(config, deps[Api.inference])
+    impl = SQLiteVecVectorIOAdapter(context, config, deps[Api.inference])
     await impl.initialize()
     return impl


@@ -10,6 +10,7 @@ import logging
 import sqlite3
 import struct
 import uuid
+from pathlib import Path
 from typing import Any
 import numpy as np
@@ -19,7 +20,7 @@ from numpy.typing import NDArray
 from llama_stack.apis.inference.inference import Inference
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
-from llama_stack.providers.datatypes import VectorDBsProtocolPrivate
+from llama_stack.providers.datatypes import ProviderContext, VectorDBsProtocolPrivate
 from llama_stack.providers.utils.memory.vector_store import EmbeddingIndex, VectorDBWithIndex
 logger = logging.getLogger(__name__)
@@ -206,15 +207,23 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     and creates a cache of VectorDBWithIndex instances (each wrapping a SQLiteVecIndex).
     """
-    def __init__(self, config, inference_api: Inference) -> None:
+    def __init__(self, context: ProviderContext, config, inference_api: Inference) -> None:
         self.config = config
         self.inference_api = inference_api
         self.cache: dict[str, VectorDBWithIndex] = {}
+        self.storage_dir = context.storage_dir
+        self.db_path = self._resolve_path(self.config.db_path)
+    def _resolve_path(self, path: str | Path) -> Path:
+        path = Path(path)
+        if path.is_absolute():
+            return path
+        return self.storage_dir / path
     async def initialize(self) -> None:
         def _setup_connection():
             # Open a connection to the SQLite database (the file is specified in the config).
-            connection = _create_sqlite_connection(self.config.db_path)
+            connection = _create_sqlite_connection(self.db_path)
             cur = connection.cursor()
             try:
                 # Create a table to persist vector DB registrations.
@@ -237,9 +246,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
         for row in rows:
             vector_db_data = row[0]
             vector_db = VectorDB.model_validate_json(vector_db_data)
-            index = await SQLiteVecIndex.create(
-                vector_db.embedding_dimension, self.config.db_path, vector_db.identifier
-            )
+            index = await SQLiteVecIndex.create(vector_db.embedding_dimension, str(self.db_path), vector_db.identifier)
             self.cache[vector_db.identifier] = VectorDBWithIndex(vector_db, index, self.inference_api)
     async def shutdown(self) -> None:
@@ -248,7 +255,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
     async def register_vector_db(self, vector_db: VectorDB) -> None:
         def _register_db():
-            connection = _create_sqlite_connection(self.config.db_path)
+            connection = _create_sqlite_connection(self.db_path)
             cur = connection.cursor()
             try:
                 cur.execute(
@@ -261,7 +268,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
                 connection.close()
         await asyncio.to_thread(_register_db)
-        index = await SQLiteVecIndex.create(vector_db.embedding_dimension, self.config.db_path, vector_db.identifier)
+        index = await SQLiteVecIndex.create(vector_db.embedding_dimension, str(self.db_path), vector_db.identifier)
         self.cache[vector_db.identifier] = VectorDBWithIndex(vector_db, index, self.inference_api)
     async def list_vector_dbs(self) -> list[VectorDB]:
@@ -275,7 +282,7 @@ class SQLiteVecVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
             del self.cache[vector_db_id]
         def _delete_vector_db_from_registry():
-            connection = _create_sqlite_connection(self.config.db_path)
+            connection = _create_sqlite_connection(self.db_path)
             cur = connection.cursor()
             try:
                 cur.execute("DELETE FROM vector_dbs WHERE id = ?", (vector_db_id,))