Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-30 10:09:32 +00:00
refactor: Add ProviderContext for a flexible storage directory
- Introduce a ProviderContext class to decouple provider storage paths from absolute paths
- Add a storage_dir attribute to StackRunConfig to accept CLI options
- Implement storage directory resolution with prioritized fallbacks:
  1. CLI option (--state-directory)
  2. Environment variable (LLAMA_STACK_STATE_DIR)
  3. Default distribution directory
- Standardize provider signatures to follow the (context, config, deps) pattern
- Update provider implementations to use the new context-based approach
- Add comprehensive tests to verify state directory resolution
parent dd07c7a5b5
commit e6c9aebe47
41 changed files with 242 additions and 81 deletions
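The prioritized fallback described in the commit message amounts to a small resolution helper. The sketch below is illustrative only: the function name resolve_storage_dir, the distribution_name parameter, and the ~/.llama/distributions default are assumptions, not code from this commit; only the priority order (CLI option, then LLAMA_STACK_STATE_DIR, then the default distribution directory) comes from the message above.

import os
from pathlib import Path

# Assumed default location for illustration; the distribution's actual
# default directory may differ.
DEFAULT_DISTRIBUTIONS_DIR = Path.home() / ".llama" / "distributions"


def resolve_storage_dir(cli_state_directory: str | None, distribution_name: str) -> Path:
    # 1. CLI option (--state-directory)
    if cli_state_directory:
        return Path(cli_state_directory)
    # 2. Environment variable (LLAMA_STACK_STATE_DIR)
    env_dir = os.environ.get("LLAMA_STACK_STATE_DIR")
    if env_dir:
        return Path(env_dir)
    # 3. Default distribution directory
    return DEFAULT_DISTRIBUTIONS_DIR / distribution_name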
faiss/__init__.py
@@ -6,16 +6,16 @@
 from typing import Any
 
-from llama_stack.providers.datatypes import Api
+from llama_stack.providers.datatypes import Api, ProviderContext
 
 from .config import FaissVectorIOConfig
 
 
-async def get_provider_impl(config: FaissVectorIOConfig, deps: dict[Api, Any]):
+async def get_provider_impl(context: ProviderContext, config: FaissVectorIOConfig, deps: dict[Api, Any]):
     from .faiss import FaissVectorIOAdapter
 
     assert isinstance(config, FaissVectorIOConfig), f"Unexpected config type: {type(config)}"
 
-    impl = FaissVectorIOAdapter(config, deps[Api.inference])
+    impl = FaissVectorIOAdapter(context, config, deps[Api.inference])
     await impl.initialize()
     return impl
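For reference, a caller wiring up the provider under the new (context, config, deps) signature might look like the sketch below. It is an assumption-laden illustration: the ProviderContext(storage_dir=...) constructor, a default-constructible FaissVectorIOConfig, and the provider package import path are not shown in this diff.

# Sketch only. The import path of the faiss provider package and the
# ProviderContext constructor below are assumptions, not part of this diff.
from llama_stack.providers.datatypes import Api, ProviderContext
from llama_stack.providers.inline.vector_io.faiss import get_provider_impl
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig


async def build_faiss_provider(storage_dir: str, inference_impl) -> object:
    context = ProviderContext(storage_dir=storage_dir)  # assumed keyword constructor
    config = FaissVectorIOConfig()                       # assumed default-constructible
    deps = {Api.inference: inference_impl}
    return await get_provider_impl(context, config, deps)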
faiss/faiss.py
@@ -19,7 +19,7 @@ from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.inference.inference import Inference
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse, VectorIO
-from llama_stack.providers.datatypes import VectorDBsProtocolPrivate
+from llama_stack.providers.datatypes import ProviderContext, VectorDBsProtocolPrivate
 from llama_stack.providers.utils.kvstore import kvstore_impl
 from llama_stack.providers.utils.kvstore.api import KVStore
 from llama_stack.providers.utils.memory.vector_store import (
@@ -114,9 +114,11 @@ class FaissIndex(EmbeddingIndex):
 
 
 class FaissVectorIOAdapter(VectorIO, VectorDBsProtocolPrivate):
-    def __init__(self, config: FaissVectorIOConfig, inference_api: Inference) -> None:
+    def __init__(self, context: ProviderContext, config: FaissVectorIOConfig, inference_api: Inference) -> None:
+        self.context = context
         self.config = config
         self.inference_api = inference_api
+        self.storage_dir = context.storage_dir if context else None
         self.cache: dict[str, VectorDBWithIndex] = {}
         self.kvstore: KVStore | None = None
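With context.storage_dir recorded on the adapter, a provider can derive its persistence location from the run's context instead of a hard-coded absolute path. A minimal sketch follows, assuming a provider-chosen subdirectory; the "faiss_store" name and the cwd fallback are illustrative, not taken from this diff.

from pathlib import Path


def faiss_persistence_path(storage_dir: str | None) -> Path:
    # Mirror the `if context else None` guard in __init__: fall back when
    # no storage directory was resolved for this run.
    base = Path(storage_dir) if storage_dir else Path.cwd()
    return base / "faiss_store"  # assumed subdirectory name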