refactor: Add ProviderContext for a flexible storage directory

- Introduce a ProviderContext class to decouple provider storage paths from hard-coded absolute paths
- Add a storage_dir attribute to StackRunConfig to carry the CLI option
- Implement storage directory resolution with prioritized fallbacks (see the sketch after this list):
  1. CLI option (--state-directory)
  2. Environment variable (LLAMA_STACK_STATE_DIR)
  3. Default distribution directory
- Standardize provider signatures to follow the (context, config, deps) pattern
- Update provider implementations to use the new context-based approach
- Add comprehensive tests to verify state directory resolution
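For illustration, the resolution chain could be implemented roughly as follows. This is a minimal sketch, not the actual llama-stack code: resolve_storage_dir and DEFAULT_DISTRIB_DIR are hypothetical names, and the assumed default location may differ from the real distribution directory.

import os
from pathlib import Path

# Assumed default location; the real per-distribution default may differ.
DEFAULT_DISTRIB_DIR = Path.home() / ".llama" / "distributions"

def resolve_storage_dir(cli_state_dir: str | None, distro_name: str) -> Path:
    # 1. An explicit CLI option (--state-directory) takes precedence.
    if cli_state_dir:
        return Path(cli_state_dir)
    # 2. Next, the LLAMA_STACK_STATE_DIR environment variable.
    env_dir = os.environ.get("LLAMA_STACK_STATE_DIR")
    if env_dir:
        return Path(env_dir)
    # 3. Finally, fall back to the default distribution directory.
    return DEFAULT_DISTRIB_DIR / distro_name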
Roland Huß 2025-05-12 11:44:21 +02:00
parent dd07c7a5b5
commit e6c9aebe47
41 changed files with 242 additions and 81 deletions

llama_stack/providers/inline/post_training/torchtune/__init__.py

@@ -7,6 +7,7 @@
 from typing import Any
 from llama_stack.distribution.datatypes import Api
+from llama_stack.providers.datatypes import ProviderContext
 from .config import TorchtunePostTrainingConfig
@@ -14,12 +15,14 @@ from .config import TorchtunePostTrainingConfig
 async def get_provider_impl(
+    context: ProviderContext,
     config: TorchtunePostTrainingConfig,
     deps: dict[Api, Any],
 ):
     from .post_training import TorchtunePostTrainingImpl
     impl = TorchtunePostTrainingImpl(
+        context,
         config,
         deps[Api.datasetio],
         deps[Api.datasets],

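The diff does not show ProviderContext itself. As a rough sketch of the idea, it could be as small as a single resolved path that providers build on; the field and helper names below are assumptions, not the real class definition:

from dataclasses import dataclass
from pathlib import Path

@dataclass
class ProviderContext:
    # The storage root resolved via the CLI / env var / default chain above.
    storage_dir: Path

# Providers derive their state locations from the context instead of
# hard-coding absolute paths (helper name is illustrative):
def provider_state_path(context: ProviderContext, *parts: str) -> Path:
    return context.storage_dir.joinpath(*parts)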
llama_stack/providers/inline/post_training/torchtune/post_training.py

@@ -20,6 +20,7 @@ from llama_stack.apis.post_training import (
     PostTrainingJobStatusResponse,
     TrainingConfig,
 )
+from llama_stack.providers.datatypes import ProviderContext
 from llama_stack.providers.inline.post_training.torchtune.config import (
     TorchtunePostTrainingConfig,
 )
@@ -42,10 +43,12 @@ _JOB_TYPE_SUPERVISED_FINE_TUNE = "supervised-fine-tune"
 class TorchtunePostTrainingImpl:
     def __init__(
         self,
+        context: ProviderContext,
         config: TorchtunePostTrainingConfig,
         datasetio_api: DatasetIO,
         datasets: Datasets,
     ) -> None:
+        self.context = context
         self.config = config
         self.datasetio_api = datasetio_api
         self.datasets_api = datasets
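With the context stored on the instance, later methods of TorchtunePostTrainingImpl can place job artifacts under the resolved storage root instead of an absolute path. An illustrative, hypothetical example (this method does not appear in the diff):

from pathlib import Path

# Hypothetical method on TorchtunePostTrainingImpl:
def _checkpoint_dir(self, job_uuid: str) -> Path:
    return self.context.storage_dir / "torchtune" / job_uuid / "checkpoints"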