mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-24 00:47:00 +00:00
**This PR changes configurations in a backward incompatible way.**
Run configs today repeat full SQLite/Postgres snippets everywhere a
store is needed, which means duplicated credentials, extra connection
pools, and lots of drift between files. This PR introduces named storage
backends so the stack and providers can share a single catalog and
reference those backends by name.
## Key Changes
- Add `storage.backends` to `StackRunConfig`, register each KV/SQL
backend once at startup, and validate that references point to the right
family.
- Move server stores under `storage.stores` with lightweight references
(backend + namespace/table) instead of full configs.
- Update every provider/config/doc to use the new reference style;
docs/codegen now surface the simplified YAML.
## Migration
Before:
```yaml
metadata_store:
type: sqlite
db_path: ~/.llama/distributions/foo/registry.db
inference_store:
type: postgres
host: ${env.POSTGRES_HOST}
port: ${env.POSTGRES_PORT}
db: ${env.POSTGRES_DB}
user: ${env.POSTGRES_USER}
password: ${env.POSTGRES_PASSWORD}
conversations_store:
type: postgres
host: ${env.POSTGRES_HOST}
port: ${env.POSTGRES_PORT}
db: ${env.POSTGRES_DB}
user: ${env.POSTGRES_USER}
password: ${env.POSTGRES_PASSWORD}
```
After:
```yaml
storage:
backends:
kv_default:
type: kv_sqlite
db_path: ~/.llama/distributions/foo/kvstore.db
sql_default:
type: sql_postgres
host: ${env.POSTGRES_HOST}
port: ${env.POSTGRES_PORT}
db: ${env.POSTGRES_DB}
user: ${env.POSTGRES_USER}
password: ${env.POSTGRES_PASSWORD}
stores:
metadata:
backend: kv_default
namespace: registry
inference:
backend: sql_default
table_name: inference_store
max_write_queue_size: 10000
num_writers: 4
conversations:
backend: sql_default
table_name: openai_conversations
```
Provider configs follow the same pattern—for example, a Chroma vector
adapter switches from:
```yaml
providers:
vector_io:
- provider_id: chromadb
provider_type: remote::chromadb
config:
url: ${env.CHROMADB_URL}
kvstore:
type: sqlite
db_path: ~/.llama/distributions/foo/chroma.db
```
to:
```yaml
providers:
vector_io:
- provider_id: chromadb
provider_type: remote::chromadb
config:
url: ${env.CHROMADB_URL}
persistence:
backend: kv_default
namespace: vector_io::chroma_remote
```
Once the backends are declared, everything else just points at them, so
rotating credentials or swapping to Postgres happens in one place and
the stack reuses a single connection pool.
65 lines · 2 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import boto3
|
|
import pytest
|
|
from moto import mock_aws
|
|
|
|
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
|
|
from llama_stack.providers.remote.files.s3 import S3FilesImplConfig, get_adapter_impl
|
|
from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
|
|
|
|
|
|
class MockUploadFile:
    """Minimal stand-in for an uploaded file object in the S3 Files tests.

    Exposes the raw bytes as ``content`` plus a ``filename`` and
    ``content_type``, and supports the async ``read()`` accessor that the
    provider under test expects.
    """

    def __init__(self, content: bytes, filename: str, content_type: str = "text/plain"):
        # Store the payload and metadata exactly as given by the caller.
        self.filename = filename
        self.content_type = content_type
        self.content = content

    async def read(self):
        """Return the full payload, mimicking an async file read."""
        return self.content
|
|
|
|
|
|
@pytest.fixture
def sample_text_file():
    """A small text upload used as the first test file."""
    return MockUploadFile(
        b"Hello, this is a test file for the S3 Files API!",
        "sample_text_file-0.txt",
    )
|
|
|
|
|
|
@pytest.fixture
def sample_text_file2():
    """A second, distinct text upload for multi-file scenarios."""
    return MockUploadFile(
        b"Hello, this is a second test file for the S3 Files API!",
        "sample_text_file-1.txt",
    )
|
|
|
|
|
|
@pytest.fixture
def s3_config(tmp_path):
    """Build an S3 files config backed by a per-test SQLite metadata store.

    The backend name and bucket name both embed ``tmp_path.name`` so that
    concurrently running tests never collide on a shared registration.
    """
    metadata_db = tmp_path / "s3_files_metadata.db"
    backend_name = f"sql_s3_{tmp_path.name}"

    # Register the SQLite backend once so the SqlStoreReference below resolves.
    register_sqlstore_backends(
        {backend_name: SqliteSqlStoreConfig(db_path=metadata_db.as_posix())}
    )

    metadata_ref = SqlStoreReference(backend=backend_name, table_name="s3_files_metadata")
    return S3FilesImplConfig(
        bucket_name=f"test-bucket-{tmp_path.name}",
        region="not-a-region",
        auto_create_bucket=True,
        metadata_store=metadata_ref,
    )
|
|
|
|
|
|
@pytest.fixture
def s3_client():
    """Yield a boto3 S3 client wrapped in moto's AWS mock.

    ``mock_aws`` is entered as a context manager because its decorator form
    does not support generator fixtures; yielding (rather than returning)
    keeps the mock active for the duration of the test.
    """
    with mock_aws():
        yield boto3.client("s3")
|
|
|
|
|
|
@pytest.fixture
async def s3_provider(s3_config, s3_client):
    """Instantiate the S3 files adapter against the mocked AWS environment.

    ``s3_client`` is requested purely for its side effect: it activates the
    moto mock before the adapter is created. Do not remove it.
    """
    impl = await get_adapter_impl(s3_config, {})
    try:
        yield impl
    finally:
        await impl.shutdown()