mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-22 08:17:18 +00:00
**This PR changes configurations in a backward incompatible way.** Run configs today repeat full SQLite/Postgres snippets everywhere a store is needed, which means duplicated credentials, extra connection pools, and lots of drift between files. This PR introduces named storage backends so the stack and providers can share a single catalog and reference those backends by name. ## Key Changes - Add `storage.backends` to `StackRunConfig`, register each KV/SQL backend once at startup, and validate that references point to the right family. - Move server stores under `storage.stores` with lightweight references (backend + namespace/table) instead of full configs. - Update every provider/config/doc to use the new reference style; docs/codegen now surface the simplified YAML. ## Migration Before: ```yaml metadata_store: type: sqlite db_path: ~/.llama/distributions/foo/registry.db inference_store: type: postgres host: ${env.POSTGRES_HOST} port: ${env.POSTGRES_PORT} db: ${env.POSTGRES_DB} user: ${env.POSTGRES_USER} password: ${env.POSTGRES_PASSWORD} conversations_store: type: postgres host: ${env.POSTGRES_HOST} port: ${env.POSTGRES_PORT} db: ${env.POSTGRES_DB} user: ${env.POSTGRES_USER} password: ${env.POSTGRES_PASSWORD} ``` After: ```yaml storage: backends: kv_default: type: kv_sqlite db_path: ~/.llama/distributions/foo/kvstore.db sql_default: type: sql_postgres host: ${env.POSTGRES_HOST} port: ${env.POSTGRES_PORT} db: ${env.POSTGRES_DB} user: ${env.POSTGRES_USER} password: ${env.POSTGRES_PASSWORD} stores: metadata: backend: kv_default namespace: registry inference: backend: sql_default table_name: inference_store max_write_queue_size: 10000 num_writers: 4 conversations: backend: sql_default table_name: openai_conversations ``` Provider configs follow the same pattern—for example, a Chroma vector adapter switches from: ```yaml providers: vector_io: - provider_id: chromadb provider_type: remote::chromadb config: url: ${env.CHROMADB_URL} kvstore: type: sqlite db_path: 
~/.llama/distributions/foo/chroma.db ``` to: ```yaml providers: vector_io: - provider_id: chromadb provider_type: remote::chromadb config: url: ${env.CHROMADB_URL} persistence: backend: kv_default namespace: vector_io::chroma_remote ``` Once the backends are declared, everything else just points at them, so rotating credentials or swapping to Postgres happens in one place and the stack reuses a single connection pool.
159 lines
5.5 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
import tempfile
|
|
from pathlib import Path
|
|
|
|
import pytest
|
|
from openai.types.conversations.conversation import Conversation as OpenAIConversation
|
|
from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
|
|
from pydantic import TypeAdapter
|
|
|
|
from llama_stack.apis.agents.openai_responses import (
|
|
OpenAIResponseInputMessageContentText,
|
|
OpenAIResponseMessage,
|
|
)
|
|
from llama_stack.core.conversations.conversations import (
|
|
ConversationServiceConfig,
|
|
ConversationServiceImpl,
|
|
)
|
|
from llama_stack.core.datatypes import StackRunConfig
|
|
from llama_stack.core.storage.datatypes import (
|
|
ServerStoresConfig,
|
|
SqliteSqlStoreConfig,
|
|
SqlStoreReference,
|
|
StorageConfig,
|
|
)
|
|
from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
|
|
|
|
|
|
@pytest.fixture
async def service():
    """Yield a ConversationServiceImpl backed by a throwaway SQLite store.

    Registers a single SQL backend named ``sql_test`` and points the
    conversations store at it, mirroring the named-backend storage config
    used by real run configs.
    """
    # NOTE(review): async-generator fixture with a plain @pytest.fixture —
    # presumably the project runs pytest-asyncio in auto mode; confirm.
    with tempfile.TemporaryDirectory() as tmpdir:
        backend_name = "sql_test"
        storage = StorageConfig(
            backends={
                backend_name: SqliteSqlStoreConfig(db_path=str(Path(tmpdir) / "test_conversations.db")),
            },
            stores=ServerStoresConfig(
                conversations=SqlStoreReference(backend=backend_name, table_name="openai_conversations"),
            ),
        )
        # Backends must be registered before the service resolves its store reference.
        register_sqlstore_backends({backend_name: storage.backends[backend_name]})
        run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage)

        svc = ConversationServiceImpl(ConversationServiceConfig(run_config=run_config, policy=[]), {})
        await svc.initialize()
        yield svc
|
|
|
|
|
|
async def test_conversation_lifecycle(service):
    """Create, fetch, and delete a conversation; IDs must round-trip."""
    created = await service.create_conversation(metadata={"test": "data"})

    # New conversations get a "conv_"-prefixed ID and keep their metadata.
    assert created.id.startswith("conv_")
    assert created.metadata == {"test": "data"}

    fetched = await service.get_conversation(created.id)
    assert fetched.id == created.id

    removed = await service.openai_delete_conversation(created.id)
    assert removed.id == created.id
|
|
|
|
|
|
async def test_conversation_items(service):
    """Items added to a conversation can be listed back unchanged."""
    conv = await service.create_conversation()

    message = OpenAIResponseMessage(
        type="message",
        role="user",
        content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
        id="msg_test123",
        status="completed",
    )
    added = await service.add_items(conv.id, [message])

    # add_items echoes back the stored items, preserving the caller-supplied ID.
    assert len(added.data) == 1
    assert added.data[0].id == "msg_test123"

    listed = await service.list(conv.id)
    assert len(listed.data) == 1
|
|
|
|
|
|
async def test_invalid_conversation_id(service):
    """IDs lacking the 'conv_' prefix are rejected with a ValueError."""
    malformed_id = "invalid_id"
    with pytest.raises(ValueError, match="Expected an ID that begins with 'conv_'"):
        await service._get_validated_conversation(malformed_id)
|
|
|
|
|
|
async def test_empty_parameter_validation(service):
    """An empty conversation ID is rejected before any store lookup."""
    with pytest.raises(ValueError, match="Expected a non-empty value"):
        await service.retrieve("", "item_123")
|
|
|
|
|
|
async def test_openai_type_compatibility(service):
    """Service models must validate against the official OpenAI client types."""
    conv = await service.create_conversation(metadata={"test": "value"})

    # Round-trip the conversation through the OpenAI Conversation model and
    # confirm the core fields survive unchanged.
    openai_conv = OpenAIConversation.model_validate(conv.model_dump())
    assert openai_conv.id == conv.id
    assert openai_conv.object == conv.object
    assert openai_conv.created_at == conv.created_at
    assert openai_conv.metadata == conv.metadata

    message = OpenAIResponseMessage(
        type="message",
        role="user",
        content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
        id="msg_test456",
        status="completed",
    )
    added = await service.add_items(conv.id, [message])

    # The item list carries the OpenAI list-envelope fields.
    for field in ("object", "data", "first_id", "last_id", "has_more"):
        assert hasattr(added, field)
    assert added.object == "list"

    listed = await service.list(conv.id)
    fetched = await service.retrieve(conv.id, listed.data[0].id)

    # Individual items must validate against the OpenAI ConversationItem union.
    TypeAdapter(OpenAIConversationItem).validate_python(fetched.model_dump())
|
|
|
|
|
|
async def test_policy_configuration():
    """An access policy passed via config is stored verbatim on the service."""
    from llama_stack.core.access_control.datatypes import Action, Scope
    from llama_stack.core.datatypes import AccessRule

    # Builds its own storage (instead of using the shared fixture) because this
    # test needs a non-default policy wired in at construction time.
    with tempfile.TemporaryDirectory() as tmpdir:
        backend_name = "sql_test"
        db_file = Path(tmpdir) / "test_conversations_policy.db"

        deny_all_for_user = [
            AccessRule(forbid=Scope(principal="test_user", actions=[Action.CREATE, Action.READ], resource="*"))
        ]

        storage = StorageConfig(
            backends={
                backend_name: SqliteSqlStoreConfig(db_path=str(db_file)),
            },
            stores=ServerStoresConfig(
                conversations=SqlStoreReference(backend=backend_name, table_name="openai_conversations"),
            ),
        )
        register_sqlstore_backends({backend_name: storage.backends[backend_name]})
        run_config = StackRunConfig(image_name="test", apis=[], providers={}, storage=storage)

        svc = ConversationServiceImpl(
            ConversationServiceConfig(run_config=run_config, policy=deny_all_for_user), {}
        )
        await svc.initialize()

        assert svc.policy == deny_all_for_user
        assert len(svc.policy) == 1
        assert svc.policy[0].forbid is not None