mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-16 23:02:37 +00:00
Some checks failed
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 0s
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 1s
Integration Tests (Replay) / generate-matrix (push) Successful in 3s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 4s
Test Llama Stack Build / generate-matrix (push) Failing after 3s
Test Llama Stack Build / build (push) Has been skipped
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Test llama stack list-deps / generate-matrix (push) Failing after 3s
Test llama stack list-deps / list-deps (push) Has been skipped
API Conformance Tests / check-schema-compatibility (push) Successful in 11s
Python Package Build Test / build (3.13) (push) Successful in 19s
Python Package Build Test / build (3.12) (push) Successful in 23s
Test Llama Stack Build / build-single-provider (push) Successful in 33s
Test llama stack list-deps / show-single-provider (push) Successful in 36s
Test llama stack list-deps / list-deps-from-config (push) Successful in 44s
Vector IO Integration Tests / test-matrix (push) Failing after 57s
Test External API and Providers / test-external (venv) (push) Failing after 1m37s
Unit Tests / unit-tests (3.12) (push) Failing after 1m56s
UI Tests / ui-tests (22) (push) Successful in 2m2s
Unit Tests / unit-tests (3.13) (push) Failing after 2m35s
Pre-commit / pre-commit (22) (push) Successful in 3m16s
Test Llama Stack Build / build-custom-container-distribution (push) Successful in 3m34s
Test Llama Stack Build / build-ubi9-container-distribution (push) Successful in 3m59s
Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 4m30s
# What does this PR do? the build.yaml is only used in the following ways: 1. list-deps 2. distribution code-gen since `llama stack build` no longer exists, I found myself asking "why do we need two different files for list-deps and run"? Removing the BuildConfig and altering the usage of the DistributionTemplate in llama stack list-deps is the first step in removing the build yaml entirely. Removing the BuildConfig and build.yaml cuts the files users need to maintain in half, and allows us to focus on the stability of _just_ the run.yaml This PR removes the build.yaml, BuildConfig datatype, and its usage throughout the codebase. Users are now expected to point to run.yaml files when running list-deps, and our codebase automatically uses these types now for things like `get_provider_registry`. **Additionally, two renames: `StackRunConfig` -> `StackConfig` and `run.yaml` -> `config.yaml`.** The build.yaml made sense for when we were managing the build process for the user and actually _producing_ a run.yaml _from_ the build.yaml, but now that we are simply just getting the provider registry and listing the deps, switching to config.yaml simplifies the scope here greatly. ## Test Plan existing list-deps usage should work in the tests. --------- Signed-off-by: Charlie Doern <cdoern@redhat.com>
162 lines
5.7 KiB
Python
162 lines
5.7 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import tempfile
from pathlib import Path

import pytest
from openai.types.conversations.conversation import Conversation as OpenAIConversation
from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
from pydantic import TypeAdapter

from llama_stack.core.conversations.conversations import (
    ConversationServiceConfig,
    ConversationServiceImpl,
)
from llama_stack.core.datatypes import StackConfig
from llama_stack.core.storage.datatypes import (
    ServerStoresConfig,
    SqliteSqlStoreConfig,
    SqlStoreReference,
    StorageConfig,
)
from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage

@pytest.fixture
async def service():
    """Yield a ConversationServiceImpl backed by a throwaway SQLite database.

    The temporary directory (and the database inside it) lives for the
    duration of one test and is removed automatically afterwards.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        db_file = Path(tmpdir) / "test_conversations.db"

        storage_cfg = StorageConfig(
            backends={"sql_test": SqliteSqlStoreConfig(db_path=str(db_file))},
            stores=ServerStoresConfig(
                conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
                metadata=None,
                inference=None,
                prompts=None,
            ),
        )
        # The backend must be registered before the service can resolve its store.
        register_sqlstore_backends({"sql_test": storage_cfg.backends["sql_test"]})

        stack_cfg = StackConfig(image_name="test", apis=[], providers={}, storage=storage_cfg)
        svc = ConversationServiceImpl(ConversationServiceConfig(config=stack_cfg, policy=[]), {})
        await svc.initialize()
        yield svc


async def test_conversation_lifecycle(service):
    """Create, fetch, and delete a conversation; ids must round-trip."""
    created = await service.create_conversation(metadata={"test": "data"})

    # New conversations carry the canonical "conv_" prefix and echo metadata.
    assert created.id.startswith("conv_")
    assert created.metadata == {"test": "data"}

    fetched = await service.get_conversation(created.id)
    assert fetched.id == created.id

    removed = await service.openai_delete_conversation(created.id)
    assert removed.id == created.id


async def test_conversation_items(service):
    """Items added to a conversation are returned and listed with ids intact."""
    conv = await service.create_conversation()

    message = OpenAIResponseMessage(
        type="message",
        role="user",
        content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
        id="msg_test123",
        status="completed",
    )
    added = await service.add_items(conv.id, [message])

    assert len(added.data) == 1
    assert added.data[0].id == "msg_test123"

    listed = await service.list_items(conv.id)
    assert len(listed.data) == 1


async def test_invalid_conversation_id(service):
    """Ids lacking the 'conv_' prefix are rejected by internal validation."""
    with pytest.raises(ValueError, match="Expected an ID that begins with 'conv_'"):
        await service._get_validated_conversation("invalid_id")


async def test_empty_parameter_validation(service):
    """A blank conversation id raises a non-empty-value validation error."""
    with pytest.raises(ValueError, match="Expected a non-empty value"):
        await service.retrieve("", "item_123")


async def test_openai_type_compatibility(service):
    """Service models must validate against the upstream OpenAI client types."""
    conv = await service.create_conversation(metadata={"test": "value"})

    # Round-tripping through the OpenAI Conversation model preserves fields.
    upstream = OpenAIConversation.model_validate(conv.model_dump())
    for field in ("id", "object", "created_at", "metadata"):
        assert getattr(upstream, field) == getattr(conv, field)

    message = OpenAIResponseMessage(
        type="message",
        role="user",
        content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
        id="msg_test456",
        status="completed",
    )
    added = await service.add_items(conv.id, [message])

    # The item list exposes the standard OpenAI list envelope.
    for field in ("object", "data", "first_id", "last_id", "has_more"):
        assert hasattr(added, field)
    assert added.object == "list"

    listed = await service.list_items(conv.id)
    fetched = await service.retrieve(conv.id, listed.data[0].id)

    # Items must also validate against the OpenAI ConversationItem union.
    TypeAdapter(OpenAIConversationItem).validate_python(fetched.model_dump())


async def test_policy_configuration():
    """A restrictive access policy passed at construction is retained verbatim."""
    from llama_stack.core.access_control.datatypes import Action, Scope
    from llama_stack.core.datatypes import AccessRule

    with tempfile.TemporaryDirectory() as tmpdir:
        db_file = Path(tmpdir) / "test_conversations_policy.db"

        # Deny test_user both CREATE and READ on every resource.
        deny_policy = [
            AccessRule(
                forbid=Scope(principal="test_user", actions=[Action.CREATE, Action.READ], resource="*")
            )
        ]

        storage_cfg = StorageConfig(
            backends={"sql_test": SqliteSqlStoreConfig(db_path=str(db_file))},
            stores=ServerStoresConfig(
                conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
                metadata=None,
                inference=None,
                prompts=None,
            ),
        )
        register_sqlstore_backends({"sql_test": storage_cfg.backends["sql_test"]})

        stack_cfg = StackConfig(image_name="test", apis=[], providers={}, storage=storage_cfg)
        svc = ConversationServiceImpl(
            ConversationServiceConfig(config=stack_cfg, policy=deny_policy), {}
        )
        await svc.initialize()

        # The service must hold exactly the policy it was configured with.
        assert svc.policy == deny_policy
        assert len(svc.policy) == 1
        assert svc.policy[0].forbid is not None