feat: Add OpenAI Conversations API (#3429)

# What does this PR do?

Initial implementation of `Conversations` and `ConversationItems` backed by
`AuthorizedSqlStore`, with endpoints to:
- CREATE
- UPDATE
- GET/RETRIEVE/LIST
- DELETE

Set `level=LLAMA_STACK_API_V1`.
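
For illustration, here is a minimal sketch of exercising these endpoints over plain HTTP with `httpx`. The base URL, port, and route prefix are assumptions (they depend on how the server mounts its OpenAI-compatible routes), and the request/response shapes follow OpenAI's Conversations API rather than anything confirmed in this PR.

```python
import httpx

# Assumed llama-stack server address and route prefix; adjust to your deployment.
BASE_URL = "http://localhost:8321/v1"

with httpx.Client(base_url=BASE_URL) as client:
    # CREATE a conversation with optional seed items and metadata
    conv = client.post(
        "/conversations",
        json={
            "metadata": {"topic": "demo"},
            "items": [
                {
                    "type": "message",
                    "role": "user",
                    "content": [{"type": "input_text", "text": "Hello"}],
                }
            ],
        },
    ).json()

    # GET/RETRIEVE the conversation and LIST its items
    retrieved = client.get(f"/conversations/{conv['id']}").json()
    items = client.get(f"/conversations/{conv['id']}/items").json()

    # UPDATE the conversation metadata
    client.post(f"/conversations/{conv['id']}", json={"metadata": {"topic": "updated"}})

    # DELETE the conversation
    client.delete(f"/conversations/{conv['id']}")
```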

NOTE: This does not yet incorporate changes for Responses; that will be done
in a subsequent PR.

Closes https://github.com/llamastack/llama-stack/issues/3235

## Test Plan
- Unit tests
- Integration tests

Also compared against the [OpenAPI spec for the OpenAI
API](https://github.com/openai/openai-openapi/tree/manual_spec):
```bash
oasdiff breaking --fail-on ERR docs/static/llama-stack-spec.yaml https://raw.githubusercontent.com/openai/openai-openapi/refs/heads/manual_spec/openapi.yaml --strip-prefix-base "/v1/openai/v1" \
--match-path '(^/v1/openai/v1/conversations.*|^/conversations.*)'
```

Note: I still have some uncertainty about this check. I borrowed the approach
from @cdoern in https://github.com/llamastack/llama-stack/pull/3514 and need
to spend more time confirming it works; at the moment it suggests that it
does.

UPDATE on `oasdiff`: after investigating the OpenAI spec further, it appears
the spec does not currently list Conversations, so that analysis is not
useful. Noting for future reference.

---------

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

@@ -0,0 +1,60 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.conversations.conversations import (
    Conversation,
    ConversationCreateRequest,
    ConversationItem,
    ConversationItemList,
)


def test_conversation_create_request_defaults():
    request = ConversationCreateRequest()
    assert request.items == []
    assert request.metadata == {}


def test_conversation_model_defaults():
    conversation = Conversation(
        id="conv_123456789",
        created_at=1234567890,
        metadata=None,
        object="conversation",
    )
    assert conversation.id == "conv_123456789"
    assert conversation.object == "conversation"
    assert conversation.metadata is None


def test_openai_client_compatibility():
    from openai.types.conversations.message import Message
    from pydantic import TypeAdapter

    openai_message = Message(
        id="msg_123",
        content=[{"type": "input_text", "text": "Hello"}],
        role="user",
        status="in_progress",
        type="message",
        object="message",
    )

    adapter = TypeAdapter(ConversationItem)
    validated_item = adapter.validate_python(openai_message.model_dump())

    assert validated_item.id == "msg_123"
    assert validated_item.type == "message"


def test_conversation_item_list():
    item_list = ConversationItemList(data=[])
    assert item_list.object == "list"
    assert item_list.data == []
    assert item_list.first_id is None
    assert item_list.last_id is None
    assert item_list.has_more is False


@@ -0,0 +1,132 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import tempfile
from pathlib import Path

import pytest
from openai.types.conversations.conversation import Conversation as OpenAIConversation
from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
from pydantic import TypeAdapter

from llama_stack.apis.agents.openai_responses import (
    OpenAIResponseInputMessageContentText,
    OpenAIResponseMessage,
)
from llama_stack.core.conversations.conversations import (
    ConversationServiceConfig,
    ConversationServiceImpl,
)
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig


@pytest.fixture
async def service():
    with tempfile.TemporaryDirectory() as tmpdir:
        db_path = Path(tmpdir) / "test_conversations.db"
        config = ConversationServiceConfig(conversations_store=SqliteSqlStoreConfig(db_path=str(db_path)), policy=[])
        service = ConversationServiceImpl(config, {})
        await service.initialize()
        yield service


async def test_conversation_lifecycle(service):
    conversation = await service.create_conversation(metadata={"test": "data"})

    assert conversation.id.startswith("conv_")
    assert conversation.metadata == {"test": "data"}

    retrieved = await service.get_conversation(conversation.id)
    assert retrieved.id == conversation.id

    deleted = await service.openai_delete_conversation(conversation.id)
    assert deleted.id == conversation.id


async def test_conversation_items(service):
    conversation = await service.create_conversation()

    items = [
        OpenAIResponseMessage(
            type="message",
            role="user",
            content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
            id="msg_test123",
            status="completed",
        )
    ]
    item_list = await service.add_items(conversation.id, items)
    assert len(item_list.data) == 1
    assert item_list.data[0].id == "msg_test123"

    items = await service.list(conversation.id)
    assert len(items.data) == 1


async def test_invalid_conversation_id(service):
    with pytest.raises(ValueError, match="Expected an ID that begins with 'conv_'"):
        await service._get_validated_conversation("invalid_id")


async def test_empty_parameter_validation(service):
    with pytest.raises(ValueError, match="Expected a non-empty value"):
        await service.retrieve("", "item_123")


async def test_openai_type_compatibility(service):
    conversation = await service.create_conversation(metadata={"test": "value"})

    conversation_dict = conversation.model_dump()
    openai_conversation = OpenAIConversation.model_validate(conversation_dict)

    for attr in ["id", "object", "created_at", "metadata"]:
        assert getattr(openai_conversation, attr) == getattr(conversation, attr)

    items = [
        OpenAIResponseMessage(
            type="message",
            role="user",
            content=[OpenAIResponseInputMessageContentText(type="input_text", text="Hello")],
            id="msg_test456",
            status="completed",
        )
    ]
    item_list = await service.add_items(conversation.id, items)

    for attr in ["object", "data", "first_id", "last_id", "has_more"]:
        assert hasattr(item_list, attr)
    assert item_list.object == "list"

    items = await service.list(conversation.id)
    item = await service.retrieve(conversation.id, items.data[0].id)
    item_dict = item.model_dump()

    openai_item_adapter = TypeAdapter(OpenAIConversationItem)
    openai_item_adapter.validate_python(item_dict)


async def test_policy_configuration():
    from llama_stack.core.access_control.datatypes import Action, Scope
    from llama_stack.core.datatypes import AccessRule

    with tempfile.TemporaryDirectory() as tmpdir:
        db_path = Path(tmpdir) / "test_conversations_policy.db"

        restrictive_policy = [
            AccessRule(forbid=Scope(principal="test_user", actions=[Action.CREATE, Action.READ], resource="*"))
        ]

        config = ConversationServiceConfig(
            conversations_store=SqliteSqlStoreConfig(db_path=str(db_path)), policy=restrictive_policy
        )
        service = ConversationServiceImpl(config, {})
        await service.initialize()

        assert service.policy == restrictive_policy
        assert len(service.policy) == 1
        assert service.policy[0].forbid is not None


@@ -368,6 +368,32 @@ async def test_where_operator_gt_and_update_delete():
        assert {r["id"] for r in rows_after} == {1, 3}


async def test_batch_insert():
    with TemporaryDirectory() as tmp_dir:
        db_path = tmp_dir + "/test.db"
        store = SqlAlchemySqlStoreImpl(SqliteSqlStoreConfig(db_path=db_path))

        await store.create_table(
            "batch_test",
            {
                "id": ColumnType.INTEGER,
                "name": ColumnType.STRING,
                "value": ColumnType.INTEGER,
            },
        )

        batch_data = [
            {"id": 1, "name": "first", "value": 10},
            {"id": 2, "name": "second", "value": 20},
            {"id": 3, "name": "third", "value": 30},
        ]

        await store.insert("batch_test", batch_data)

        result = await store.fetch_all("batch_test", order_by=[("id", "asc")])
        assert result.data == batch_data


async def test_where_operator_edge_cases():
    with TemporaryDirectory() as tmp_dir:
        db_path = tmp_dir + "/test.db"